repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
evanbiederstedt/RRBSfun | epiphen/total_chrY.py | 2 | 32997 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chrY_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
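# Editorial sketch (not part of the original script): pd.concat on the shared
# "position" index performs an outer join, so positions missing from a given file
# become NaN in that file's column; note that, depending on the pandas version, the
# column produced by reset_index() may be named "position" rather than "index".
# The helper below is never called and uses made-up column names purely to
# illustrate the alignment behaviour.
def _sketch_outer_join_alignment():
    a = pd.DataFrame({"position": ["chrY_1", "chrY_2"], "cell_a": [1, 0]})
    b = pd.DataFrame({"position": ["chrY_2", "chrY_3"], "cell_b": [1, 1]})
    # Rows chrY_1 and chrY_3 end up with NaN in the column of the file lacking them.
    return pd.concat([d.set_index("position") for d in (a, b)], axis=1)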
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("total_chromY.phy", header=None, index=None)
print(tott.shape)
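# Editorial sketch (not part of the original script): each line written to
# total_chromY.phy has the form "<sample name> <one character per CpG position>",
# where each character is "1", "0", or "?" for a missing call. A tiny illustration
# with a made-up sample name (the helper is never called):
def _sketch_phy_line():
    calls = pd.Series([1, None, 0], name="RRBS_example_sample")
    encoded = calls.map(lambda x: int(x) if pd.notnull(x) else "?").astype(str)
    return calls.name + " " + "".join(encoded)  # -> "RRBS_example_sample 1?0"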
| mit |
hande-qmc/hande | tools/canonical_energy/analyse_canonical.py | 1 | 2708 | #!/usr/bin/env python
import pandas as pd
import os
import pkgutil
import sys
import warnings
import argparse
if not pkgutil.find_loader('pyhande'):
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
options : :class:`ArgumentParser`
Options read in from command line.
'''
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument('-s', '--sim', action='store_true', default=False,
dest='multi_sim', help='Do not average over multiple '
'simulations in the same or from multiple data files.')
parser.add_argument('filename', nargs='+', help='HANDE output.')
options = parser.parse_args(args)
if not options.filename:
parser.print_help()
sys.exit(1)
return options
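# Editorial note (not in the original tool): parse_args() can also be exercised
# directly from an interpreter; the file name below is purely hypothetical.
#     opts = parse_args(['--sim', 'hande.out'])
#     opts.multi_sim   # -> True
#     opts.filename    # -> ['hande.out']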
def main(args):
''' Analyse the output from a canonical estimates calculation.
Parameters
----------
filename : list of strings
files to be analysed.
'''
args = parse_args(args)
hande_out = pyhande.extract.extract_data_sets(args.filename)
(metadata, data) = ([], [])
for (md, df) in hande_out:
# Handle old output with incorrect title...
if md['calc_type'] == 'Canonical energy' or md['calc_type'] == 'RNG':
metadata.append(md)
data.append(df)
if data and not args.multi_sim:
data = pd.concat(data)
# Sanity check: are all the calculations from the same calculation?
# Only check for new metadata format...
if 'beta' in metadata[0]:
beta = metadata[0]['beta']
for md in metadata[1:]:
if 'beta' in md and md['beta'] != beta:
warnings.warn('Beta values in input files not consistent.')
if args.multi_sim:
results = pd.DataFrame([pyhande.canonical.estimates(m, d) for (m, d)
in zip(metadata, data)])
else:
results = pd.DataFrame(pyhande.canonical.estimates(metadata[0], data)).T
try:
float_fmt = '{0:-#.8e}'.format
float_fmt(1.0)
except ValueError:
# GAH. Alternate formatting only added to format function after
# python 2.6..
float_fmt = '{0:-.8e}'.format
# Work around bug in to_string alignment with index=False
lines = results.to_string(float_format=float_fmt).split('\n')
index_width = max([len(str(s)) for s in results.index]) + 1
print('\n'.join(l[index_width:] for l in lines))
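# Editorial note (not in the original tool): slicing each rendered line at the width
# of the widest index label is what drops the index column here, e.g. (roughly)
#     "0   1.23456789e+00 ..."  ->  "  1.23456789e+00 ..."
# which sidesteps the to_string(index=False) alignment issue mentioned above.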
if __name__ == '__main__':
main(sys.argv[1:])
| lgpl-2.1 |
vshtanko/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
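# Editorial sketch (not part of the original benchmark): MiniBatchKMeans can also be
# fed chunks explicitly via partial_fit, the streaming analogue of the batch_size
# driven fit() used above. The helper below is never called; shapes are arbitrary.
def _sketch_partial_fit(chunk_iter, n_clusters=8):
    from sklearn.cluster import MiniBatchKMeans
    mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_clusters)
    for chunk in chunk_iter:  # each chunk: array of shape (n_samples_i, n_features)
        mbk.partial_fit(chunk)
    return mbk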
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/formula/tests/test_formula.py | 2 | 5408 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0 , 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
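# Editorial note (not in the original test): the same string hypotheses can be passed
# straight to the fitted results, e.g. (sketch, using the longley fit above)
#     results.f_test('GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1')
# which constructs equivalent restriction matrices internally.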
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
def test_formula_predict_series():
import pandas as pd
import pandas.util.testing as tm
data = pd.DataFrame({"y": [1, 2, 3], "x": [1, 2, 3]}, index=[5, 3, 1])
results = ols('y ~ x', data).fit()
result = results.predict(data)
expected = pd.Series([1., 2., 3.], index=[5, 3, 1])
tm.assert_series_equal(result, expected)
result = results.predict(data.x)
tm.assert_series_equal(result, expected)
result = results.predict(pd.Series([1, 2, 3], index=[1, 2, 3], name='x'))
expected = pd.Series([1., 2., 3.], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
result = results.predict({"x": [1, 2, 3]})
expected = pd.Series([1., 2., 3.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/preprocessing/tests/test_label.py | 26 | 21027 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_true(assert_warns(DeprecationWarning, getattr, lb_1, "multilabel_"))
assert_false(assert_warns(DeprecationWarning, getattr, lb_1,
"indicator_matrix_"))
assert_array_equal(out_2, binaryclass_array)
assert_false(assert_warns(DeprecationWarning, getattr, lb_2,
"multilabel_"))
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_true(assert_warns(DeprecationWarning, getattr, lb_1, "multilabel_"))
assert_array_equal(out_2, indicator)
assert_false(assert_warns(DeprecationWarning, getattr, lb_2,
"multilabel_"))
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
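# Editorial note (not in the original file): the neg_label/pos_label rescaling tested
# above is, roughly, the same mapping as
#     label_binarize([0, 1], classes=[0, 1], neg_label=-2, pos_label=2)
#     # -> array([[-2], [ 2]])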
@ignore_warnings
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
assert_false(assert_warns(DeprecationWarning, getattr, lb, "multilabel_"))
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
y = np.array([[0, 1, 0], [1, 1, 1]])
classes = np.arange(3)
assert_raises(ValueError, label_binarize, y, classes, multilabel=True,
neg_label=2, pos_label=1)
assert_raises(ValueError, label_binarize, y, classes, multilabel=True,
neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
"""Ensure sequences of the same length are not interpreted as a 2-d array
"""
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
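# Editorial note (not in the original file): the "yield check_binarized_results, ..."
# lines use nose's generator-test pattern; nose calls
# check_binarized_results(y, classes, pos_label, neg_label, expected) once per
# yielded tuple, so each case runs (and is reported) as a separate test.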
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_seq = [(1,), (0, 1, 2), tuple()]
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
deprecation_message = ("Direct support for sequence of sequences " +
"multilabel representation will be unavailable " +
"from version 0.17. Use sklearn.preprocessing." +
"MultiLabelBinarizer to convert to a label " +
"indicator representation.")
assert_warns_message(DeprecationWarning, deprecation_message,
check_binarized_results, y_seq, classes, pos_label,
neg_label, expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_deprecation_inverse_binarize_thresholding():
deprecation_message = ("Direct support for sequence of sequences " +
"multilabel representation will be unavailable " +
"from version 0.17. Use sklearn.preprocessing." +
"MultiLabelBinarizer to convert to a label " +
"indicator representation.")
assert_warns_message(DeprecationWarning, deprecation_message,
_inverse_binarize_thresholding,
y=csr_matrix([[1, 0], [0, 1]]),
output_type="multilabel-sequences",
classes=[1, 2], threshold=0)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
zmlabe/IceVarFigs | Scripts/SeaIce/JAXA_seaice_1980smeanblack.py | 1 | 8537 | """
Plots Arctic daily sea ice extent from June 2002-present using JAXA metadata
Website : https://ads.nipr.ac.jp/vishop/vishop-extent.html
Author : Zachary M. Labe
Date : 15 May 2016
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import datetime
import urllib.request
import urllib as UL
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day-1)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
### Load url
url = 'https://ads.nipr.ac.jp/vishop.ver1/data/graph/plot_extent_n_v2.csv'
### Read file
raw_data = urllib.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=0,delimiter=",",)
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
month = dataset[1:,0] # 1-12, nan as month[0]
day = dataset[1:,1] # 1-31, nan as day[0]
mean1980 = dataset[1:,2] # km^2, nan as mean1980[0]
mean1990 = dataset[1:,3] # km^2, nan as mean1990[0]
mean2000 = dataset[1:,4] # km^2, nan as mean2000[0]
years = dataset[1:,5:]
doy = np.arange(0,len(day),1)
### Change units to million km^2
years = years/1e6
### Recent day of current year
currentyear = years[:,-1]
lastday = now.timetuple().tm_yday -1
currentice = currentyear[lastday]
currentanom = currentice - (mean1980[lastday]/1e6)
# Leap year
currentyear[59] = currentyear[58]
### Changes in the last day and week
weekchange = currentice - currentyear[lastday-7]
daychange = currentice - currentyear[lastday-1]
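### Editorial sketch (not part of the original script): the daily changes printed at
### the end of this script can also be obtained in one step, e.g.
###     np.diff(currentyear[lastday-5:lastday+1]) * 1e6  # last five daily changes, km^2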
###############################################################################
###############################################################################
###############################################################################
### Plot figure
matplotlib.rc('savefig', facecolor='black')
matplotlib.rc('axes', edgecolor='white')
matplotlib.rc('xtick', color='white')
matplotlib.rc('ytick', color='white')
matplotlib.rc('axes', labelcolor='white')
matplotlib.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 0))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
oldaverage = currentyear.copy()
oldaverage[lastday:] = currentyear[lastday]
### 2000s average
average2000s = mean2000.copy()
average2000s[lastday:] = mean2000[lastday]
average2000s = average2000s/1e6
oldmin = np.where(mean2000 == np.min(mean2000))[0]
### 1990s average
average1990s = mean1990.copy()
average1990s[lastday:] = mean1990[lastday]
average1990s = average1990s/1e6
### 1980s average
average1980s = mean1980.copy()
average1980s[lastday:] = mean1980[lastday]
average1980s = average1980s/1e6
difference = (oldmin - lastday)[0]
### Are we below decadal climatological min?
if (currentyear[lastday]*1e6) < np.nanmin(mean1980):
print( True, '1980')
if (currentyear[lastday]*1e6) < np.nanmin(mean1990):
print(True, '1990')
if (currentyear[lastday]*1e6) < np.nanmin(mean2000):
print(True, '2000')
### Calculate record low SIE
recordlow = np.empty((years.shape[0]))
for i in range(years.shape[0]):
if years[i,-1] == np.nanmin(years[i,:]):
recordlow[i] = 1.
else:
recordlow[i] = 0.
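### Editorial note (not part of the original script): the loop above is equivalent to
### the vectorised form
###     recordlow = (years[:,-1] == np.nanmin(years, axis=1)).astype(float)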
### Begin plot
plt.plot(doy,years[:,:],color='w',linewidth=0.15,
linestyle='-',alpha=0.7)
bar = ax.plot(doy,currentyear,linewidth=2.9,zorder=3,
color='darkorange',)
plt.scatter(doy[lastday],currentyear[lastday],
s=20,color='darkorange',zorder=4)
plt.scatter(doy[lastday],mean2000[lastday]/1e6,
s=20,color='dodgerblue',zorder=11)
plt.scatter(doy[lastday],mean1990[lastday]/1e6,
s=20,color='c',zorder=11)
plt.scatter(doy[lastday],mean1980[lastday]/1e6,
s=20,color='darkmagenta',zorder=11)
plt.plot(doy,mean1980/1e6,linewidth=1.8,linestyle='-',
color='darkmagenta',label=r'1980s Mean')
plt.plot(doy,mean1990/1e6,linewidth=1.8,linestyle='-',
color='c',label=r'1990s Mean')
plt.plot(doy,mean2000/1e6,linewidth=1.8,linestyle='-',
color='dodgerblue',label=r'2000s Mean')
plt.plot(oldaverage,color='darkorange',linestyle=':',linewidth=2.8,zorder=5)
plt.plot(average2000s,color='dodgerblue',linestyle=':',linewidth=1.8,zorder=11)
plt.plot(average1990s,color='c',linestyle=':',linewidth=1.8,zorder=11)
plt.plot(average1980s,color='darkmagenta',linestyle=':',linewidth=1.8,zorder=11)
### Define date
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
strmonth = xlabels[int(currentmn)-1]
asof = strmonth + ' ' + currentdy + ', ' + currentyr
### Add additional information to the plot
xcord = 109
ycord = 9.4
if recordlow[lastday] == 1.0:
plt.text(xcord + 2,ycord,r'\textbf{[*Record Low*]}',fontsize=11,
rotation='horizontal',ha='left',color='aqua')
xcord = lastday - 5.5
ycord = round(currentice)-0.8
plt.text(31.4,16.9,r'\textbf{DATA:} JAXA 2002-2017 (Arctic Data archive System, NIPR)',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(31.4,16.7,r'\textbf{SOURCE:} https://ads.nipr.ac.jp/vishop/vishop-extent.html',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(31.4,16.5,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
fontsize=5,rotation='horizontal',ha='left',color='w',alpha=0.6)
plt.text(doy[lastday]+8,currentyear[lastday]-0.4,r'\textbf{$\bf{\longrightarrow}$}',
fontsize=18,rotation=140,ha='right',color='darkorange')
plt.text(122,currentyear[lastday]-0.2,r'\textbf{CURRENT}',
fontsize=9.5,rotation='horizontal',ha='left',
color='darkorange',alpha=1)
plt.text(122,mean2000[lastday]/1e6-0.2,r'2000s',
fontsize=10,rotation='horizontal',ha='left',
color='dodgerblue')
plt.text(122,mean1990[lastday]/1e6-0.2,r'1990s',
fontsize=10,rotation='horizontal',ha='left',
color='c')
plt.text(122,mean1980[lastday]/1e6-0.05,r'1980s',
fontsize=10,rotation='horizontal',ha='left',
color='darkmagenta')
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params(axis='both', direction='out',length=5.5,width=2,
which='major',pad=7)
plt.ylabel(r'\textbf{Extent [$\bf{\times 10^{6}}$\ \textbf{km}$\bf{^2}$]}',
fontsize=15,alpha=0.6)
l = plt.legend(shadow=False,fontsize=8.5,loc='lower left',
bbox_to_anchor=(0.768, -0.025),fancybox=True,ncol=1,
frameon=False)
for text in l.get_texts():
text.set_color('w')
text.set_alpha(0.6)
plt.xticks(np.arange(0,366,30.4),xlabels,rotation=0,fontsize=11)
ylabels = map(str,np.arange(1,18,1))
plt.yticks(np.arange(1,18,1),ylabels,fontsize=13)
plt.ylim([10,17])
plt.xlim([30.4,121.59])
fig.suptitle(r'\textbf{ARCTIC SEA ICE}',fontsize=28,color='w',alpha=0.6)
plt.savefig(directoryfigure + 'JAXA_seaice_means_xe5.png',dpi=900)
### Print additional information
print('\n')
print('----JAXA Sea Ice Change----')
print('Day 5 = %s km^2' % ((currentyear[lastday-4] - currentyear[lastday-5])*1e6))
print('Day 4 = %s km^2' % ((currentyear[lastday-3] - currentyear[lastday-4])*1e6))
print('Day 3 = %s km^2' % ((currentyear[lastday-2] - currentyear[lastday-3])*1e6))
print('Day 2 = %s km^2' % ((currentyear[lastday-1] - currentyear[lastday-2])*1e6))
print('Day 1 = %s km^2' % ((currentyear[lastday] - currentyear[lastday-1])*1e6))
print('\n' 'Total 5-day Change = %s km^2 \n' % ((currentyear[lastday]-currentyear[lastday-5])*1e6))
print('2017-1980 = %s km^2' % ((currentyear[lastday]*1e6) - mean1980[lastday]))
print('2017-1990 = %s km^2' % ((currentyear[lastday]*1e6) - mean1990[lastday]))
print('2017-2000 = %s km^2' % ((currentyear[lastday]*1e6) - mean2000[lastday]))
print('\n') | mit |
ElDeveloper/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
davidpng/FCS_Database | FlowAnal/FCS_subroutines/ND_Feature_Extraction.py | 1 | 5931 | # -*- coding: utf-8 -*-
"""
Created on Tue 30 Dec 2014 10:29:41 AM PST
This file describes a feature extraction class for N dimensions
@author: David Ng, MD
"""
__author__ = "David Ng, MD"
__copyright__ = "Copyright 2014"
__license__ = "GPL v3"
__version__ = "1.0"
__maintainer__ = "Daniel Herman"
__email__ = "[email protected]"
__status__ = "Production"
"""Installed Packages"""
import pandas as pd
import numpy as np
import scipy as sp
import h5py
"""Built in packages"""
import os.path
import logging
log = logging.getLogger(__name__)
class ND_Feature_Extraction(object):
def __init__(self,FCS,bins,**kwargs):
""" Performs N-Dimenstional Feature Extration on FCS.data
This class takes a data FCS object which as been compensated and
normalized and generates a sparse array indexed on bin_number and
normalized number of events in that bin.
Accessiable Parameters:
.type -- <string> containing a short name for this type
of feature extraction
.bin_description -- <pd.Series> indexed on data.column names and
storing number of bins on that
channel/column
.histogram -- <csr_matrix> indexed on bin_number and the
normalized number of events in
that bin.
        Accessible Functions:
.Return_Coordinates -- Returns the bin centroid for a given index
or bin number (I think...)
"""
self.type = 'Full'
if 'exclude_param' in kwargs:
exclude = kwargs['exclude_param']
else:
exclude = ['FSC-H','SSC-A','Time']
#generate list of columns to be used
columns = [c for c in FCS.data.columns if c not in exclude]
#generate a dictionary describing the bins to be used
bin_dict = self._Generate_Bin_Dict(columns,bins)
self.bin_description = bin_dict
#bin the data so that coordinates are generated for every data point in FCS.data
vector_length,coordinates = self._Uniform_Bin_Data(input_data = FCS.data, bin_dict = bin_dict)
        #generate a sparse array from the given coordinates
self.histogram = self._coord2sparse_histogram(vector_length,
coordinates,
**kwargs).tocsr()
def _coord2sparse_histogram(self,vector_length,coordinates,normalize=True,**kwargs):
"""
generates a sparse matrix with normalized histogram counts
each bin describes the fraction of total events within it (i.e. < 1)
"""
output=sp.sparse.lil_matrix((1,vector_length), dtype=np.float32)
for i in coordinates:
output[0,i]+=1
if normalize:
return output/ len(coordinates)
else:
return output
def _Uniform_Bin_Data(self,input_data,bin_dict):
"""
fits event parameters to an integer 'binned' value
"""
        basis = [1] #initialize a list of basis values
for i in bin_dict.values:
basis.append(i*basis[-1]) # basis values will be generated dependent on previous value
# ex base-10 basis = [1,10,100,1000]
                                      # logic and algorithm from Donald Knuth's Art of Computer Programming Vol 1
vector_length = basis.pop() # this is the highest coordinate value (max length of array)
basis = pd.Series(data=basis,index = bin_dict.index.values)
rounded = input_data.copy() # copy input_data since we will soon operate on it.
for key in bin_dict.index.values:
rounded[key] = np.floor(rounded[key]*bin_dict[key]) # iterate and round over every column
output = rounded[bin_dict.index.values].dot(basis) # apply dot product to multiply columns
                                                             # by basis vector (see Knuth)
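        # Worked example (illustrative numbers): with three channels of 10 bins
        # each, basis == [1, 10, 100] and vector_length == 1000, so an event
        # rounded to bin indices (3, 7, 2) maps to the single flattened
        # coordinate 3*1 + 7*10 + 2*100 == 273, i.e. a mixed-radix encoding.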
log.debug("Vector Length: {}, \nRounded: {}, \nBasis: {}, \
\nCoordinates: {}".format(vector_length,rounded,basis,output))
if len(input_data) == 0:
raise ValueError("FCS data is empty!")
if len(output) == 0:
raise ValueError("Coordinate Data is empty!")
return vector_length, output.apply(np.int64)
def _Generate_Bin_Dict(self,columns,bins):
"""
        Performs error checking and type conversion for bins
"""
if isinstance(bins,int):
bin_dict = pd.Series([bins] * len(columns),index=columns)
elif isinstance(bins,list):
if len(bins) != len(columns):
raise RuntimeWarning("number of bins in the list does not match the number of parameters")
else:
bin_dict = pd.Series(bins,columns)
elif isinstance(bins,dict):
            if set(bins.keys()) != set(columns):
raise RuntimeWarning("The bin keys do not match the provided columns")
else:
raise RuntimeWarning("bin dict not implemented")
else:
raise TypeError("provided bins parameter is not supported")
return bin_dict
def Return_Coordinates(self,index):
"""
Returns the bin parameters
"""
if isinstance(index,int):
index = [index] # make sure index is a list
coords = self.histogram.indices[index]
self.x = np.array(np.unravel_index(coords,list(self.bin_description)),dtype=np.float32).T
temp = self.x / np.array(self.bin_description)[np.newaxis]
return pd.DataFrame(temp,index=coords,columns=self.bin_description.index.values)
| gpl-3.0 |
herilalaina/scikit-learn | examples/linear_model/plot_lasso_lars.py | 40 | 1075 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
_, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/calibration/plot_calibration_curve.py | 12 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
yunque/librosa | tests/mpl_ic.py | 3 | 11767 | # CREATED:2015-02-17 14:41:28 by Brian McFee <[email protected]>
# this function is lifted wholesale from matploblib v1.4.2,
# and modified so that images are stored explicitly under the tests path
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib.tests
import matplotlib.units
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None):
"""
    Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err,known_exception_class):
# This is not the expected exception
raise
                    # (Keep the next ultra-long comment so it shows in console.)
raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
def _do_cleanup(original_units_registry):
plt.close('all')
gc.collect()
matplotlib.tests.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(original_units_registry)
warnings.resetwarnings() # reset any warning filters set in tests
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
_do_cleanup(cls.original_units_registry)
def test(self):
self._func()
class CleanupTestCase(unittest.TestCase):
'''A wrapper for unittest.TestCase that includes cleanup operations'''
@classmethod
def setUpClass(cls):
import matplotlib.units
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def tearDownClass(cls):
_do_cleanup(cls.original_units_registry)
def cleanup(func):
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
try:
func(*args, **kwargs)
finally:
_do_cleanup(original_units_registry)
return wrapped_function
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, six.string_types):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
try:
ax.zaxis.set_major_formatter(ticker.NullFormatter())
ax.zaxis.set_minor_formatter(ticker.NullFormatter())
except AttributeError:
pass
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
for extension in self._extensions:
will_fail = not extension in comparable_formats()
if will_fail:
fail_msg = 'Cannot compare %s files on this system' % extension
else:
fail_msg = 'No failure expected'
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
expected_fname = make_test_filename(os.path.join(
result_dir, os.path.basename(orig_expected_fname)), 'expected')
actual_fname = os.path.join(result_dir, baseline) + '.' + extension
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
figure = plt.figure(fignum)
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname, **self._savefig_kwarg)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s '
'(RMS %(rms).3f)'%err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailureTest(
"Mismatched version of freetype. Test requires '%s', you have '%s'" %
(self._freetype_version, ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
freetype_version=None, remove_text=False,
savefig_kwarg=None):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
Compare images generated by the test with those specified in
    *baseline_images*, which must correspond, otherwise an
ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 13)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
*savefig_kwarg*: dict
Optional arguments that are passed to the savefig method.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
#default no kwargs to savefig
savefig_kwarg = dict()
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = str('_private')
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text,
'_savefig_kwarg': savefig_kwarg})
return new_class
return compare_images_decorator
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
# mods = module_name.split('.')
# mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
# assert mods.pop(0) == 'tests'
# subdir = os.path.join(*mods)
subdir = module_name
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
try:
res = file, path, _ = imp.find_module(sub_mod, path)
path = [path]
if file is not None:
file.close()
except ImportError:
# assume namespace package
path = sys.modules[sub_mod].__path__
res = None, path, None
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
| isc |
florian-f/sklearn | sklearn/metrics/cluster/supervised.py | 4 | 26701 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD Style.
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from ...utils.fixes import unique
from .expected_mutual_info_fast import expected_mutual_information
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = unique(labels_true, return_inverse=True)
clusters, cluster_idx = unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
    Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-Measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
    The V-Measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-Measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occuring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occuring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
maxiee/MyCodes | PyLinearAlgebra/Chapter5.Eigenvectors_Eigenvalues/006_orithogonal_compute_3d.py | 1 | 1189 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def inner_product(u, v):
len_u = len(u)
len_v = len(v)
if len_u != len_v:
return None
ret = 0
for i in range(len_u):
ret += u[i] * v[i]
return ret
def project(y, u):
return inner_product(y, u)/inner_product(u, u)*u
u1 = np.array([2, 5, -1]).T
u2 = np.array([-2, 1, 1]).T
y = np.array([1, 2, 3]).T
p1 = project(y, u1)
p2 = project(y, u2)
p = p1 + p2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot([0,u1[0]], [0,u1[1]], [0,u1[2]])
ax.plot([0,u2[0]], [0,u2[1]], [0,u2[2]])
ax.scatter(0, 0, 0)
ax.scatter(y[0], y[1], y[2], color='r')
ax.scatter(p[0], p[1], p[2], color='r')
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.plot([y[0], p[0]], [y[1], p[1]], [y[2], p[2]], '--')
ax.plot([y[0], p1[0]], [y[1], p1[1]], [y[2], p1[2]], '--', color='g', alpha=0.5)
ax.plot([y[0], p2[0]], [y[1], p2[1]], [y[2], p2[2]], '--', color='g', alpha=0.5)
ax.plot([p[0], p1[0]], [p[1], p1[1]], [p[2], p1[2]], '--', color='g', alpha=0.5)
ax.plot([p[0], p2[0]], [p[1], p2[1]], [p[2], p2[2]], '--', color='g', alpha=0.5)
plt.show()
| gpl-3.0 |
adobe-research/video-lecture-summaries | Scripts/fgpixel_classify.py | 1 | 2036 | #!/usr/bin/env python
import processvideo
import processframe as pf
import os
import ntpath
import util
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
if __name__ == "__main__":
videolist = ["..\\SampleVideos\\more\\armando1\\armando1.mp4", "..\\SampleVideos\\more\\armando2\\armando2.mp4",
"..\\SampleVideos\\more\\khan1\\khan1.mp4", "..\\SampleVideos\\more\\khan2\\khan2.mp4",
"..\\SampleVideos\\more\\hwt1\\hwt1.mp4" , "..\\SampleVideos\\more\\hwt2\\hwt2.mp4",
"..\\SampleVideos\\more\\mit1\\mit1.mp4", "..\\SampleVideos\\more\\mit2\\mit2.mp4", "..\\SampleVideos\\more\\mit3\\mit3.mp4",
"..\\SampleVideos\\more\\tecmath1\\tecmath1.mp4", "..\\SampleVideos\\more\\tecmath2\\tecmath2.mp4",
"..\\SampleVideos\\more\\udacity1\\udacity1.mp4", "..\\SampleVideos\\more\\udacity2\\udacity2.mp4"]
for video in videolist:
pv = processvideo.ProcessVideo(video)
counts = pv.readfgpix()
# Smooth and subsample
smoothsample = util.smooth(np.array(counts), window_len = pv.framerate)
subsample = counts[0:len(smoothsample):int(pv.framerate)]
t = np.linspace(0, len(subsample), len(subsample))
actionfilename = pv.videoname + "_action.txt"
actionfile = open(actionfilename, "w")
scroll_thres = 5e3
dec_thres = -2e3
prevpix = 0
color = []
starttimes = []
endtimes = []
for i in range(0, len(subsample)):
curpix = subsample[i]
if (curpix - prevpix < dec_thres or abs(curpix-prevpix) > scroll_thres): # erase or scroll
status = 0
endtimes.append(i)
else:
status = 1
starttimes.append(i)
actionfile.write("%i\n" % status)
prevpix = curpix
actionfile.close()
| bsd-2-clause |
lmjohns3/cube-experiment | cubes/fill.py | 1 | 7630 | import climate
import numpy as np
import pandas as pd
from .database import Experiment
logging = climate.get_logger(__name__)
g = climate.add_group('dropout-filling options')
g.add_argument('--root', metavar='DIR',
help='load data files from tree at DIR')
g.add_argument('--output', metavar='DIR',
help='save smoothed data files to tree at DIR')
g.add_argument('--pattern', default='*', metavar='SHPAT',
help='process only trials matching this pattern')
g.add_argument('--autoencoder-rank', type=float, metavar='K',
help='reconstruction rank')
g.add_argument('--svt-threshold', type=float, metavar='S',
help='truncate singular values at threshold S')
g.add_argument('--window', type=int, metavar='T',
help='process windows of T frames')
CENTERS = [
'marker34-l-ilium',
'marker35-r-ilium',
'marker36-r-hip',
'marker43-l-hip',
]
PHASESPACE_TOLERANCE = 0.001 # error tolerance of phasespace system
def stack(dfs, window):
'''Assemble multiple dfs into a single df with a multiindex.
Parameters
----------
dfs : list of pd.DataFrame
Data frames for source data. The frames will be stacked into a single
large frame to use while filling dropouts. This stacked frame can be
split up using :meth:`unstack`.
window : int
Window length for filling dropouts.
Returns
-------
data : pd.DataFrame
A stacked data frame.
'''
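    # Layout sketch: for two trials and window=5 the stacked frame looks like
    # [4-row NaN pad, trial 0, 4-row NaN pad, trial 1, 4-row NaN pad]; pad
    # blocks carry negative multiindex keys while real trials are keyed
    # 0..n-1, which is what unstack() relies on to split the frame apart again.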
cols = [c for c in dfs[0].columns if c.startswith('marker') and c[-1] in 'xyz']
pad = pd.DataFrame(float('nan'), index=list(range(window - 1)), columns=cols)
chunks, keys = [pad], [-1]
for i, df in enumerate(dfs):
chunks.extend([df[cols], pad])
keys.extend([i, -len(chunks)])
df = pd.concat(chunks, axis=0, keys=keys)
num_frames, num_channels = df.shape
num_entries = num_frames * num_channels
filled_ratio = df.count().sum() / num_entries
logging.info('missing %d of (%d, %d) = %d values (%.1f%% filled)',
num_entries - df.count().sum(),
num_frames, num_channels, num_entries,
100 * filled_ratio)
# if a column is completely missing, refuse to process the data.
for c in df.columns:
if df[c].count() == 0:
            raise ValueError('%s: no visible values!' % c)
return df
def center(df):
'''Shift an entire data frame by the location of the CENTERS.
The centers are basically the hip markers; this moves the data so that all
markers are centered on the origin (as a group).
Parameters
----------
df : pd.DataFrame
A data frame containing motion-capture marker locations.
Returns
-------
center : pd.DataFrame
A frame representing the center of the markers at each time step in the
original data frame.
'''
center = pd.DataFrame(
dict(x=df[[m + '-x' for m in CENTERS]].mean(axis=1),
y=df[[m + '-y' for m in CENTERS]].mean(axis=1),
z=df[[m + '-z' for m in CENTERS]].mean(axis=1)))
for c in df.columns:
df[c] -= center[c[-1]]
return center
def restore(df, centers):
'''Restore data in the given frame to the given center locations.
Parameters
----------
df : pd.DataFrame
Data frame containing stacked, centered mocap data.
centers : pd.DataFrame
Frame containing center locations for each time step in the recording.
'''
for c in df.columns:
df.loc[:, c] += centers.loc[:, c[-1]]
def unstack(df, dfs):
'''Unstack a stacked frame into multiple individual frames.
Parameters
----------
df : pd.DataFrame
Data frame containing stacked, centered mocap data.
dfs : list of pd.DataFrame
Individual target data frames.
'''
for i, d in enumerate(dfs):
d.loc[:, df.columns] = df.loc[(i, ), :]
def window(df, window, fillna=0):
'''Create windowed arrays of marker position data.
Parameters
----------
df : pd.DataFrame
Data frame containing stacked marker position data.
window : int
Number of successive frames to include in each window.
fillna : float, int, str, or None
If an integer or float, fill dropouts with this value. If a string,
interpolate missing data linearly. If None, do not fill dropouts.
'''
visible = (~df.isnull()).values
position = df.values
if isinstance(fillna, (float, int)):
# just fill dropouts with a constant value.
position = df.fillna(fillna).values
    elif fillna is not None:
        # interpolate dropouts linearly (fillna given as a string).
position = df.interpolate().ffill().bfill().values
# here we create windows of consecutive data frames, all stacked together
# along axis 1. for example, with 10 2-dimensional frames, we can stack them
# into windows of length 4 as follows:
#
# data windows
# 0 A B A B C D E F G H
# 1 C D C D E F G H I J
# 2 E F E F G H I J K L
# 3 G H G H I J K L M N
# 4 I J I J K L M N O P
# 5 K L K L M N O P Q R
# 6 M N M N O P Q R S T
# 7 O P
# 8 Q R
# 9 S T
#
# this is more or less like a convolution with a sliding rectangular window.
# this stacked data matrix is the one we'll want to fill in the SVT process
# below.
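    # In the 10-frame, 2-channel sketch above with window=4, the stacked
    # arrays below come out with shape (7, 8): 10-(4-1) usable windows, each
    # holding 4 consecutive frames of both channels side by side.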
pos = np.concatenate(
[position[i:len(df)-(window-1-i)] for i in range(window)], axis=1)
vis = np.concatenate(
[visible[i:len(df)-(window-1-i)] for i in range(window)], axis=1)
data_norm = (vis * pos * pos).sum()
logging.info('processing windowed data %s: norm %s', pos.shape, data_norm)
assert np.isfinite(data_norm)
return pos, vis, data_norm
def update(df, prediction, window, only_dropouts=True):
'''Update a stacked data frame using predicted marker positions.
Parameters
----------
df : pd.DataFrame
Stacked data frame to update with predicted marker locations.
prediction : ndarray
Array of predicted marker locations.
window : int
Window size. Windows will be unstacked and averaged before updating.
only_dropouts : bool
If True (default), only fill in values for dropouts in the original data
frame. If False, replace all values in the original frame with
predictions.
'''
# above, we created <window> duplicates of our data, each offset by 1 frame,
# and stacked (along axis 1) into a big ol matrix. in effect, we have
# <window> copies of each frame; here, we unpack these duplicates, remove
# their offsets, take the average, and put them back in the linear frame.
w = window - 1
cols = df.columns
rows = pd.MultiIndex(
levels=df.index.levels, labels=[x[w:-w] for x in df.index.labels])
parts = np.split(prediction, window, axis=1)
mean = np.mean([p[w - j:len(p) - j] for j, p in enumerate(parts)], axis=0)
if only_dropouts:
df.update(df.loc[rows, cols].fillna(
pd.DataFrame(mean, index=rows, columns=cols)))
else:
df.loc[rows, cols] = mean
def main(fill, args, *fill_args):
for _, ts in Experiment(args.root).by_subject(args.pattern):
ts = list(ts)
for t in ts:
t.load()
t.mask_dropouts()
try:
fill([t.df for t in ts], *fill_args)
except Exception as e:
logging.exception('error filling dropouts!')
continue
[t.save(t.root.replace(args.root, args.output)) for t in ts]
| mit |
negrinho/deep_architect | setup.py | 1 | 2982 | from setuptools import setup, find_packages
print(find_packages())
long_description = """
DeepArchitect is an architecture search framework with a focus on modularity,
extensibility, composability, and ease of use.
DeepArchitect uses composable and modular operators to express search
spaces over computational graphs that are then passed to search algorithms that
sample architectures from them with the goal of maximizing a desired performance
metric.
We aim to impact the workflows of researchers and practitioners with DeepArchitect.
For researchers, DeepArchitect aims to make architecture search research more
reusable and reproducible by providing them with a modular framework that they
can use to implement new search algorithms and new search spaces while reusing
a large amount of existing code.
For practitioners, DeepArchitect aims to augment their workflow by providing them
with a tool that allows them to easily write a search space encoding the large
number of choices involved in designing an architecture and use a search
algorithm automatically find an architecture in the search space.
DeepArchitect has the following **main components**:
* a language for writing composable and expressive search spaces over computational
graphs in arbitrary domains (e.g., Tensorflow, Keras, Pytorch, and even
non deep learning frameworks such as scikit-learn and preprocessing pipelines);
* search algorithms that can be used for arbitrary search spaces;
* logging functionality to easily keep track of the results of a search;
* visualization functionality to explore and inspect logging information resulting
from a search experiment.
"""
setup(
name='deep_architect',
version='0.1.0',
description=
"DeepArchitect: Architecture search so easy that you'll think it's magic!",
long_description=long_description,
url='https://github.com/negrinho/deep_architect',
long_description_content_type='text/markdown',
keywords=[
'architecture search',
'framework',
'deep learning',
'pytorch',
'tensorflow',
],
license='MIT',
author='Renato Negrinho',
author_email='[email protected]',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(include=["deep_architect*"]),
python_requires=">=3.6",
install_requires=[
'numpy',
'scipy',
"scikit-learn",
"tensorflow==1.15",
"torch>=1.2",
"keras>=2.3",
"matplotlib",
"graphviz"
],
extras_require={
"docs": ["sphinx"
"sphinx_rtd_theme"],
"viz": ['matplotlib', 'graphviz'],
"explorer": ["dash==1.0.1", "dash-daq==0.1.0"]
})
| mit |
Messaoud-Boudjada/dipy | doc/examples/reconst_dsi_metrics.py | 13 | 4539 | """
===============================
Calculate DSI-based scalar maps
===============================
We show how to calculate two DSI-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your dataset.
First import the necessary modules:
"""
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (the data) and gtab contains a GradientTable
object (gradient information, e.g. b-values). For example, to read the b-values
you can write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.get_affine()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab, qgrid_size=35, filter_width=18.5)
"""
Let's use just one slice of the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Normalize the signal by the b0
"""
dataslice = dataslice / (dataslice[..., 0, None]).astype(np.float)
"""
Calculate the return to origin probability on the signal,
which corresponds to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = dsmodel.fit(dataslice).rtop_signal()
"""
Now we calculate the return to origin probability on the propagator,
which corresponds to its central value.
By default the propagator is divided by its sum to obtain a properly normalized pdf;
this normalization changes the rtop values, so in order to compare them with the rtop
previously calculated on the signal we set the normalized parameter to False.
"""
print('Calculating... rtop_pdf')
rtop_pdf = dsmodel.fit(dataslice).rtop_pdf(normalized=False)
"""
In theory, these two measures must be equal.
To verify this, we calculate the mean square error between them.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Leaving the normalized parameter at its default changes the rtop values
but not the contrast between voxels.
"""
print('Calculating... rtop_pdf_norm')
rtop_pdf_norm = dsmodel.fit(dataslice).rtop_pdf()
"""
Let's calculate the mean square displacement on the normalized propagator.
"""
print('Calculating... msd_norm')
msd_norm = dsmodel.fit(dataslice).msd_discrete()
"""
Setting the normalized parameter to False makes it possible to calculate
the mean square displacement on the propagator without normalization.
"""
print('Calculating... msd')
msd = dsmodel.fit(dataslice).msd_discrete(normalized=False)
"""
Show the rtop images and save them in rtop.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf_norm')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='rtop_pdf')
ax3.set_axis_off()
ind = ax3.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('rtop.png')
"""
.. figure:: rtop.png
:align: center
**Return to origin probability**.
Show the msd images and save them in msd.png.
"""
fig = plt.figure(figsize=(7, 3))
ax1 = fig.add_subplot(1, 2, 1, title='msd_norm')
ax1.set_axis_off()
ind = ax1.imshow(msd_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(1, 2, 2, title='msd')
ax2.set_axis_off()
ind = ax2.imshow(msd.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('msd.png')
"""
.. figure:: msd.png
:align: center
**Mean square displacement**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et al., "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et al., "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008
.. include:: ../links_names.inc
"""
| bsd-3-clause |
iagapov/ocelot | cpbd/track.py | 1 | 19638 | __author__ = 'Sergey Tomin'
from ocelot.cpbd.optics import *
from ocelot.cpbd.beam import *
#from mpi4py import MPI
from numpy import delete, array, linspace, sqrt
from ocelot.cpbd.errors import *
from ocelot.cpbd.elements import *
from time import time
from scipy.stats import truncnorm
from copy import copy, deepcopy
import sys
try:
from scipy.signal import argrelextrema
extrema_chk = 1
except:
extrema_chk = 0
#c0=299792458
#E_ele_eV=5.109986258350895e+05
def aperture_limit(lat, xlim = 1, ylim = 1):
tws=twiss(lat, Twiss(), nPoints=1000)
bxmax = max([tw.beta_x for tw in tws])
bymax = max([tw.beta_y for tw in tws])
bx0 = tws[0].beta_x
by0 = tws[0].beta_y
px_lim = float(xlim)/np.sqrt(bxmax*bx0)
py_lim = float(ylim)/np.sqrt(bymax*by0)
xlim = float(xlim)*np.sqrt(bx0/bxmax)
ylim = float(ylim)*np.sqrt(by0/bymax)
return xlim, ylim, px_lim, py_lim
def arg_peaks(data, extrema_chk = extrema_chk):
"""
    the function searches for peaks in the spectrum and returns the positions of all peaks
    if extrema_chk == 1 it uses scipy.signal.argrelextrema
    if extrema_chk == 0 it uses independent code (see below)
"""
if extrema_chk == 1:
return argrelextrema(data, np.greater)[0]
else:
diff_y = np.diff(data)
extrm_y = np.diff(np.sign(diff_y))
return np.where(extrm_y<0)[0]+1
def spectrum(data1D):
"""
input: 1D sample data
output: frequency and fourier transform
"""
len_data1D = len(data1D)
ft = np.abs(np.fft.fft(data1D))
ft_shift = np.fft.fftshift(ft)
freq = np.fft.fftshift(np.fft.fftfreq(len_data1D))
return freq, ft_shift
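# Hedged illustration -- the helper below is hypothetical and not part of the
# original module. Because spectrum() uses np.fft.fftfreq without a sample-spacing
# argument, the frequency axis is in cycles per sample, so a pure tone peaks at
# +/- its normalized frequency.
def _spectrum_example(nu=0.31, n=256):
    data = np.sin(2 * np.pi * nu * np.arange(n))
    freq, ft = spectrum(data)
    # frequency of the strongest positive-frequency bin; the value is close to nu
    return freq[np.argmax(ft * (freq > 0))]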
def find_nearest(positions, value):
"""
input: 1D array and value
    the function searches for the value in the array nearest to the given value
"""
idx = (np.abs(positions-value)).argmin()
return positions[idx]
def find_highest(sorted_posns, value, diap):
"""
input: 1D array and value
    the function returns the last of the sorted positions that falls within +/- diap of the given value
"""
poss = []
for pos in sorted_posns:
if value-diap<=pos<=value+diap:
poss.append(pos)
#print poss
return poss[-1]
#idx = (np.abs(sorted_posns-value)).argmin()
#return sorted_posns[idx]
def nearest_particle(track_list, xi,yi):
#x_array = np.unique(np.sort(map(lambda pxy: pxy.x, pxy_list)))
y_array = np.unique(np.sort([pxy.y for pxy in track_list]))
yi = find_nearest(y_array, yi)
x_array_i = []
for pxy in track_list:
if pxy.y == yi:
x_array_i.append(pxy.x)
xi = find_nearest(array(x_array_i), xi)
#print "inside nearest_particle, xi, yi : ", xi, yi
for pxy in track_list:
#print "inside nearest_particle: ", pxy.x, pxy.y
if pxy.x == xi and pxy.y == yi:
return pxy
def harmonic_position(data1D, nu = None, diap = 0.1, nearest = False):
"""
    the function searches for the highest harmonics and returns:
    a. the highest one if nu is None
    b. the harmonic nearest to nu (if nu is not None)
"""
freq, ft_shift = spectrum(data1D)
ft_maxi = arg_peaks(ft_shift)
if len(ft_maxi) == 0:
return -0.001
freq_peaks = freq[ft_maxi][int(len(ft_maxi)/2):]
peaks = ft_shift[ft_maxi][int(len(ft_maxi)/2):]
main_3 = freq_peaks[np.argsort(peaks)]
if nearest:
return find_nearest(main_3, nu)
if nu == None:
return main_3[-1]
if diap == None:
main_3 = main_3[-5:]
nearest_nu = find_nearest(main_3, nu)
else:
nearest_nu = find_highest(main_3, nu, diap)
return nearest_nu
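# Hedged illustration -- the helper below is hypothetical and not part of the
# original module: for a coordinate oscillating at a fractional tune q per turn,
# harmonic_position() returns a value close to q (it works on the same
# normalized-frequency axis as spectrum()).
def _harmonic_position_example(q=0.27, nturns=512):
    x = np.cos(2 * np.pi * q * np.arange(nturns))
    return harmonic_position(x, nu=q, diap=0.05)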
def freq_analysis(track_list, lat, nturns, harm = True, diap = 0.10, nearest = False, nsuperperiods = 1):
def beta_freq(lat):
tws = twiss(lat, Twiss())
nux = tws[-1].mux/2./pi*nsuperperiods
nuy = tws[-1].muy/2./pi*nsuperperiods
print ("freq. analysis: Qx = ",nux, " Qy = ", nuy)
nux = abs(int(nux+0.5) - nux)
nuy = abs(int(nuy+0.5) - nuy)
print ("freq. analysis: nux = ", nux)
print ("freq. analysis: nuy = ", nuy)
return nux, nuy
nux, nuy = None, None
if harm == True:
nux, nuy = beta_freq(lat)
#fma(pxy_list, nux = nux, nuy = nuy)
for n, pxy in enumerate(track_list):
if pxy.turn == nturns-1:
if len(pxy.p_list) == 1:
#print len(pxy.p_list)
print ("For frequency analysis coordinates are needed for each turns. Check tracking option 'save_track' must be True ")
return track_list
x = [p[0] for p in pxy.p_list]
y = [p[2] for p in pxy.p_list]
pxy.mux = harmonic_position(x, nux, diap, nearest)
pxy.muy = harmonic_position(y, nuy, diap, nearest)
return track_list
class Track_info:
def __init__(self, particle, x=0., y=0.):
self.particle = particle
self.turn = 0
        self.x = particle.x  # initial coordinate
        self.y = particle.y  # initial coordinate
#self.x_array = [p.x]
#self.y_array = [p.y]
self.mux = -0.001
self.muy = -0.001
self.p_list = [[particle.x, particle.px, particle.y, particle.py, particle.tau, particle.p]]
def get_x(self):
return np.array([p[0] for p in self.p_list])
def get_xp(self):
return np.array([p[1] for p in self.p_list])
def get_y(self):
return np.array([p[2] for p in self.p_list])
def get_yp(self):
return np.array([p[3] for p in self.p_list])
def contour_da(track_list, nturns, lvl = 0.9):
"""
    the function defines the contour of the DA. If a particle "lived" for more than lvl*nturns turns, we set nturns;
    if a particle "lived" for fewer than lvl*nturns turns, we set 0
"""
if lvl>1:
lvl = 1
elif lvl<=0:
lvl = 1
ctr_da = []
for pxy in track_list:
if pxy.turn >= lvl*(nturns-1):
ctr_da.append(nturns)
else:
ctr_da.append(0)
return np.array(ctr_da)
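# For example, with nturns = 1000 and the default lvl = 0.9 a particle that survived
# 950 turns contributes nturns to the contour, while one lost after 800 turns contributes 0.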
def stable_particles(track_list, nturns):
pxy_list_sbl = []
for pxy in track_list:
if pxy.turn >= nturns-1:
pxy_list_sbl.append(pxy)
return np.array(pxy_list_sbl)
def phase_space_transform(x,y, tws):
"""
curved line of second order
a11*x**2 + a22*y**2 + 2*a12*x*y + 2*a13*x + 2*a23*y + a33 = 0
gamma*x**2 + 2*alpha*x*x' + beta*x'**2 = const
"""
angle = np.arctan(2*tws.alpha_x/(tws.gamma_x-tws.beta_x))/2.
    # rotate using the original coordinates (do not overwrite x before computing y)
    x_new = x*np.cos(angle) - y*np.sin(angle)
    y_new = x*np.sin(angle) + y*np.cos(angle)
    return x_new, y_new
def create_track_list(x_array, y_array, p_array, energy=0.):
"""
    the function creates a list of Track_info (Pxy) objects, one per initial (x, y, p)
"""
track_list = []
for p in p_array:
for y in (y_array):
for x in (x_array):
particle = Particle(x=x, y=y, p=p, E=energy)
pxy = Track_info(particle, x, y)
track_list.append(pxy)
return track_list
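# Hedged usage sketch (hypothetical grid values): a rectangular grid of initial
# transverse offsets for on-momentum particles, e.g.
#     track_list = create_track_list(linspace(-0.02, 0.02, 40),
#                                    linspace(0.0, 0.01, 20), p_array=[0.])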
def ellipse_track_list(beam, n_t_sigma = 3, num = 1000, type = "contour"):
beam.sizes()
#sigma_x = sqrt((sigma_e*tws0.Dx)**2 + emit*tws0.beta_x)
#sigma_xp = sqrt((sigma_e*tws0.Dxp)**2 + emit*tws0.gamma_x)
if type == "contour":
t = linspace(0,2*pi, num)
x = n_t_sigma*beam.sigma_x*np.cos(t)
y = n_t_sigma*beam.sigma_xp*np.sin(t)
else:
x = truncnorm( -n_t_sigma, n_t_sigma, loc=0, scale=beam.sigma_x).rvs(num)
y = truncnorm( -n_t_sigma, n_t_sigma, loc=0, scale=beam.sigma_xp).rvs(num)
tws0 = Twiss(beam)
x_array, xp_array = phase_space_transform(x,y, tws0)
track_list = []
for x,y in zip(x_array + beam.x, xp_array + beam.xp):
p = Particle(x = x, px = y, p=-0.0)
pxy = Track_info(p, x, y)
track_list.append(pxy)
return track_list
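# Hedged usage sketch (hypothetical values, assuming `beam` is a Beam with its sizes
# defined): sample initial conditions on the 3-sigma phase-space ellipse, e.g.
#     track_list = ellipse_track_list(beam, n_t_sigma=3, num=500, type="contour")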
def track_nturns(lat, nturns, track_list, nsuperperiods=1, save_track=True):
xlim, ylim, px_lim, py_lim = aperture_limit(lat, xlim = 1, ylim = 1)
navi = Navigator()
t_maps = get_map(lat, lat.totalLen, navi)
track_list_const = copy(track_list)
p_array = ParticleArray()
p_list = [p.particle for p in track_list]
p_array.list2array(p_list)
for i in range(nturns):
print(i)
for n in range(nsuperperiods):
for tm in t_maps:
tm.apply(p_array)
p_indx = p_array.rm_tails(xlim, ylim, px_lim, py_lim)
track_list = delete(track_list, p_indx)
for n, pxy in enumerate(track_list):
pxy.turn = i
if save_track:
pxy.p_list.append(p_array.rparticles[:, n])
return np.array(track_list_const)
'''
def tracking_second(lat, nturns, track_list, nsuperperiods, save_track = True):
xlim, ylim, px_lim, py_lim = aperture_limit(lat, xlim = 1, ylim = 1)
navi = Navigator()
#t_maps, delta_e = get_map(lat, lat.totalLen, navi)
track_list_const = copy(track_list)
#p_array = ParticleArray(n = len(track_list))
#for i, pxy in enumerate(track_list):
# p_array[i] = pxy.p
for i in range(nturns):
print(i)
for n in range(nsuperperiods):
for elem in lat.sequence:
for pxy in track_list:
pxy.p = elem.transfer_map*pxy.p
x = [pxy.p.x for pxy in track_list]
px = [pxy.p.px for pxy in track_list]
y = [pxy.p.y for pxy in track_list]
py = [pxy.p.py for pxy in track_list]
#p_indx = p_array.rm_tails(xlim, ylim, px_lim, py_lim)
ind_angles = append(argwhere(px > px_lim), argwhere(py > py_lim))
p_idxs = unique(append(argwhere(x > xlim), append(argwhere(y > ylim), append(argwhere(x != x), append(argwhere(y!= y), ind_angles)) )))
track_list = delete(track_list, p_idxs)
for n, pxy in enumerate(track_list):
pxy.turn = i
pxy.p_list = append(pxy.p_list,pxy.p)
#if save_track:
# pxy.p_list.append(p_array.particles[n*6:n*6+6])
return np.array(track_list_const)
'''
def track_nturns_mpi(mpi_comm, lat, nturns, track_list, errors = None, nsuperperiods = 1, save_track = True):
size = mpi_comm.Get_size()
rank = mpi_comm.Get_rank()
lat_copy = create_copy(lat, nsuperperiods = nsuperperiods)
nsuperperiods = 1
if errors != None:
if rank == 0:
lat, errors = errors_seed(lat_copy, errors)
else:
errors = None
errors = mpi_comm.bcast(errors, root=0)
for i, elem in enumerate(lat_copy.sequence):
elem.dx = errors[0][i]
elem.dy = errors[1][i]
elem.dtilt = errors[2][i]
lat = MagneticLattice(lat_copy.sequence, method=lat_copy.method)
if size == 1:
        # this is done to prevent a memory crash in mpi_comm.gather() for the single-thread case with a big pxy_list
        # (for instance, the number of pxy in the list is nx*ny = 120*60 and nturns = 1000 (nturns sets the length of each pxy record))
        # for instance, nturns = 500 works fine,
        # but for nturns = 1000 the program crashes with an error in mpi_comm.gather().
        # the same happens when there are too few threads - the solution is to increase the number of threads.
print("nsuperperiods = ", nsuperperiods)
track_list = track_nturns(lat, nturns, track_list, nsuperperiods, save_track=save_track)
return track_list
if rank == 0:
# dividing data into chunks
chunks_track_list = [[] for _ in range(size)]
N = len(track_list)
for i, x in enumerate(track_list):
chunks_track_list[int(size*i/N)].append(x)
else:
track_list = None
chunks_track_list = None
start = time()
track_list = mpi_comm.scatter(chunks_track_list, root=0)
print(" scatter time = ", time() - start, " sec, rank = ", rank, " len(pxy_list) = ", len(track_list) )
start = time()
track_list = track_nturns(lat, nturns, track_list, nsuperperiods, save_track =save_track)
print( " scanning time = ", time() - start, " sec, rank = ", rank)
start = time()
out_track_list = mpi_comm.gather(track_list, root=0)
print(" gather time = ", time() - start, " sec, rank = ", rank)
if rank == 0:
start = time()
track_list = []
for i, chank in enumerate(out_track_list):
for pxy in chank:
track_list.append(pxy)
print(" time exec = ", time() - start)
return track_list
def fma(lat, nturns, x_array, y_array, nsuperperiods = 1):
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
rank = mpi_comm.Get_rank()
    track_list = create_track_list(x_array, y_array, p_array=[0.])  # p_array is required; assume on-momentum particles
track_list = track_nturns_mpi(mpi_comm, lat, nturns, track_list, errors = None, nsuperperiods = nsuperperiods)
if rank == 0:
nx = len(x_array)
ny = len(y_array)
ctr_da = contour_da(track_list, nturns)
#ctr_da = tra.countour_da()
track_list = freq_analysis(track_list, lat, nturns, harm = True)
        da_mux = array([pxy.mux for pxy in track_list])
        da_muy = array([pxy.muy for pxy in track_list])
return ctr_da.reshape(ny,nx), da_mux.reshape(ny,nx), da_muy.reshape(ny,nx)
def da_mpi(lat, nturns, x_array, y_array, errors = None, nsuperperiods = 1):
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
rank = mpi_comm.Get_rank()
    track_list = create_track_list(x_array, y_array, p_array=[0.])  # p_array is required; assume on-momentum particles
track_list = track_nturns_mpi(mpi_comm, lat, nturns, track_list, errors = errors, nsuperperiods = nsuperperiods, save_track=False)
if rank == 0:
        da = array([track.turn for track in track_list])  # .reshape((len(y_array), len(x_array)))
nx = len(x_array)
ny = len(y_array)
return da.reshape(ny, nx)
def tracking_step(lat, particle_list, dz, navi):
"""
tracking for a fixed step dz
:param lat: Magnetic Lattice
:param particle_list: ParticleArray or Particle list
:param dz: step in [m]
:param navi: Navigator
:return: None
"""
if navi.z0 + dz > lat.totalLen:
dz = lat.totalLen - navi.z0
t_maps = get_map(lat, dz, navi)
for tm in t_maps:
start = time()
tm.apply(particle_list)
logger.debug("tm: l="+ str(tm.length) +" class=" + tm.__class__.__name__ + " \n"
"tracking_step -> apply: time exec = " + str(time() - start) + " sec")
return
def track(lattice, p_array, navi, print_progress=True, calc_tws=True):
"""
tracking through the lattice
:param lattice: Magnetic Lattice
:param p_array: ParticleArray
:param navi: Navigator
:return: twiss list, ParticleArray
"""
tw0 = get_envelope(p_array) if calc_tws else Twiss()# get_envelope(p_array)
#print(tw0)
tws_track = [tw0]
L = 0.
while np.abs(navi.z0 - lattice.totalLen) > 1e-10:
dz, proc_list = navi.get_next()
tracking_step(lat=lattice, particle_list=p_array, dz=dz, navi=navi)
for p in proc_list:
p.z0 = navi.z0
p.apply(p_array, dz)
tw = get_envelope(p_array) if calc_tws else Twiss()
L += dz
tw.s += L
tws_track.append(tw)
if print_progress:
poc_names = [p.__class__.__name__ for p in proc_list]
sys.stdout.write( "\r" + "z = " + str(navi.z0)+" / "+str(lattice.totalLen) + " : applied: " + ", ".join(poc_names) )
sys.stdout.flush()
return tws_track, p_array
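# Hedged usage sketch (assumes `lattice` (MagneticLattice) and `p_array`
# (ParticleArray) were built beforehand):
#     navi = Navigator()
#     tws_track, p_array = track(lattice, p_array, navi, print_progress=False)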
def lattice_track(lat, p):
plist = [copy(p)]
for elem in lat.sequence:
elem.transfer_map.apply([p])
#print(p)
if not (elem.__class__ in [Bend, RBend, SBend] and elem.l != 0.): #, "hcor", "vcor"
if elem.__class__ == Edge:
#print elem.pos
if elem.pos == 1:
continue
plist.append(copy(p))
return plist
def merge_drifts(lat):
print( "before merging: len(sequence) = ", len(lat.sequence) )
L = 0.
seq = []
new_elem = None
for elem in lat.sequence:
#next_elem = lat.sequence[i+1]
if elem.__class__ == Drift:
L += elem.l
new_elem = Drift(l=L, eid=elem.id)
else:
if new_elem != None:
seq.append(new_elem)
L = 0.
new_elem = None
seq.append(elem)
if new_elem != None:
seq.append(new_elem)
print( "after merging: len(sequence) = ", len(seq) )
return MagneticLattice(sequence=seq)
"""
def show_da(out_da, x_array, y_array):
from matplotlib import pyplot as plt
from numpy import linspace, max, min
#print "time execution = ", time() - start , " s"
nx = len(x_array)
ny = len(y_array)
#print(nx, ny, len(out_da))
out_da = out_da.reshape(ny,nx)
xmin, xmax, ymin, ymax = min(x_array), max(x_array), min(y_array), max(y_array)
#plt.subplot(111, axisbg='darkslategray')
extent = xmin, xmax, ymin, ymax
#print extent
#plt.savetxt("da.txt", da)
plt.figure(figsize=(10, 7))
fig1 = plt.contour(out_da, linewidths=2,extent = extent)#, colors = 'r')
#fig1 = plt.contourf(out_da, 20,cmap=plt.cm.rainbow,extent = extent)#, colors = 'r')
#plt.axis_bgcolor("#bdb76b")
plt.grid(True)
plt.xlabel("X, m")
plt.ylabel("Y, m")
cb = plt.colorbar()
cb.set_label('Nturns')
#cb.ax.set_yticklabels(map(str, linspace(min(out_da), max(out_da), 5) ))
#plt.savefig('da_error_'+str(int(np.random.rand()*100))+'.png')
plt.show()
def show_mu(contour_da, mux, muy, x_array, y_array, zones = None ):
from matplotlib import pyplot as plt
nx = len(x_array)
ny = len(y_array)
t= linspace(0,3.14, num = 100)
contour_da = contour_da.reshape(ny,nx)
mux = mux.reshape(ny,nx)
muy = muy.reshape(ny,nx)
xmin, xmax, ymin, ymax = min(x_array), max(x_array), min(y_array), max(y_array)
plt.figure(1,figsize=(10, 7)) #axisbg='darkslategray'
extent = xmin, xmax, ymin, ymax
my_cmap = plt.cm.Paired
#my_cmap.set_under('w')
#norm = mlb.colors.Normalize(vmin=-0.005, vmax=max(mux))
fig1 = plt.contour(contour_da, 1,extent = extent, linewidths=2,colors='k')#, colors = 'r')
fig1 = plt.contourf(mux,40, cmap=my_cmap, extent = extent)#, colors = 'r')
cb = plt.colorbar(cmap=my_cmap)
fig1 = plt.contourf(mux,10, levels=[-1,-.0001], colors='w',extent = extent)
if zones != None:
x_zone = zones[0]
y_zone = zones[1]
plt.plot(x_zone*cos(t), y_zone*sin(t), "g", lw = 2)
plt.plot(2*x_zone*cos(t), 2*y_zone*sin(t), "b", lw = 2)
plt.plot(3*x_zone*cos(t), 3*y_zone*sin(t), "r", lw = 2)
plt.plot(4*x_zone*cos(t), 4*y_zone*sin(t), "y", lw = 2)
plt.grid(True)
#plt.figure(figsize=(10, 7))
plt.xlabel("X, m")
plt.ylabel("Y, m")
cb.set_label('Qx')
plt.figure(2,figsize=(10, 7))
fig1 = plt.contour(contour_da, 1,extent = extent, linewidths=2,colors='k')#, colors = 'r')
fig1 = plt.contourf(muy,40, cmap=my_cmap, extent = extent)#, colors = 'r')
if zones != None:
x_zone = zones[0]
y_zone = zones[1]
plt.plot(x_zone*cos(t), y_zone*sin(t), "g", lw = 2)
plt.plot(2*x_zone*cos(t), 2*y_zone*sin(t), "b", lw = 2)
plt.plot(3*x_zone*cos(t), 3*y_zone*sin(t), "r", lw = 2)
plt.plot(4*x_zone*cos(t), 4*y_zone*sin(t), "y", lw = 2)
#x = np.linspace(-, 0.01, 0.0001)
#plt.plot()
cb = plt.colorbar(cmap=my_cmap)
fig1 = plt.contourf(muy,10, levels=[-1,-.0001], colors='w',extent = extent)
plt.xlabel("X, m")
plt.ylabel("Y, m")
plt.grid(True)
cb.set_label('Qy')
plt.show()
"""
| gpl-3.0 |
ChanderG/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
LenzDu/Kaggle-Competition-Sberbank | lightGBM.py | 1 | 1566 | # author: vrtjso
import numpy as np
import pandas as pd
import lightgbm as lgb
import gc
from sklearn.preprocessing import StandardScaler
from Utils import CreateOutput
trainDf = pd.read_csv('train_featured.csv')
Xtrain = trainDf.drop(['price_doc','w'],1)
w = trainDf.w.values
Ytrain = trainDf.price_doc
# scaler = StandardScaler().fit(Ytrain)
# Ytrain = scaler.transform(Ytrain)
Ytrain = Ytrain * 0.0000001
train = lgb.Dataset(Xtrain, Ytrain, weight=w)
del Xtrain, Ytrain; gc.collect()
#min CV on 0.1(new): 2350 (num_leave:15, min_data:30)
#min CV 0.1 normalized: 0.511 (num_leave:15, min_data:30)
params = {'objective':'regression','metric':'rmse',
'learning_rate':0.1,'max_depth':-1,'sub_feature':0.7,'sub_row':1,
'num_leaves':15,'min_data':30,'max_bin':20,
'bagging_fraction':0.9,'bagging_freq':40,'verbosity':-1}
lgbcv = lgb.cv(params, train, 10000, nfold=6, early_stopping_rounds=50,
verbose_eval=50, show_stdv=False)['rmse-mean']
print('The final CV score:', lgbcv[-1])
# best_round = len(lgbcv)
# bst = lgb.train(params, train, best_round)
# fs = bst.feature_importance()
# f_name = bst.feature_name()
# f = dict()
# for i in range(0,len(fs)):
# f[f_name[i]]=fs[i]
# Xtest = pd.read_csv('test_featured.csv')
# prediction = bst.predict(Xtest) * 10000000
# # prediction = scaler.inverse_transform(prediction)
# output = pd.read_csv('test.csv')
# output = output[['id']]
# output['price_doc'] = prediction
# output.to_csv(r'Ensemble\Submission_lgb.csv',index=False)
| mit |
ironmussa/Optimus | tests/creator/creator-profiler.py | 1 | 4785 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # This notebook creates the tests in Python code. All these cells must be run to execute the tests
# %load_ext autoreload
# %autoreload 2
# + {"outputHidden": false, "inputHidden": false}
import sys
sys.path.append("../..")
# -
from optimus import Optimus
from optimus.helpers.test import Test
op = Optimus(master='local', verbose=True)
# +
import pandas as pd
from pyspark.sql.types import *
from datetime import date, datetime
cols = [
("names", "str"),
("height(ft)", ShortType()),
("function", "str"),
("rank", ByteType()),
("age", "int"),
("weight(t)", "float"),
"japanese name",
"last position seen",
"date arrival",
"last date seen",
("attributes", ArrayType(FloatType())),
("Date Type", DateType()),
("timestamp", TimestampType()),
("Cybertronian", BooleanType()),
("function(binary)", BinaryType()),
("NullType", NullType())
]
rows = [
("Optimus", -28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10",
"2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"),
None),
("bumbl#ebéé ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10",
"2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True, bytearray("Espionage", "utf-8"),
None),
("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10",
"2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True, bytearray("Security", "utf-8"),
None),
("Jazz", 13, "First Lieutenant", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10",
"2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True,
bytearray("First Lieutenant", "utf-8"), None),
("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0],
date(2012, 5, 10), datetime(2014, 6, 24), True, bytearray("None", "utf-8"), None),
("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10",
[91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("Battle Station", "utf-8"), None),
(None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),
]
source_df = op.create.df(cols, rows)
source_df.table()
# -
# ### End Init Section
# ## Profiler
from pyspark.ml.linalg import Vectors
import re
a="a\'a"
re.escape(a)
print(a)
t = Test(op, source_df, "df_profiler", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector",
"import numpy as np",
"nan = np.nan",
"import datetime",
"from pyspark.sql import functions as F",
"from optimus.profiler.profiler import Profiler",
"null = None",
"true = True",
"p= Profiler()"], path = "df_profiler", final_path="..")
# +
from pyspark.sql import functions as F
def func(col_name, attrs):
return F.col(col_name) * 2
numeric_col = "height(ft)"
numeric_col_B = "rank"
numeric_col_C = "rank"
string_col = "function"
date_col = "date arrival"
date_col_B = "last date seen"
new_col = "new col"
array_col = "attributes"
# -
from optimus.profiler.profiler import Profiler
p= Profiler()
p.run(source_df, "*")
t.create(p, "dataset", None, 'json', None, source_df,"*")
t.run()
mismatch = {"names":"dd/mm/yyyy","height(ft)":r'^([0-2][0-9]|(3)[0-1])(\/)(((0)[0-9])|((1)[0-2]))(\/)\d{4}$',"function":"yyyy-mm-dd"}
t.create(p, "dataset", "mismatch", 'json', None, source_df,"*", mismatch=mismatch)
t.run()
t.create(p, "columns_stats", None, 'json', None, source_df,"*")
t.run()
t.create(p, "columns_agg", None, 'json', None, source_df,"*")
t.run()
a = "{'name'=a'a}"
print(a)
import json
json.dumps("{'name'=a'a}")
from optimus.profiler.profiler import Profiler
op.profiler.run(source_df, "*")
source_df.cols.range("height(ft)")
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 46 | 13101 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
kdebrab/pandas | pandas/io/formats/latex.py | 4 | 9407 | # -*- coding: utf-8 -*-
"""
Module for formatting output data in Latex.
"""
from __future__ import print_function
import numpy as np
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import TableFormatter
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See Also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=self.frame.columns,
idx=self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
        # reestablish the MultiIndex that has been joined by _to_str_columns
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False, sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
out = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[' ' * len(i[-1])] * clevels + i for i in out]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else '{}' for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, '
'not {typ}'.format(typ=type(column_format)))
if not self.longtable:
buf.write('\\begin{{tabular}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
else:
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
'page}}}} \\\\\n'.format(n=len(row)))
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
crow = [(x.replace('\\', '\\textbackslash ')
.replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde ')
.replace('^', '\\textasciicircum ')
.replace('&', '\\&')
if (x and x != '{}') else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = ['\\textbf{{{x}}}'.format(x=x)
if j < ilevels and x.strip() not in ['', '{}'] else x
for j, x in enumerate(crow)]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
.format(ncol=ncol, fmt=self.multicolumn_format,
txt=coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row, ilevels, i, rows):
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
nrow=nrow, row=row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
.format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
| bsd-3-clause |
pbrusco/ml-eeg | ml/visualizations.py | 1 | 10056 | # coding: utf-8
from . import signal_processing
from . import data_import
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import seaborn as sns
from IPython.display import display
import math
from sklearn import metrics
import numpy as np
import mne
def plot_roc_curve(y_actual, y_scores, ax, is_permutation, fontsize=30):
fpr, tpr, thresholds = metrics.roc_curve(y_actual, y_scores)
roc_auc = metrics.auc(fpr, tpr)
if is_permutation:
ax.plot(fpr, tpr, "b-", alpha=0.5, lw=0.3)
else:
ax.plot([0, 1], [0, 1], '--', lw=6, color="k", label='Chance')
ax.plot(fpr, tpr, "g-", alpha=1, lw=8, label='ROC (area = {})'.format("{:.3f}".format(roc_auc)))
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
plt.tick_params(axis='both', which='major', labelsize=30)
ax.set_xlabel('False Positive Rate', fontsize=fontsize)
ax.set_ylabel('True Positive Rate', fontsize=fontsize)
def subject_table(data):
table = data.reindex_axis(["session",
"experiment",
"auc",
"auc_p_val",
"acc",
"acc_p_val",
"support",
], axis=1)
if not table.empty:
s = table.style\
.applymap(_set_color, subset=["auc_p_val", "acc_p_val"])\
.applymap(_bold, subset=["session"])
display(s)
def _set_color(val):
if math.isnan(val):
color = "rgba(255, 255, 255, 1)"
elif val < 0.05:
color = "rgba(216, 246, 206, 0.5)"
elif val <= 0.1:
color = "rgba(242, 245, 169, 0.5)"
else:
color = "rgba(245, 169, 169, 0.3)"
return 'background-color: %s;' % color
def _bold(val):
return 'font-weight: bold'
def _bar(val):
if val > 0.6:
return _bar_color(val, "green")
elif val > 0.4:
return _bar_color(val, "yellow")
else:
return _bar_color(val, "red")
def _bar_2(val):
return _bar_color(val, "blue")
def _bar_color(val, color):
base = 'width: 10em; height: 80%;'
attrs = (base + 'background: linear-gradient(90deg,rgba{c} {w}%, '
'transparent 0%)')
color = sns.light_palette(color, as_cmap=True)(val)
c = (int(color[0] * 255), int(color[1] * 255), int(color[2] * 255), 0.3)
return attrs.format(c=c, w=val * 100)
def plot_epochs_average(data, y_lim, tmin, window, freq, marks=[], ax=None, color="red", label="epochs mean"):
# Data shape: (samples, trial)
if not ax:
ax = plt.gca()
t0, tf = window
t0_frame = signal_processing.frame_at(t0, freq, tmin)
tf_frame = signal_processing.frame_at(tf, freq, tmin)
samples = data.shape[0]
for epoch_id in range(0, data.shape[1]):
c_plt, = ax.plot([t0 + (s / freq) for s in range(0, samples)], data[:, epoch_id], color=color, alpha=0.05)
epochs_mean = data[t0_frame:tf_frame, :].mean(1)
c2_plt, = ax.plot([t0 + (s / freq) for s in range(0, samples)], epochs_mean, color=color, linewidth=2.0, label=label)
set_plot(plt, y_lim, window, t0, tf, marks, ax)
draw_rectangle(plt, window, ax, label=None)
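# Hedged usage sketch (hypothetical shapes and values): `data` is (samples, trials),
# e.g. epochs of 256 Hz EEG starting 0.2 s before the stimulus:
#     plot_epochs_average(data, y_lim=(-40, 40), tmin=-0.2, window=(-0.2, 0.5),
#                         freq=256.0, marks=[-0.1, 0.0])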
def plot_epochs_comparition(data_dict, y_lim, tmin, window, freq, marks, epochs_max_number):
t0, tf = window
t0_frame = signal_processing.frame_at(t0, freq, tmin)
tf_frame = signal_processing.frame_at(tf, freq, tmin)
plt.figure()
for condition in list(data_dict.keys()):
epochs_mean = data_dict[condition][:, t0_frame:tf_frame, 0:epochs_max_number].mean(2)
samples = epochs_mean.shape[1]
plt.plot([t0 + (s / freq) for s in range(0, samples)], epochs_mean.mean(0), linewidth=2.0, label=condition)
set_plot(plt, y_lim, window, t0, tf, marks, plt)
draw_rectangle(plt, window)
plt.legend()
def set_plot(plt, y_lim, window, t0, tf, marks, ax):
plt.ylim(y_lim)
plt.xlim(window)
# plt.xticks([x/1000.0 for x in range(-2000, 101, 100) if (x/1000.0)>=t0 and (x/1000.0)<=tf])
ax.axvline(marks[0], color="black")
ax.axvline(marks[1], color="black")
def draw_rectangle(plt, window, ax=None, label="stimulus"):
if not ax:
ax = plt.gca()
rect = patches.Rectangle((window[0], -40), width=-window[0] - 0.4, height=80,
color='grey',
alpha=0.5)
ax.add_patch(rect)
rect = patches.Rectangle((0, -40), width=window[1], height=80,
color='grey',
alpha=0.5)
ax.add_patch(rect)
rect = patches.Rectangle((-0.4, -40), width=0.4, height=80,
fill=None, edgecolor="black", label=label)
ax.add_patch(rect)
return rect
def barplot(session, data, measure):
plt.figure()
sns.set(style="white", context="talk", font_scale=1.0)
sns.despine(bottom=True)
    no_permutations = data[~data.experiment.str.endswith("permutation")]
    session_data = no_permutations[no_permutations.session == session]
x = list(session_data.experiment)
y = list(session_data[measure])
hue = list(session_data.extraction_method)
permutations = list(session_data[measure + "_permutation"])
supports = list(session_data.support)
bar = sns.barplot(x=x,
y=y,
palette="Set1",
hue=hue,
data=session_data)
rectangles = []
for i, p in enumerate(bar.patches):
height = p.get_height()
xmin = p.get_x()
width = p.get_width()
bar.text(xmin, height + 0.01, '%1.2f' % (y[i]), fontsize=14)
rectangles.append(
patches.Rectangle(
(xmin, permutations[i]), # (x,y)
width, # width
0.01, # height
color="black",
)
)
bar.text(p.get_x() + 0.1, 0.1, "N={} ".format(supports[i]), rotation=90, fontsize=14, backgroundcolor="w")
for rectangle in rectangles:
bar.add_patch(rectangle)
plt.ylabel("AUC")
plt.ylim([0, 1])
plt.title("Session {}".format(session))
plt.tight_layout()
def feature_importances_by_window_size(features_table, title):
# https://gist.github.com/wmvanvliet/6d7c78ea329d4e9e1217
gr = sns.stripplot(x="starting_time", y="feature_importances_folds_mean", data=features_table, hue="window_size", palette="Set2")
gr.set_xticklabels(gr.get_xticklabels(), rotation=90)
gr.set_title(title)
plt.draw()
return gr.get_figure()
def topomap(values_by_time, montage_file, freq, cmap="Greys", fontsize=15, title=""):
montage = data_import.read_montage(montage_file)
vmin = values_by_time.feature_importances_folds_mean.min()
vmax = values_by_time.feature_importances_folds_mean.max()
# vmin, vmax = (0.0005, 0.0015)
l = mne.channels.make_eeg_layout(mne.create_info(montage.ch_names, freq, ch_types="eeg", montage=montage))
times = sorted(set(values_by_time.time))
fig, axes = plt.subplots(1, len(times), figsize=(3 * len(times), 5))
if not isinstance(axes, np.ndarray):
axes = np.array([axes])
[ax.axis('off') for ax in axes]
for top_n, (time, ax) in enumerate(zip(times, axes)):
time_data = values_by_time[values_by_time["time"] == time]
t = list(time_data.time)[0]
image, _ = mne.viz.plot_topomap(list(time_data["values"]), l.pos[:, 0:2], vmin=vmin, vmax=vmax, outlines="skirt", axes=ax, show_names=False, names=l.names, show=False, cmap=cmap)
if top_n == len(axes) - 1:
divider = make_axes_locatable(ax)
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax.set_title("{} ms".format(t), fontsize=fontsize)
fig.suptitle(title, fontsize=16)
plt.draw()
def lines(features_table, title=""):
plt.figure()
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
for idx, row in features_table.iterrows():
alpha = row.window_size / features_table.window_size.max()
plt.hlines(y=row.feature_importances_folds_mean, lw=5, alpha=alpha, xmin=row.starting_time, xmax=row.end_time)
plt.ylim([features_table.feature_importances_folds_mean.min(), features_table.feature_importances_folds_mean.max()])
plt.xlim([features_table.starting_time.min(), features_table.end_time.max()])
plt.draw()
def window_bars(features_table, title="", fontsize=20):
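    # One rectangle per time window, stacked row by row after sorting by window
    # size and starting time; the grey-scale fill encodes the mean fold
    # importance, and an invisible imshow provides the matching colorbar.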
features_table.sort_values(["window_size", "starting_time"], ascending=False, inplace=True)
fig = plt.figure()
ax = fig.add_subplot(111)
cmap = matplotlib.cm.get_cmap('Greys')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
vmin, vmax = (features_table.feature_importances_folds_mean.min(), features_table.feature_importances_folds_mean.max())
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
for idx, (_, row) in enumerate(features_table.iterrows()):
val = row.feature_importances_folds_mean
# plt.hlines(y=idx, lw=3, color=cmap(norm(val)), xmin=row.starting_time, xmax=row.end_time)
p = patches.Rectangle(
(row.starting_time, idx), # (x, y)
row.window_size, # width
1, # height
facecolor=cmap(norm(val)),
# edgecolor="blue"
)
ax.add_patch(p)
ax.set_title(title, fontsize=fontsize)
plt.xlim([features_table.starting_time.min(), features_table.end_time.max()])
plt.ylim([-1, len(features_table) + 2])
divider = make_axes_locatable(ax)
ax_colorbar = divider.append_axes('right', size='60%', pad=0.01)
img = plt.imshow(np.array([[vmin, vmax]]), cmap=cmap)
img.set_visible(False)
plt.colorbar(img, cax=ax_colorbar, orientation="vertical")
plt.draw()
| gpl-3.0 |
Scapogo/zipline | tests/calendars/test_nyse_calendar.py | 5 | 9411 | from unittest import TestCase
import pandas as pd
from .test_trading_calendar import ExchangeCalendarTestBase
from zipline.utils.calendars.exchange_calendar_nyse import NYSEExchangeCalendar
class NYSECalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = 'nyse'
calendar_class = NYSEExchangeCalendar
MAX_SESSION_HOURS = 6.5
def test_2012(self):
# holidays we expect:
holidays_2012 = [
pd.Timestamp("2012-01-02", tz='UTC'),
pd.Timestamp("2012-01-16", tz='UTC'),
pd.Timestamp("2012-02-20", tz='UTC'),
pd.Timestamp("2012-04-06", tz='UTC'),
pd.Timestamp("2012-05-28", tz='UTC'),
pd.Timestamp("2012-07-04", tz='UTC'),
pd.Timestamp("2012-09-03", tz='UTC'),
pd.Timestamp("2012-11-22", tz='UTC'),
pd.Timestamp("2012-12-25", tz='UTC')
]
for session_label in holidays_2012:
self.assertNotIn(session_label, self.calendar.all_sessions)
# early closes we expect:
early_closes_2012 = [
pd.Timestamp("2012-07-03", tz='UTC'),
pd.Timestamp("2012-11-23", tz='UTC'),
pd.Timestamp("2012-12-24", tz='UTC')
]
for early_close_session_label in early_closes_2012:
self.assertIn(early_close_session_label,
self.calendar.early_closes)
def test_special_holidays(self):
# 9/11
# Sept 11, 12, 13, 14 2001
self.assertNotIn(pd.Period("9/11/2001"), self.calendar.all_sessions)
self.assertNotIn(pd.Period("9/12/2001"), self.calendar.all_sessions)
self.assertNotIn(pd.Period("9/13/2001"), self.calendar.all_sessions)
self.assertNotIn(pd.Period("9/14/2001"), self.calendar.all_sessions)
# Hurricane Sandy
# Oct 29, 30 2012
self.assertNotIn(pd.Period("10/29/2012"), self.calendar.all_sessions)
self.assertNotIn(pd.Period("10/30/2012"), self.calendar.all_sessions)
# various national days of mourning
# Gerald Ford - 1/2/2007
self.assertNotIn(pd.Period("1/2/2007"), self.calendar.all_sessions)
# Ronald Reagan - 6/11/2004
self.assertNotIn(pd.Period("6/11/2004"), self.calendar.all_sessions)
# Richard Nixon - 4/27/1994
self.assertNotIn(pd.Period("4/27/1994"), self.calendar.all_sessions)
def test_new_years(self):
"""
Check whether the TradingCalendar contains certain dates.
"""
# January 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
start_session = pd.Timestamp("2012-01-02", tz='UTC')
end_session = pd.Timestamp("2013-12-31", tz='UTC')
sessions = self.calendar.sessions_in_range(start_session, end_session)
day_after_new_years_sunday = pd.Timestamp("2012-01-02",
tz='UTC')
self.assertNotIn(day_after_new_years_sunday, sessions,
"""
If NYE falls on a weekend, {0} the Monday after is a holiday.
""".strip().format(day_after_new_years_sunday)
)
first_trading_day_after_new_years_sunday = pd.Timestamp("2012-01-03",
tz='UTC')
self.assertIn(first_trading_day_after_new_years_sunday, sessions,
"""
If NYE falls on a weekend, {0} the Tuesday after is the first trading day.
""".strip().format(first_trading_day_after_new_years_sunday)
)
# January 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
new_years_day = pd.Timestamp("2013-01-01", tz='UTC')
self.assertNotIn(new_years_day, sessions,
"""
If NYE falls during the week, e.g. {0}, it is a holiday.
""".strip().format(new_years_day)
)
first_trading_day_after_new_years = pd.Timestamp("2013-01-02",
tz='UTC')
self.assertIn(first_trading_day_after_new_years, sessions,
"""
If the day after NYE falls during the week, {0} \
is the first trading day.
""".strip().format(first_trading_day_after_new_years)
)
def test_thanksgiving(self):
"""
Check TradingCalendar Thanksgiving dates.
"""
# November 2005
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30
start_session_label = pd.Timestamp('2005-01-01', tz='UTC')
end_session_label = pd.Timestamp('2012-12-31', tz='UTC')
sessions = self.calendar.sessions_in_range(start_session_label,
end_session_label)
thanksgiving_with_four_weeks = pd.Timestamp("2005-11-24", tz='UTC')
self.assertNotIn(thanksgiving_with_four_weeks, sessions,
"""
If Nov has 4 Thursdays, {0} Thanksgiving is the last Thursday.
""".strip().format(thanksgiving_with_four_weeks)
)
# November 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30
thanksgiving_with_five_weeks = pd.Timestamp("2006-11-23", tz='UTC')
self.assertNotIn(thanksgiving_with_five_weeks, sessions,
"""
If Nov has 5 Thursdays, {0} Thanksgiving is not the last week.
""".strip().format(thanksgiving_with_five_weeks)
)
first_trading_day_after_new_years_sunday = pd.Timestamp("2012-01-03",
tz='UTC')
self.assertIn(first_trading_day_after_new_years_sunday, sessions,
"""
If NYE falls on a weekend, {0} the Tuesday after is the first trading day.
""".strip().format(first_trading_day_after_new_years_sunday)
)
def test_day_after_thanksgiving(self):
# November 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3
# 4 5 6 7 8 9 10
# 11 12 13 14 15 16 17
# 18 19 20 21 22 23 24
# 25 26 27 28 29 30
fourth_friday_open = pd.Timestamp('11/23/2012 11:00AM', tz='EST')
fourth_friday = pd.Timestamp('11/23/2012 3:00PM', tz='EST')
self.assertTrue(self.calendar.is_open_on_minute(fourth_friday_open))
self.assertFalse(self.calendar.is_open_on_minute(fourth_friday))
# November 2013
# Su Mo Tu We Th Fr Sa
# 1 2
# 3 4 5 6 7 8 9
# 10 11 12 13 14 15 16
# 17 18 19 20 21 22 23
# 24 25 26 27 28 29 30
fifth_friday_open = pd.Timestamp('11/29/2013 11:00AM', tz='EST')
fifth_friday = pd.Timestamp('11/29/2013 3:00PM', tz='EST')
self.assertTrue(self.calendar.is_open_on_minute(fifth_friday_open))
self.assertFalse(self.calendar.is_open_on_minute(fifth_friday))
def test_early_close_independence_day_thursday(self):
"""
Until 2013, the market closed early the Friday after an
Independence Day on Thursday. Since then, the early close is on
Wednesday.
"""
# July 2002
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
wednesday_before = pd.Timestamp('7/3/2002 3:00PM', tz='EST')
friday_after_open = pd.Timestamp('7/5/2002 11:00AM', tz='EST')
friday_after = pd.Timestamp('7/5/2002 3:00PM', tz='EST')
self.assertTrue(self.calendar.is_open_on_minute(wednesday_before))
self.assertTrue(self.calendar.is_open_on_minute(friday_after_open))
self.assertFalse(self.calendar.is_open_on_minute(friday_after))
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
wednesday_before = pd.Timestamp('7/3/2013 3:00PM', tz='EST')
friday_after_open = pd.Timestamp('7/5/2013 11:00AM', tz='EST')
friday_after = pd.Timestamp('7/5/2013 3:00PM', tz='EST')
self.assertFalse(self.calendar.is_open_on_minute(wednesday_before))
self.assertTrue(self.calendar.is_open_on_minute(friday_after_open))
self.assertTrue(self.calendar.is_open_on_minute(friday_after))
class CalendarStartEndTestCase(TestCase):
def test_start_end(self):
"""
Check TradingCalendar with defined start/end dates.
"""
start = pd.Timestamp('2010-1-3', tz='UTC')
end = pd.Timestamp('2010-1-10', tz='UTC')
calendar = NYSEExchangeCalendar(start=start, end=end)
expected_first = pd.Timestamp('2010-1-4', tz='UTC')
expected_last = pd.Timestamp('2010-1-8', tz='UTC')
self.assertTrue(calendar.first_trading_session == expected_first)
self.assertTrue(calendar.last_trading_session == expected_last)
| apache-2.0 |
loli/semisupervisedforests | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
xuewei4d/scikit-learn | asv_benchmarks/benchmarks/ensemble.py | 7 | 3520 | from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import (RandomForestClassifier,
GradientBoostingClassifier,
HistGradientBoostingClassifier)
from .common import Benchmark, Estimator, Predictor
from .datasets import (_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_classification_dataset)
from .utils import make_gen_classif_scorers
class RandomForestClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for RandomForestClassifier.
"""
param_names = ['representation', 'n_jobs']
params = (['dense', 'sparse'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, n_jobs = params
if representation == 'sparse':
data = _20newsgroups_highdim_dataset()
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, n_jobs = params
n_estimators = 500 if Benchmark.data_size == 'large' else 100
estimator = RandomForestClassifier(n_estimators=n_estimators,
min_samples_split=10,
max_features='log2',
n_jobs=n_jobs,
random_state=0)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class GradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for GradientBoostingClassifier.
"""
param_names = ['representation']
params = (['dense', 'sparse'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, = params
if representation == 'sparse':
data = _20newsgroups_highdim_dataset()
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, = params
n_estimators = 100 if Benchmark.data_size == 'large' else 10
estimator = GradientBoostingClassifier(n_estimators=n_estimators,
max_features='log2',
subsample=0.5,
random_state=0)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for HistGradientBoostingClassifier.
"""
param_names = []
params = ()
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
data = _synth_classification_dataset(n_samples=10000,
n_features=100,
n_classes=5)
return data
def make_estimator(self, params):
estimator = HistGradientBoostingClassifier(max_iter=100,
max_leaf_nodes=15,
early_stopping=False,
random_state=0)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| bsd-3-clause |
mtjvc/gpew | examples/chi2_vs_gp.py | 1 | 4487 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import numpy as np
import emcee
import george
from george import kernels
import os
import sys
currentframe = inspect.currentframe()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(currentframe)))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import profiles
import gpew
import matplotlib.pyplot as pl
def single_kernel_noisemodel(p):
"""
    Noise model built from a single squared-exponential kernel.
"""
return george.GP(p[0] * kernels.ExpSquaredKernel(p[1]))
def single_kernel_lnprior(p):
amp, xcen, sigma, lna, lnalpha = p
if (-50. < lna < 0. and amp > 0. and sigma > 0. and xcen > 8685 and
xcen < 8690):
return 0.0
return -np.inf
def chi2_lnprior(p):
amp, xcen, sigma = p
if (amp > 0. and sigma > 0. and xcen > 8685 and xcen < 8690):
return 0.0
return -np.inf
d = np.loadtxt('spec.txt').T
sel = (d[0] > 8680) & (d[0] < 8696)
yerr = np.ones_like(d[0][sel]) * 0.01
lines = [(d[0][sel], d[1][sel], yerr)]
pfiles = [profiles.gaussian]
pparn = np.cumsum([0] +\
[len(inspect.getargspec(i)[0]) - 1 for i in pfiles])
###############################################################################
# GP modelled line
initial = [0.28, 8687.82, 1.53, -6.1, 0.3]
nwalkers = 128
ndim = len(initial)
niter = 100
noisemodel = single_kernel_noisemodel
data = [lines, pfiles, pparn, noisemodel, single_kernel_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in xrange(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
xs = np.linspace(-8.1, 8.1, 100)
models = []
clean_models = []
ew = []
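# For 100 random posterior samples: rebuild the Gaussian line profile, add up
# the absorption depth to get the equivalent width, and (when a noise model is
# present) draw the GP conditioned on the residuals so that the plotted band
# includes the correlated-noise component.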
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profile = 1 - pfiles[0](lines[0][0], *pars)
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
if noisemodel is not None:
nmp = np.exp(s[pparn[-1]:])
nm = noisemodel(nmp)
nm.compute(lines[0][0], lines[0][2])
m = nm.sample_conditional(lines[0][1] - profile,
xs + mxcen) + profilexs
models.append(m)
offset = 0.0
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
gpa = np.array(models).T
gpstd = np.std(gpa, axis=1)
gpavg = np.average(gpa, axis=1)
y1, y2 = gpavg + gpstd + offset, gpavg - gpstd + offset
pl.fill_between(xs, y1, y2, color='r', alpha=0.3)
###############################################################################
# Chi2 modelled line
initial = [0.28, 8687.82, 1.53]
ndim = len(initial)
noisemodel = None
data = [lines, pfiles, pparn, noisemodel, chi2_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in xrange(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
clean_models = []
ew = []
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
offset = 0.3
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
pl.show()
| mit |
weixuanfu/tpot | tpot/config/regressor_cuml.py | 1 | 3882 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
# This configuration gives users who have access to a GPU the ability to
# use RAPIDS cuML and DMLC/XGBoost regressors as estimators alongside
# the scikit-learn preprocessors in the TPOT default configuration.
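# Usage sketch (hedged: assumes the standard TPOT API, in which a custom search
# space is supplied through the ``config_dict`` argument; X_train/y_train are
# placeholder arrays):
#
#     from tpot import TPOTRegressor
#     tpot = TPOTRegressor(generations=5, population_size=20,
#                          config_dict=regressor_config_cuml, verbosity=2)
#     tpot.fit(X_train, y_train)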
regressor_config_cuml = {
# cuML + DMLC/XGBoost Regressors
"cuml.linear_model.ElasticNet": {
"l1_ratio": np.arange(0.0, 1.01, 0.05),
"tol": [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
},
"cuml.neighbors.KNeighborsRegressor": {
"n_neighbors": range(1, 101),
"weights": ["uniform"],
},
"cuml.linear_model.Lasso": {
"normalize": [True, False]
},
"cuml.linear_model.Ridge": {
},
"xgboost.XGBRegressor": {
"n_estimators": [100],
"max_depth": range(3, 10),
"learning_rate": [1e-2, 1e-1, 0.5, 1.],
"subsample": np.arange(0.05, 1.01, 0.05),
"min_child_weight": range(1, 21),
"alpha": [1, 10],
"tree_method": ["gpu_hist"],
"n_jobs": [1],
"verbosity": [0],
"objective": ["reg:squarederror"]
},
# Sklearn Preprocesssors
"sklearn.preprocessing.Binarizer": {
"threshold": np.arange(0.0, 1.01, 0.05)
},
"sklearn.decomposition.FastICA": {
"tol": np.arange(0.0, 1.01, 0.05)
},
"sklearn.cluster.FeatureAgglomeration": {
"linkage": ["ward", "complete", "average"],
"affinity": ["euclidean", "l1", "l2", "manhattan", "cosine"]
},
"sklearn.preprocessing.MaxAbsScaler": {
},
"sklearn.preprocessing.MinMaxScaler": {
},
"sklearn.preprocessing.Normalizer": {
"norm": ["l1", "l2", "max"]
},
"sklearn.kernel_approximation.Nystroem": {
"kernel": ["rbf", "cosine", "chi2", "laplacian", "polynomial", "poly", "linear", "additive_chi2", "sigmoid"],
"gamma": np.arange(0.0, 1.01, 0.05),
"n_components": range(1, 11)
},
"sklearn.decomposition.PCA": {
"svd_solver": ["randomized"],
"iterated_power": range(1, 11)
},
"sklearn.kernel_approximation.RBFSampler": {
"gamma": np.arange(0.0, 1.01, 0.05)
},
"sklearn.preprocessing.RobustScaler": {
},
"sklearn.preprocessing.StandardScaler": {
},
"tpot.builtins.ZeroCount": {
},
"tpot.builtins.OneHotEncoder": {
"minimum_fraction": [0.05, 0.1, 0.15, 0.2, 0.25],
"sparse": [False],
"threshold": [10]
},
# Selectors
"sklearn.feature_selection.SelectFwe": {
"alpha": np.arange(0, 0.05, 0.001),
"score_func": {
"sklearn.feature_selection.f_classif": None
}
},
"sklearn.feature_selection.SelectPercentile": {
"percentile": range(1, 100),
"score_func": {
"sklearn.feature_selection.f_classif": None
}
},
"sklearn.feature_selection.VarianceThreshold": {
"threshold": [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
}
}
| lgpl-3.0 |
quantopian/zipline | tests/events/test_events.py | 1 | 17152 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from inspect import isabstract
import random
from unittest import TestCase
import warnings
from nose_parameterized import parameterized
import pandas as pd
from six import iteritems
from six.moves import range, map
from trading_calendars import get_calendar
import zipline.utils.events
from zipline.utils.events import (
EventRule,
StatelessRule,
Always,
Never,
AfterOpen,
ComposedRule,
BeforeClose,
NotHalfDay,
NthTradingDayOfWeek,
NDaysBeforeLastTradingDayOfWeek,
NthTradingDayOfMonth,
NDaysBeforeLastTradingDayOfMonth,
StatefulRule,
OncePerDay,
_build_offset,
_build_date,
_build_time,
EventManager,
Event,
MAX_MONTH_RANGE,
MAX_WEEK_RANGE,
TradingDayOfMonthRule,
TradingDayOfWeekRule
)
def param_range(*args):
return ([n] for n in range(*args))
class TestUtils(TestCase):
@parameterized.expand([
('_build_date', _build_date),
('_build_time', _build_time),
])
def test_build_none(self, name, f):
with self.assertRaises(ValueError):
f(None, {})
def test_build_offset_default(self):
default = object()
self.assertIs(default, _build_offset(None, {}, default))
def test_build_offset_both(self):
with self.assertRaises(ValueError):
_build_offset(datetime.timedelta(minutes=1), {'minutes': 1}, None)
def test_build_offset_exc(self):
with self.assertRaises(TypeError):
# object() is not an instance of a timedelta.
_build_offset(object(), {}, None)
def test_build_offset_kwargs(self):
kwargs = {'minutes': 1}
self.assertEqual(
_build_offset(None, kwargs, None),
datetime.timedelta(**kwargs),
)
def test_build_offset_td(self):
td = datetime.timedelta(minutes=1)
self.assertEqual(
_build_offset(td, {}, None),
td,
)
def test_build_date_both(self):
with self.assertRaises(ValueError):
_build_date(
datetime.date(year=2014, month=9, day=25), {
'year': 2014,
'month': 9,
'day': 25,
},
)
def test_build_date_kwargs(self):
kwargs = {'year': 2014, 'month': 9, 'day': 25}
self.assertEqual(
_build_date(None, kwargs),
datetime.date(**kwargs),
)
def test_build_date_date(self):
date = datetime.date(year=2014, month=9, day=25)
self.assertEqual(
_build_date(date, {}),
date,
)
def test_build_time_both(self):
with self.assertRaises(ValueError):
_build_time(
datetime.time(hour=1, minute=5), {
'hour': 1,
'minute': 5,
},
)
def test_build_time_kwargs(self):
kwargs = {'hour': 1, 'minute': 5}
self.assertEqual(
_build_time(None, kwargs),
datetime.time(**kwargs),
)
class TestEventManager(TestCase):
def setUp(self):
self.em = EventManager()
self.event1 = Event(Always())
self.event2 = Event(Always())
def test_add_event(self):
self.em.add_event(self.event1)
self.assertEqual(len(self.em._events), 1)
def test_add_event_prepend(self):
self.em.add_event(self.event1)
self.em.add_event(self.event2, prepend=True)
self.assertEqual([self.event2, self.event1], self.em._events)
def test_add_event_append(self):
self.em.add_event(self.event1)
self.em.add_event(self.event2)
self.assertEqual([self.event1, self.event2], self.em._events)
def test_checks_should_trigger(self):
class CountingRule(Always):
count = 0
def should_trigger(self, dt):
CountingRule.count += 1
return True
for r in [CountingRule] * 5:
self.em.add_event(Event(r()))
self.em.handle_data(None, None, datetime.datetime.now())
self.assertEqual(CountingRule.count, 5)
class TestEventRule(TestCase):
def test_is_abstract(self):
with self.assertRaises(TypeError):
EventRule()
def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
super(Always, Always()).should_trigger('a')
def minutes_for_days(cal, ordered_days=False):
"""
500 randomly selected days.
This is used to make sure our test coverage is unbiased towards any rules.
We use a random sample because testing on all the trading days took
around 180 seconds on my laptop, which is far too much for normal unit
testing.
We manually set the seed so that this will be deterministic.
Results of multiple runs were compared to make sure that this is actually
true.
This returns a generator of tuples each wrapping a single generator.
Iterating over this yields a single day, iterating over the day yields
the minutes for that day.
"""
random.seed('deterministic')
if ordered_days:
# Get a list of 500 trading days, in order. As a performance
# optimization in AfterOpen and BeforeClose, we rely on the fact that
# the clock only ever moves forward in a simulation. For those cases,
# we guarantee that the list of trading days we test is ordered.
ordered_session_list = random.sample(list(cal.all_sessions), 500)
ordered_session_list.sort()
def session_picker(day):
return ordered_session_list[day]
else:
        # Other than AfterOpen and BeforeClose, we don't rely on the nature
# of the clock, so we don't care.
def session_picker(day):
return random.choice(cal.all_sessions[:-1])
return [cal.minutes_for_session(session_picker(cnt))
for cnt in range(500)]
class RuleTestCase(object):
CALENDAR_STRING = "foo"
@classmethod
def setUpClass(cls):
        # On the AfterOpen and BeforeClose tests, we want to ensure that the
# functions are pure, and that running them with the same input will
# provide the same output, regardless of whether the function is run 1
# or N times. (For performance reasons, we cache some internal state
# in AfterOpen and BeforeClose, but we don't want it to affect
# purity). Hence, we use the same before_close and after_open across
# subtests.
cls.before_close = BeforeClose(hours=1, minutes=5)
cls.after_open = AfterOpen(hours=1, minutes=5)
cls.class_ = None # Mark that this is the base class.
cal = get_calendar(cls.CALENDAR_STRING)
cls.before_close.cal = cal
cls.after_open.cal = cal
def test_completeness(self):
"""
Tests that all rules are being tested.
"""
if not self.class_:
return # This is the base class testing, it is always complete.
classes_to_ignore = [TradingDayOfWeekRule, TradingDayOfMonthRule]
dem = {
k for k, v in iteritems(vars(zipline.utils.events))
if isinstance(v, type) and
issubclass(v, self.class_) and
v is not self.class_ and
v not in classes_to_ignore and
not isabstract(v)
}
ds = {
k[5:] for k in dir(self)
if k.startswith('test') and k[5:] in dem
}
self.assertTrue(
dem <= ds,
msg='This suite is missing tests for the following classes:\n' +
'\n'.join(map(repr, dem - ds)),
)
class StatelessRulesTests(RuleTestCase):
@classmethod
def setUpClass(cls):
super(StatelessRulesTests, cls).setUpClass()
cls.class_ = StatelessRule
cls.cal = get_calendar(cls.CALENDAR_STRING)
# First day of 09/2014 is closed whereas that for 10/2014 is open
cls.sept_sessions = cls.cal.sessions_in_range(
pd.Timestamp('2014-09-01', tz='UTC'),
pd.Timestamp('2014-09-30', tz='UTC'),
)
cls.oct_sessions = cls.cal.sessions_in_range(
pd.Timestamp('2014-10-01', tz='UTC'),
pd.Timestamp('2014-10-31', tz='UTC'),
)
cls.sept_week = cls.cal.minutes_for_sessions_in_range(
pd.Timestamp("2014-09-22", tz='UTC'),
pd.Timestamp("2014-09-26", tz='UTC')
)
cls.HALF_SESSION = None
cls.FULL_SESSION = None
def test_Always(self):
should_trigger = Always().should_trigger
for session_minutes in minutes_for_days(self.cal):
self.assertTrue(all(map(should_trigger, session_minutes)))
def test_Never(self):
should_trigger = Never().should_trigger
for session_minutes in minutes_for_days(self.cal):
self.assertFalse(any(map(should_trigger, session_minutes)))
def test_AfterOpen(self):
minute_groups = minutes_for_days(self.cal, ordered_days=True)
should_trigger = self.after_open.should_trigger
for session_minutes in minute_groups:
for i, minute in enumerate(session_minutes):
# Should only trigger at the 64th minute
if i != 64:
self.assertFalse(should_trigger(minute))
else:
self.assertTrue(should_trigger(minute))
def test_invalid_offset(self):
with self.assertRaises(ValueError):
AfterOpen(hours=12, minutes=1)
with self.assertRaises(ValueError):
AfterOpen(hours=0, minutes=0)
with self.assertRaises(ValueError):
BeforeClose(hours=12, minutes=1)
with self.assertRaises(ValueError):
BeforeClose(hours=0, minutes=0)
def test_BeforeClose(self):
minute_groups = minutes_for_days(self.cal, ordered_days=True)
should_trigger = self.before_close.should_trigger
for minute_group in minute_groups:
for minute in minute_group:
# Should only trigger at the 65th-to-last minute
if minute != minute_group[-66]:
self.assertFalse(should_trigger(minute))
else:
self.assertTrue(should_trigger(minute))
def test_NotHalfDay(self):
rule = NotHalfDay()
rule.cal = self.cal
if self.HALF_SESSION:
for minute in self.cal.minutes_for_session(self.HALF_SESSION):
self.assertFalse(rule.should_trigger(minute))
if self.FULL_SESSION:
for minute in self.cal.minutes_for_session(self.FULL_SESSION):
self.assertTrue(rule.should_trigger(minute))
def test_NthTradingDayOfWeek_day_zero(self):
"""
Test that we don't blow up when trying to call week_start's
should_trigger on the first day of a trading environment.
"""
rule = NthTradingDayOfWeek(0)
rule.cal = self.cal
first_open = self.cal.open_and_close_for_session(
self.cal.all_sessions[0]
)
self.assertTrue(first_open)
def test_NthTradingDayOfWeek(self):
for n in range(MAX_WEEK_RANGE):
rule = NthTradingDayOfWeek(n)
rule.cal = self.cal
should_trigger = rule.should_trigger
prev_period = self.cal.minute_to_session_label(self.sept_week[0])
n_tdays = 0
for minute in self.sept_week:
period = self.cal.minute_to_session_label(minute)
if prev_period < period:
n_tdays += 1
prev_period = period
if should_trigger(minute):
self.assertEqual(n_tdays, n)
else:
self.assertNotEqual(n_tdays, n)
def test_NDaysBeforeLastTradingDayOfWeek(self):
for n in range(MAX_WEEK_RANGE):
rule = NDaysBeforeLastTradingDayOfWeek(n)
rule.cal = self.cal
should_trigger = rule.should_trigger
for minute in self.sept_week:
if should_trigger(minute):
n_tdays = 0
session = self.cal.minute_to_session_label(
minute,
direction="none"
)
next_session = self.cal.next_session_label(session)
while next_session.dayofweek > session.dayofweek:
session = next_session
next_session = self.cal.next_session_label(session)
n_tdays += 1
self.assertEqual(n_tdays, n)
def test_NthTradingDayOfMonth(self):
for n in range(MAX_MONTH_RANGE):
rule = NthTradingDayOfMonth(n)
rule.cal = self.cal
should_trigger = rule.should_trigger
for sessions_list in (self.sept_sessions, self.oct_sessions):
for n_tdays, session in enumerate(sessions_list):
# just check the first 10 minutes of each session
for m in self.cal.minutes_for_session(session)[0:10]:
if should_trigger(m):
self.assertEqual(n_tdays, n)
else:
self.assertNotEqual(n_tdays, n)
def test_NDaysBeforeLastTradingDayOfMonth(self):
for n in range(MAX_MONTH_RANGE):
rule = NDaysBeforeLastTradingDayOfMonth(n)
rule.cal = self.cal
should_trigger = rule.should_trigger
sessions = reversed(self.oct_sessions)
for n_days_before, session in enumerate(sessions):
for m in self.cal.minutes_for_session(session)[0:10]:
if should_trigger(m):
self.assertEqual(n_days_before, n)
else:
self.assertNotEqual(n_days_before, n)
def test_ComposedRule(self):
minute_groups = minutes_for_days(self.cal)
rule1 = Always()
rule2 = Never()
for minute in minute_groups:
composed = rule1 & rule2
should_trigger = composed.should_trigger
self.assertIsInstance(composed, ComposedRule)
self.assertIs(composed.first, rule1)
self.assertIs(composed.second, rule2)
self.assertFalse(any(map(should_trigger, minute)))
@parameterized.expand([
('month_start', NthTradingDayOfMonth),
('month_end', NDaysBeforeLastTradingDayOfMonth),
('week_start', NthTradingDayOfWeek),
('week_end', NthTradingDayOfWeek),
])
def test_pass_float_to_day_of_period_rule(self, name, rule_type):
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter('always')
rule_type(n=3) # Shouldn't trigger a warning.
rule_type(n=3.0) # Should trigger a warning about float coercion.
self.assertEqual(len(raised_warnings), 1)
# We only implicitly convert from float to int when there's no loss of
# precision.
with self.assertRaises(TypeError):
rule_type(3.1)
def test_invalid_offsets(self):
with self.assertRaises(ValueError):
NthTradingDayOfWeek(5)
with self.assertRaises(ValueError):
NthTradingDayOfWeek(-1)
with self.assertRaises(ValueError):
NthTradingDayOfMonth(-1)
with self.assertRaises(ValueError):
NthTradingDayOfMonth(24)
class StatefulRulesTests(RuleTestCase):
CALENDAR_STRING = "NYSE"
@classmethod
def setUpClass(cls):
super(StatefulRulesTests, cls).setUpClass()
cls.class_ = StatefulRule
cls.cal = get_calendar(cls.CALENDAR_STRING)
def test_OncePerDay(self):
class RuleCounter(StatefulRule):
"""
A rule that counts the number of times another rule triggers
but forwards the results out.
"""
count = 0
def should_trigger(self, dt):
st = self.rule.should_trigger(dt)
if st:
self.count += 1
return st
for minute_group in minutes_for_days(self.cal):
rule = RuleCounter(OncePerDay())
for minute in minute_group:
rule.should_trigger(minute)
self.assertEqual(rule.count, 1)
| apache-2.0 |
alexmoratalla/yambopy | tests/test_si.py | 2 | 12610 | from __future__ import print_function
#
# Author: Henrique Pereira Coutada Miranda
# Tests for yambopy
# Si
#
import matplotlib
import unittest
import sys
import os
import shutil
import argparse
import subprocess
import filecmp
import shutil as sh
from yambopy import *
from qepy import *
class TestPW_Si(unittest.TestCase):
""" This class creates the input files for Si and compares them to reference files
"""
def get_inputfile(self):
qe = PwIn()
qe.atoms = [['Si',[0.125,0.125,0.125]],
['Si',[-.125,-.125,-.125]]]
qe.atypes = {'Si': [28.086,"Si.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'si'"
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../pseudos'"
qe.system['celldm(1)'] = 10.3
qe.system['ecutwfc'] = 40
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 1
qe.system['ibrav'] = 2
qe.kpoints = [4, 4, 4]
qe.electrons['conv_thr'] = 1e-8
return qe
def test_pw_input_relax(self):
""" Generate a silicon pw.x input file for the relaxation cycle
"""
if not os.path.isdir('relax'):
os.mkdir('relax')
qe = self.get_inputfile()
qe.control['calculation'] = "'vc-relax'"
qe.ions['ion_dynamics'] = "'bfgs'"
qe.cell['cell_dynamics'] = "'bfgs'"
qe.write('relax/si.scf')
self.assertEqual(filecmp.cmp('relax/si.scf', 'reference/si/relax_si.scf'),True)
def test_pw_input_scf(self):
""" Generate a silicon pw.x input file for the self consistent cycle
"""
if not os.path.isdir('scf'):
os.mkdir('scf')
qe = self.get_inputfile()
qe.control['calculation'] = "'scf'"
qe.write('scf/si.scf')
self.assertEqual(filecmp.cmp('scf/si.scf', 'reference/si/scf_si.scf'),True)
def test_pw_input_nscf(self):
""" Generate a silicon pw.x input file for the non self consistent cycle
"""
if not os.path.isdir('nscf'):
os.mkdir('nscf')
qe = self.get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = 30
qe.system['force_symmorphic'] = ".true."
qe.kpoints = [2, 2, 2]
qe.write('nscf/si.nscf')
self.assertEqual(filecmp.cmp('nscf/si.nscf', 'reference/si/nscf_si.nscf'),True)
class TestPW_Si_Run(unittest.TestCase):
""" This class creates the input files and runs the pw.x code
"""
    def test_pw_si(self):
""" Run relaxation, self consistent cycle and non self consistent cycle
"""
print("\nstep 1: relax")
os.system('cd relax; pw.x < si.scf > si.scf.log')
e = PwXML('si',path='relax')
pos = e.get_scaled_positions()
q = PwIn('scf/si.scf')
print("old celldm(1)", q.system['celldm(1)'])
q.system['celldm(1)'] = e.cell[0][2]*2
print("new celldm(1)", q.system['celldm(1)'])
q.atoms = list(zip([a[0] for a in q.atoms],pos))
q.write('scf/si.scf')
print("step 2: scf")
os.system('cd scf; pw.x < si.scf > si.scf.log')
os.system('cp -r scf/si.save nscf')
print("step 3: nscf")
os.system('cd nscf; pw.x < si.nscf > si.nscf.log')
class TestYamboPrep_Si(unittest.TestCase):
def test_yambo_preparation(self):
""" Run p2y and yambo to prepare the database
"""
if not os.path.isdir('database'):
os.mkdir('database')
os.system('cd nscf/si.save; p2y 2> ../../database/p2y.log')
os.system('cd nscf/si.save; yambo 2> ../../database/yambo.log')
os.system('mv nscf/si.save/SAVE database')
class TestYamboIn_GW_Si(unittest.TestCase):
def setUp(self):
""" Prepare the databases
"""
if not os.path.isdir('database/SAVE'):
os.makedirs('database')
os.system('cd database; tar xfz ../reference/si/yambo_gw_conv/gw_conv.tar.gz')
if not os.path.isdir('gw_conv/SAVE'):
sh.copytree('database/SAVE','gw_conv/SAVE')
if not os.path.isdir('gw/SAVE'):
sh.copytree('database/SAVE','gw/SAVE')
def test_gw_input(self):
""" Test if we can initialize the YamboIn class for a typical GW input file
"""
y = YamboIn('yambo -p p -g n -V all',folder='gw')
def test_gw_convergence(self):
""" Test if we can generate multiple input files changing some variables
"""
y = YamboIn('yambo -p p -g n -V all',folder='gw_conv')
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXp': [[1,2,5], 'Ry'],
'BndsRnXp': [[1,10],[1,20],[1,30]] }
y.optimize(conv)
return y
class TestYamboIn_GW_Si_Run(unittest.TestCase):
def test_yambo_gw_si(self):
""" Run GW calculation with yambo
"""
y = YamboIn('yambo -p p -g n -V all',folder='gw_conv')
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXp': [[1,2,5], 'Ry'],
'BndsRnXp': [[1,10],[1,20],[1,30]] }
y.optimize(conv)
def run(filename):
folder = filename.split('.')[0]
print(filename, folder)
os.system('cd gw_conv; yambo -F %s -J %s -C %s 2> %s.log'%(filename,folder,folder,folder))
y.optimize(conv,run=run)
def test_yambopy_analysegw(self):
""" Test the yambopy analysegw executable
"""
os.system('yambopy analysegw gw_conv FFTGvecs -bc 5 -kc 3 -bv 4 -kv 1 -nd')
out = np.loadtxt('analyse_gw_conv/gw_conv_FFTGvecs.dat')
ref = np.loadtxt('reference/si/analyse_gw_conv/gw_conv_FFTGvecs.dat')
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
os.system('yambopy analysegw gw_conv BndsRnXp -bc 5 -kc 3 -bv 4 -kv 1 -nd')
out = np.loadtxt('analyse_gw_conv/gw_conv_BndsRnXp.dat')
ref = np.loadtxt('reference/si/analyse_gw_conv/gw_conv_BndsRnXp.dat')
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
class TestYamboIn_BSE_Si(unittest.TestCase):
def setUp(self):
""" Prepare the databases
"""
if not os.path.isdir('database/SAVE'):
os.makedirs('database')
os.system('cd database; tar xfz ../reference/si/yambo_bse_conv/bse_conv.tar.gz')
if not os.path.isdir('bse/SAVE'):
sh.copytree('database/SAVE','bse/SAVE')
if not os.path.isdir('bse_conv/SAVE'):
sh.copytree('database/SAVE','bse_conv/SAVE')
def test_bse_input(self):
""" Test if we can initialize the YamboIn class for a typical BSE input file
"""
y = YamboIn('yambo -b -o b -k sex -y h -V all',folder='bse')
def test_bse_convergence(self):
""" Test if we can generate multiple input files changing some variables
"""
y = YamboIn('yambo -b -o b -k sex -y d -V all',folder='bse_conv')
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXs': [[1,2,5], 'Ry'],
'BndsRnXs': [[1,10],[1,20],[1,30]] }
y.optimize(conv)
return y
class TestYamboIn_BSE_Si_Run(unittest.TestCase):
def test_yambo_bse_si(self):
""" Run BSE calculation with yambo
"""
y = YamboIn('yambo -b -o b -k sex -y d -V all',folder='bse_conv')
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXs': [[1,2,5], 'Ry'],
'BndsRnXs': [[1,10],[1,20],[1,30]] }
print()
def run(filename):
folder = filename.split('.')[0]
print(filename, folder)
os.system('cd bse_conv; yambo -F %s -J %s -C %s 2> %s.log'%(filename,folder,folder,folder))
y.optimize(conv,run=run)
class TestYamboOut_BSE_Si(unittest.TestCase):
def test_yamboout_bse_si(self):
""" Read the yambo BSE output files and write them as .json
"""
for dirpath,dirnames,filenames in os.walk('bse_conv'):
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
y = YamboOut(dirpath,save_folder='bse_conv')
y.pack()
def test_yamboanalyse_bse_si(self):
""" Analyse the BSE .json output files
"""
y = YamboAnalyser('bse_conv')
y.plot_bse('eps')
def test_yambopy_analysebse(self):
""" Test the yambopy analysebse executable
"""
os.system('yambopy analysebse bse_conv FFTGvecs -nd')
out = np.loadtxt('analyse_bse_conv/bse_conv_FFTGvecs_excitons.dat')
ref = np.loadtxt('reference/si/analyse_bse_conv/bse_conv_FFTGvecs_excitons.dat')
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
os.system('yambopy analysebse bse_conv BndsRnXs -nd')
out = np.loadtxt('analyse_bse_conv/bse_conv_BndsRnXs_excitons.dat')
ref = np.loadtxt('reference/si/analyse_bse_conv/bse_conv_BndsRnXs_excitons.dat')
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
class TestYamboOut_GW_Si(unittest.TestCase):
def test_yamboout_gw_si(self):
""" Read the yambo GW output files
"""
for dirpath,dirnames,filenames in os.walk('gw_conv'):
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
y = YamboOut(dirpath,save_folder='gw_conv')
y.pack()
def test_yamboanalyse_gw_si(self):
""" Analyse the yambo GW .json output files
"""
y = YamboAnalyser('gw_conv')
y.plot_gw('qp')
if __name__ == '__main__':
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-i','--input', action="store_true",
help='Generate the input files and compare with the reference ones')
parser.add_argument('-f','--full', action="store_true",
help='Generate the input files, run them and compare the results')
parser.add_argument('-c','--clean', action="store_true",
help='Clean all the data from a previous run')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
#first test if yambo is installed
sp = subprocess.PIPE
yambo_not_available = subprocess.call("yambo", shell=True, stdout=sp, stderr=sp)
if yambo_not_available:
print("yambo not found, please install it before running the tests")
sys.exit(1)
# Count the number of errors
nerrors = 0
ul = unittest.TestLoader()
tr = unittest.TextTestRunner(verbosity=2)
#
# Test pw.x
#
suite = ul.loadTestsFromTestCase(TestPW_Si)
nerrors += not tr.run(suite).wasSuccessful()
if args.full:
suite = ul.loadTestsFromTestCase(TestPW_Si_Run)
nerrors += not tr.run(suite).wasSuccessful()
#
# Test p2y and yambo
#
if args.full:
suite = ul.loadTestsFromTestCase(TestYamboPrep_Si)
nerrors += not tr.run(suite).wasSuccessful()
#
# Test GW on yambo
#
suite = ul.loadTestsFromTestCase(TestYamboIn_GW_Si)
nerrors += not tr.run(suite).wasSuccessful()
if args.full:
suite = ul.loadTestsFromTestCase(TestYamboIn_GW_Si_Run)
nerrors += not tr.run(suite).wasSuccessful()
suite = ul.loadTestsFromTestCase(TestYamboOut_GW_Si)
nerrors += not tr.run(suite).wasSuccessful()
#
# Test BSE on yambo
#
suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si)
nerrors += not tr.run(suite).wasSuccessful()
if args.full:
suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si_Run)
nerrors += not tr.run(suite).wasSuccessful()
suite = ul.loadTestsFromTestCase(TestYamboOut_BSE_Si)
nerrors += not tr.run(suite).wasSuccessful()
#clean tests
if args.clean or nerrors==0:
print("cleaning...")
os.system('rm -rf scf bse bse_conv gw gw_conv nscf relax database '
'analyse_bse_conv analyse_gw_conv proj.in')
print("done!")
sys.exit(nerrors)
| bsd-3-clause |
SeelozInc/spark-timeseries | python/setup.py | 3 | 1045 | from setuptools import setup, find_packages
import os
import re
# determine version
VERSION_FILE="sparkts/_version.py"
verstrline = open(VERSION_FILE, "rt").read()
VERSION_REGEX = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VERSION_REGEX, verstrline, re.M)
if mo:
version_string = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSION_FILE,))
JAR_FILE = 'sparkts-' + version_string + '-jar-with-dependencies.jar'
setup(
name='sparkts',
description = 'A library for analyzing large time series data with Apache Spark',
author = 'Sandy Ryza',
author_email = '[email protected]',
url = 'https://github.com/sryza/spark-timeseries',
version=version_string,
packages=find_packages(),
include_package_data = True,
classifiers = [],
keywords = ['spark', 'time', 'series', 'data', 'analysis'],
install_requires = [
'pandas >= 0.13',
'numpy >= 1.9.2'
],
    tests_require = [
'nose == 1.3.7',
'unittest2 >= 1.0.0'
]
)
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/core/reshape/util.py | 20 | 1915 | import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
def cartesian_product(X):
"""
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
See also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
pandas.compat.product : An alias for itertools.product.
"""
msg = "Input must be a list-like of list-likes"
if not is_list_like(X):
raise TypeError(msg)
for x in X:
if not is_list_like(x):
raise TypeError(msg)
if len(X) == 0:
return []
lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
if cumprodX[-1] != 0:
b = cumprodX[-1] / cumprodX
else:
# if any factor is empty, the cartesian product is empty
b = np.zeros_like(cumprodX)
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
| mit |
pratapvardhan/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy,
# used to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
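    # Isotropic Gaussian blobs: n_samples_per_center points drawn around each
    # node of a grid_size x grid_size integer grid, shuffled before returning.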
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
nlpub/hyperstar | dictionary.ru.py | 2 | 4269 | #!/usr/bin/env python3
__author__ = 'Dmitry Ustalov'
import argparse
import csv
import random
from collections import defaultdict
import numpy as np
from gensim.models.word2vec import Word2Vec
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
parser = argparse.ArgumentParser(description='Russian Dictionary.')
parser.add_argument('--w2v', default='all.norm-sz100-w10-cb0-it1-min100.w2v', nargs='?',
help='Path to the word2vec model.')
parser.add_argument('--seed', default=228, type=int, nargs='?', help='Random seed.')
args = vars(parser.parse_args())
RANDOM_SEED = args['seed']
random.seed(RANDOM_SEED)
w2v = Word2Vec.load_word2vec_format(args['w2v'], binary=True, unicode_errors='ignore')
w2v.init_sims(replace=True)
print('Using %d word2vec dimensions from "%s".' % (w2v.layer1_size, args['w2v']))
hypernyms_patterns = defaultdict(lambda: list())
hypernyms_wiktionary = defaultdict(lambda: list())
synonyms = defaultdict(lambda: list())
with open('pairs-isas-aa.csv') as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
hyponym, hypernym, frequency = row['hyponym'], row['hypernym'], float(row['freq'])
if frequency < 100:
continue
if hyponym in w2v and hypernym in w2v and hypernym not in hypernyms_patterns[hyponym]:
hypernyms_patterns[hyponym].append(hypernym)
with open('all_ru_pairs_ruwikt20160210_parsed.txt') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
hyponym, hypernym = None, None
if row[3] == 'hypernyms':
hyponym, hypernym = row[1], row[2]
elif row[3] == 'hyponyms':
hyponym, hypernym = row[2], row[1]
elif row[3] == 'synonyms':
if row[1] in w2v and row[2] in w2v:
if row[2] not in synonyms[row[1]]:
synonyms[row[1]].append(row[2])
if row[1] not in synonyms[row[2]]:
synonyms[row[2]].append(row[1])
continue
else:
continue
if hypernym not in hypernyms_wiktionary[hyponym] and hyponym in w2v and hypernym in w2v:
hypernyms_wiktionary[hyponym].append(hypernym)
keys_wiktionary = [k for k in hypernyms_wiktionary.keys() if len(hypernyms_wiktionary[k]) > 0]
wiktionary_train, wiktionary_validation_test = train_test_split(np.arange(len(keys_wiktionary), dtype='int32'),
test_size=.4, random_state=RANDOM_SEED)
wiktionary_validation, wiktionary_test = train_test_split(wiktionary_validation_test, test_size=.5,
random_state=RANDOM_SEED)
hypernyms_train = {k: hypernyms_wiktionary[k] for i in wiktionary_train for k in (keys_wiktionary[i],)}
for hyponym, hypernyms in hypernyms_patterns.items():
if hyponym in hypernyms_train:
for hypernym in hypernyms:
if not hypernym in hypernyms_train[hyponym]:
hypernyms_train[hyponym].append(hypernym)
hypernyms_validation = {k: hypernyms_wiktionary[k] for i in wiktionary_validation for k in (keys_wiktionary[i],)}
hypernyms_test = {k: hypernyms_wiktionary[k] for i in wiktionary_test for k in (keys_wiktionary[i],)}
subsumptions_train = [(x, y) for x, ys in hypernyms_train.items() for y in ys]
subsumptions_validation = [(x, y) for x, ys in hypernyms_validation.items() for y in ys]
subsumptions_test = [(x, y) for x, ys in hypernyms_test.items() for y in ys]
def write_subsumptions(subsumptions, filename):
with open(filename, 'w') as f:
writer = csv.writer(f, dialect='excel-tab', lineterminator='\n')
for pair in subsumptions:
writer.writerow(pair)
write_subsumptions(subsumptions_train, 'subsumptions-train.txt')
write_subsumptions(subsumptions_validation, 'subsumptions-validation.txt')
write_subsumptions(subsumptions_test, 'subsumptions-test.txt')
with open('synonyms.txt', 'w') as f:
writer = csv.writer(f, dialect='excel-tab', lineterminator='\n')
for word, words in synonyms.items():
writer.writerow((word, ','.join(words)))
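# A small re-use sketch (not executed here): the files written above are plain
# tab-separated pairs, so they can be read back later with the csv module, e.g.
#
#   with open('subsumptions-train.txt') as f:
#       pairs = [tuple(row) for row in csv.reader(f, delimiter='\t')]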
| mit |
mattloose/RUscripts | ampbalance.py | 1 | 23483 | #!C:\anaconda python
import sys, os, re
import time
import threading, thread
from Bio import SeqIO
from StringIO import StringIO
import string
import mlpy
import random
import math
import csv
import numpy as np
import array as ar
import configargparse
import subprocess
import shutil
import glob
import h5py
from itertools import islice
from collections import OrderedDict
import psutil
import multiprocessing
import platform
sys.path.append("ReadUntil")
from ruutils import process_model_file
global oper
oper = platform.system()
if oper == 'Windows': # MS
oper = 'windows'
else:
oper = 'linux' # MS
## linux version
if (oper == "linux"):
config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'amp.config')
## linux version
if (oper == "windows"):
config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'ampW.config')
__version__ = "1.0"
__date__ = "29th March 2016"
parser = configargparse.ArgParser(description='ampbalance: A program designed to balance amplicons from a specific reference sequence post sequencing on ONT minIONs but prebasecalling. Developed by Matt Loose @mattloose or [email protected] for help!',default_config_files=[config_file])
parser.add('-fasta', '--reference_fasta_file', type=str, dest='fasta', required=True, default=None, help="The fasta format file for the reference sequence for your organism.")
parser.add('-ids', '--reference_amplicon_positions', type=str, required=True, default=None, help="A file containing a list of amplicon positions defined for the reference sequence. 1 amplicon per line in the format fasta_sequence_name:start-stop e.g J02459:27-1938", dest='ids')
parser.add('-w', '--watch-dir', type=str, required=True, default=None, help="The path to the folder containing the downloads directory with fast5 reads to analyse - e.g. C:\data\minion\downloads (for windows).", dest='watchdir')
parser.add('-o', '--output-dir', type=str, required=True, default="prefiltered", help="The path to the destination folder for the preprocessed reads" , dest="targetpath")
parser.add('-d', '--depth',type=int, required=True, default=None, help = 'The desired coverage depth for each amplicon. Note this is unlikely to be achieved for each amplicon and should probably be an overestimate of the minimum coverage required.', dest='depth')
parser.add('-procs', '--proc_num', type=int, dest='procs',required=True, help = 'The number of processors to run this on.')
parser.add('-cautious', '--cautious', action='store_true', help="DTW of long reads on low memory systems can cause unexpected crashes. This option will prevent automatic skipping on any reads over 10,000 events. You can optionally increase this length with the -l parameter. USE WITH CAUTION AS THIS MAY CAUSE A SYSTEM TO CRASH.", dest='caution')
parser.add('-l', '--length',type=int, required=False, default=10000, help = 'A limit on the length of read that ampbalance will attempt to align using DTW - Long reads can cause problems on low memory systems' , dest='length')
parser.add('-t', '--template_model',type=str, required=True, help = 'The appropriate template model file to use', dest='temp_model')
parser.add('-c', '--complement_model',type=str, required=True, help = 'The appropriate complement model file to use', dest='comp_model')
parser.add('-v', '--verbose-true', action='store_true', help="Print detailed messages while processing files.", default=False, dest='verbose')
parser.add_argument('-ver', '--version', action='version',version=('%(prog)s version={version} date={date}').format(version=__version__,date=__date__))
args = parser.parse_args()
###########################################################
def make_hdf5_object_attr_hash(hdf5object, fields):
att_hash=dict()
for field in fields:
if (field in hdf5object.attrs.keys() ):
#print "filed: ",field (args.ref_fasta is not None), hdf5object.attrs[field]
att_hash[field]=hdf5object.attrs[field]
return att_hash
def scale(a): # MS
mu = np.mean(a, None)
sigma = np.std(a)
if sigma == 0: return 0
else: return (a - mu) / sigma
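# Quick illustration of scale(): it z-score normalises a squiggle so that query
# events and reference kmer means are compared on a common scale before DTW,
# e.g. scale(np.array([1.0, 2.0, 3.0])) -> array([-1.2247, 0., 1.2247]);
# a zero-variance input returns 0 rather than dividing by zero.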
"""######################################################
def process_model_file(model_file):
model_kmers = dict()
with open(model_file, 'rb') as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
d = list(reader)
#print d
for r in range(0, len(d)):
#print r, d[r]
kmer = d[r][0]
mean = d[r][1]
#print r, kmer, mean
model_kmers[kmer]=mean
return model_kmers,len(d[r][0])"""
######################################################
def get_amplicons():
print "Reading amplicons"
if (args.verbose is True):
print "ids is of type", type(amplicons)
for sequence in amplicons:
if (args.verbose is True):
print sequence
start = int(float(sequence.split(':', 1 )[1].split('-',1)[0]))
stop = int(float(sequence.split(':', 1 )[1].split('-',1)[1]))
if (args.verbose is True):
print start
print stop
REVERSE_stop = seqlengths[sequence.split(':',1)[0]]-start
REVERSE_start = seqlengths[sequence.split(':',1)[0]]-stop
if (args.verbose is True):
print REVERSE_stop
print REVERSE_start
######################################################
def get_seq_len(ref_fasta):
seqlens=dict()
for record in SeqIO.parse(ref_fasta, 'fasta'):
seq=record.seq
seqlens[record.id]=len(seq)
return seqlens
#######################################################################
def raw_squiggle_search2(squiggle,hashthang):
result=[]
#print args.speedmode
for ref in hashthang:
try:
#queryarray = sklearn.preprocessing.scale(np.array(squiggle),axis=0,with_mean=True,with_std=True,copy=True)
queryarray = scale(squiggle)
dist, cost, path = mlpy.dtw_subsequence(queryarray,hashthang[ref]['Fprime'])
#if (args.verbose is True):
# memory_usage_psutil()
result.append((dist,ref,"F",path[1][0],path[1][-1],path[0][0],path[0][-1]))
dist, cost, path = mlpy.dtw_subsequence(queryarray,hashthang[ref]['Rprime'])
result.append((dist,ref,"R",(len(hashthang[ref]['Rprime'])-path[1][-1]),(len(hashthang[ref]['Rprime'])-path[1][0]),path[0][0],path[0][-1]))
#if (args.verbose is True):
# memory_usage_psutil()
except Exception,err:
print "Warp Fail"
return sorted(result,key=lambda result: result[0])[0][1],sorted(result,key=lambda result: result[0])[0][0],sorted(result,key=lambda result: result[0])[0][2],sorted(result,key=lambda result: result[0])[0][3],sorted(result,key=lambda result: result[0])[0][4],sorted(result,key=lambda result: result[0])[0][5],sorted(result,key=lambda result: result[0])[0][6]
######################################################
def process_ref_fasta_raw(ref_fasta,model_kmer_means,model_kmer_len):
#print "processing the reference fasta."
kmer_len=model_kmer_len
kmer_means=dict()
for record in SeqIO.parse(ref_fasta, 'fasta'):
kmer_means[record.id]=dict()
kmer_means[record.id]["F"]=list()
kmer_means[record.id]["R"]=list()
kmer_means[record.id]["Fprime"]=list()
kmer_means[record.id]["Rprime"]=list()
if (args.verbose is True):
print "ID", record.id
print "length", len(record.seq)
print "FORWARD STRAND"
seq = record.seq
for x in range(len(seq)+1-kmer_len):
kmer = str(seq[x:x+kmer_len])
#print seq[x:x+kmer_len]
kmer_means[record.id]["F"].append(float(model_kmer_means[kmer]))
if (args.verbose is True):
print "REVERSE STRAND"
seq = revcomp = record.seq.reverse_complement()
for x in range(len(seq)+1-kmer_len):
kmer = str(seq[x:x+kmer_len])
kmer_means[record.id]["R"].append(float(model_kmer_means[kmer]))
#kmer_means[record.id]["Fprime"]=sklearn.preprocessing.scale(kmer_means[record.id]["F"], axis=0, with_mean=True, with_std=True, copy=True)
kmer_means[record.id]["Fprime"]=scale(kmer_means[record.id]["F"])
#kmer_means[record.id]["Rprime"]=sklearn.preprocessing.scale(kmer_means[record.id]["R"], axis=0, with_mean=True, with_std=True, copy=True)
kmer_means[record.id]["Rprime"]=scale(kmer_means[record.id]["R"])
return kmer_means
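# Shape of the returned structure (comment only): one entry per reference record,
# e.g. for the J02459 reference used in the ampbalance help text,
#   kmer_means['J02459']['F']       forward-strand kmer current means
#   kmer_means['J02459']['Fprime']  the same track, z-score normalised
# and likewise 'R'/'Rprime' for the reverse complement.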
#######################################################################
def process_hdf5((filename,kmerhashT,kmerhashC,amplicons,ampstartdict,ampenddict,procampres)):
readprediction=dict()
if (args.verbose is True):
print filename
hdf = h5py.File(filename, 'r')
for read in hdf['Analyses']['EventDetection_000']['Reads']:
events = hdf['Analyses']['EventDetection_000']['Reads'][read]['Events'][()]
event_collection=list()
time_collection=list()
for event in events:
event_collection.append(float(event['mean']))
time_collection.append(event['start'])
#print event_collection
#print time_collection
read_id_fields = ['duration','hairpin_found','hairpin_event_index','read_number','scaling_used','start_mux','start_time',]
read_info_hash = make_hdf5_object_attr_hash(hdf['Analyses/EventDetection_000/Reads/'+read],read_id_fields)
if read_info_hash['hairpin_found']==1:
procampres["HF"] += 1
template_time = time_collection[read_info_hash['hairpin_event_index']]-time_collection[0]
complement_time = time_collection[len(time_collection)-1]-time_collection[read_info_hash['hairpin_event_index']]
ratiotempcomp = float(complement_time)/float(template_time)
if (args.verbose is True):
print "!!! Hairpin Found !!!"
print "Template Length:", len(event_collection[0:read_info_hash['hairpin_event_index']])
print "Complement Length:", len(event_collection[read_info_hash['hairpin_event_index']:len(event_collection)])
# print "Template Time", template_time
# print "Complement Time", complement_time
if (len(event_collection[0:read_info_hash['hairpin_event_index']]) > (args.length)) or (len(event_collection[read_info_hash['hairpin_event_index']:len(event_collection)]) > (args.length)):
procampres["BF"] += 1
if (args.verbose is True):
print "******** WARNING THIS READ COULD CRASH WINDOWS ********"
print "Skipped", filename
if (args.caution is False):
break
#try:
(seqmatchnameT,distanceT,frT,rsT,reT,qsT,qeT) = raw_squiggle_search2(event_collection[0:read_info_hash['hairpin_event_index']],kmerhashT)
if (args.verbose is True):
print "Warp 1 Complete"
#except Exception,err:
# print "A time warp failed:", err
#try:
(seqmatchnameC,distanceC,frC,rsC,reC,qsC,qeC) = raw_squiggle_search2(event_collection[read_info_hash['hairpin_event_index']:len(event_collection)],kmerhashC)
if (args.verbose is True):
print "Warp 2 Complete"
#except Exception,err:
# print "A time warp failed:", err
if (seqmatchnameC==seqmatchnameT and frT != frC and reC >= rsT and rsC <= reT):
if (args.verbose is True):
print "Good Candidate"
if (rsT < rsC):
start = rsT
else:
start = rsC
if (reT > reC):
end = reT
else:
end = reC
for amplicon in amplicons:
ampstart = int(float(amplicon.split(':', 1 )[1].split('-',1)[0]))
ampstop = int(float(amplicon.split(':', 1 )[1].split('-',1)[1]))
if (args.verbose is True):
print start,end
amplicon, value = min(ampstartdict.items(), key=lambda (_, v): abs(v - start))
if (args.verbose is True):
print amplicon, value
key2, value2 = min(ampenddict.items(), key=lambda (_, v): abs(v - end))
if (args.verbose is True):
print key2, value2
if amplicon == key2:
#if 1.3 < ratiotempcomp < 1.7:
procampres[amplicon] += 1
if (amplicon not in readprediction):
readprediction[amplicon]=dict()
if (0 not in readprediction[amplicon]):
readprediction[amplicon][0]=dict()
if (filename not in readprediction[amplicon][0]):
readprediction[amplicon][0][filename]=dict()
readprediction[amplicon][0][filename]["name"]=filename
readprediction[amplicon][0][filename]["matchdistance"]=distanceT
#elif 1 < ratiotempcomp < 1.7:
# procampres[amplicon] += 1
# if (amplicon not in readprediction):
# readprediction[amplicon]=dict()
# if (1 not in readprediction[amplicon]):
# readprediction[amplicon][1]=dict()
# if (filename not in readprediction[amplicon][1]):
# readprediction[amplicon][1][filename]=dict()
# readprediction[amplicon][1][filename]["name"]=filename
# readprediction[amplicon][1][filename]["matchdistance"]=distanceT
else:
if (amplicon not in readprediction):
readprediction[amplicon]=dict()
if (1 not in readprediction[amplicon]):
readprediction[amplicon][1]=dict()
if (filename not in readprediction[amplicon][1]):
readprediction[amplicon][1][filename]=dict()
readprediction[amplicon][1][filename]["name"]=filename
readprediction[amplicon][1][filename]["matchdistance"]=distanceT
# else:
# if 1 < ratiotempcomp < 1.7:
# procampres[amplicon] += 1
# if (amplicon not in readprediction):
# readprediction[amplicon]=dict()
# if (3 not in readprediction[amplicon]):
# readprediction[amplicon][3]=dict()
# if (filename not in readprediction[amplicon][3]):
# readprediction[amplicon][3][filename]=dict()
# readprediction[amplicon][3][filename]["name"]=filename
# readprediction[amplicon][3][filename]["matchdistance"]=distanceT
# else:
# procampres[amplicon] += 1
# if (amplicon not in readprediction):
# readprediction[amplicon]=dict()
# if (4 not in readprediction[amplicon]):
# readprediction[amplicon][4]=dict()
# if (filename not in readprediction[amplicon][4]):
# readprediction[amplicon][4][filename]=dict()
# readprediction[amplicon][4][filename]["name"]=filename
# readprediction[amplicon][4][filename]["matchdistance"]=distanceT
else:
if (args.verbose is True):
print "Template and Complement don't overlap sufficiently"
procampres["DO"] += 1
if (args.verbose is True):
print "Template",frT,rsT,reT
print "Complement",frC,rsC,reC
else:
procampres["NH"] += 1
if (args.verbose is True):
print "!!! Hairpin Not Found !!!"
hdf.close()
procampres["TF"]-=1
if (args.verbose is True):
print procampres,
print filename+" done"
else:
print procampres
return readprediction
######################################################
def check_basecalled(hdf):
'''
Function to check if an hdf file is basecalled.
'''
for element in hdf:
for element2 in hdf[element]:
for element3 in hdf[element][element2]:
for element4 in hdf[element][element2][element3]:
if any("Model" in s for s in [element,element2,element3,element4]):
return True
return False
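# Usage sketch (illustrative only; the path below is a placeholder):
#   hdf = h5py.File('reads/example_read.fast5', 'r')
#   already_called = check_basecalled(hdf)   # True if any *Model* group exists
#   hdf.close()
# ampbalance uses this flag below to decide whether a read is copied into the
# "downloads" subfolder of the output directory.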
######################
if __name__ == "__main__":
multiprocessing.freeze_support()
p = multiprocessing.Pool(args.procs)
manager = multiprocessing.Manager()
amplicon_file = open(args.ids, "r")
amplicons = []
for line in amplicon_file.readlines():
amplicons.append(line.rstrip())
if (args.verbose is True):
print amplicons
amplicon_file.close()
fasta_file = args.fasta
model_file_template = args.temp_model
model_file_complement = args.comp_model
model_kmer_means_template,tempkmerlen=process_model_file(model_file_template)
model_kmer_means_complement,compkmerlen=process_model_file(model_file_complement)
kmerhashT = process_ref_fasta_raw(fasta_file,model_kmer_means_template,tempkmerlen)
kmerhashC = process_ref_fasta_raw(fasta_file,model_kmer_means_complement,compkmerlen)
seqlengths = get_seq_len(fasta_file)
get_amplicons()
ampdict=[]
ampstartdict=dict()
ampenddict=dict()
counter = 0
procampres=manager.dict()
for amplicon in amplicons:
counter+=1
ampstart = int(float(amplicon.split(':', 1 )[1].split('-',1)[0]))
ampstop = int(float(amplicon.split(':', 1 )[1].split('-',1)[1]))
ampstartdict[counter]=ampstart
ampenddict[counter]=ampstop
ampdict.append((counter,ampstart,ampstop))
procampres[counter]=0
procampres["DO"]=0
procampres["HF"]=0
procampres["NH"]=0
procampres["BF"]=0
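    # Bookkeeping keys used alongside the per-amplicon counters above (as read
    # from process_hdf5): "HF" = hairpin found, "NH" = no hairpin found,
    # "BF" = read skipped because it is too long to warp safely, "DO" = template
    # and complement matches do not overlap, "TF" (set below) = files left to process.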
print "******AMP DICTIONARY*******"
print type(ampstartdict)
print ampstartdict
readprediction=dict()
print procampres
print "Now we are going to try and open the raw reads and do the same as we have done above..."
d=list()
filenamecounter=0
for filename in glob.glob(os.path.join(args.watchdir, '*.fast5')):
filenamecounter+=1
d.append([filename,kmerhashT,kmerhashC,amplicons,ampstartdict,ampenddict,procampres])
procdata=tuple(d)
procampres["TF"]=filenamecounter
results = p.map(process_hdf5, (procdata),chunksize=1)
p.close()
masterreadprediction=dict()
for element in results:
for amplicon in element:
if (amplicon not in masterreadprediction):
masterreadprediction[amplicon]=dict()
for quality in element[amplicon]:
if (quality not in masterreadprediction[amplicon]):
masterreadprediction[amplicon][quality]=dict()
for filename in element[amplicon][quality]:
if (filename not in masterreadprediction[amplicon][quality]):
masterreadprediction[amplicon][quality][filename]=dict()
masterreadprediction[amplicon][quality][filename]["name"]=element[amplicon][quality][filename]["name"]
masterreadprediction[amplicon][quality][filename]["matchdistance"]=element[amplicon][quality][filename]["matchdistance"]
print "Amplicon Read Counts"
for amplicon in masterreadprediction:
numberofreads = 0
for i in range(5):
try:
if len(masterreadprediction[amplicon][i].keys()) > 0:
numberofreads += len(masterreadprediction[amplicon][i].keys())
except Exception, err:
print "",
print "Amplicon Number:",amplicon,"Reads:",numberofreads
print "Copying Amplicon Data"
for amplicon in masterreadprediction:
print "Amplicon Number",amplicon
counter = 0
for i in range(5):
try:
if (len(masterreadprediction[amplicon][i].keys())>0):
if (args.verbose is True):
print len(masterreadprediction[amplicon][i].keys())
if (counter < args.depth):
ordered0 = OrderedDict(sorted(masterreadprediction[amplicon][i].iteritems(), key=lambda x: x[1]['matchdistance']))
for read in ordered0:
if (args.verbose is True):
print "Checking if read is basecalled"
print read
hdf = h5py.File(read, 'r')
readstatus=False
if check_basecalled(hdf) is True:
readstatus=True
hdf.close()
if (args.verbose is True):
print read, ordered0[read]["matchdistance"]
if not os.path.exists(args.targetpath):
os.makedirs(args.targetpath)
if readstatus is True:
#destdir = os.path.join(destdir,str(amplicon),"downloads")
destdir = os.path.join(args.targetpath,"downloads")
else:
destdir = os.path.join(args.targetpath)
if not os.path.exists(destdir):
os.makedirs(destdir)
try:
filetocheck = os.path.split(read)
sourcefile = read
destfile = os.path.join(destdir,filetocheck[1])
if (args.verbose is True):
print "sourcefile is:",sourcefile
print "destfile is:",destfile
try:
shutil.copy(sourcefile,destfile)
except Exception, err:
print "File Copy Failed",err
except Exception, err:
print "Weird bug I don't GROK"
counter += 1
if counter >= args.depth:
break
except Exception, err:
if (args.verbose is True):
print "No reads of class "+str(i)
exit()
| mit |
AlexanderFabisch/scikit-learn | sklearn/feature_extraction/hashing.py | 41 | 6175 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
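# A short, self-contained usage sketch (illustration only, guarded so it never
# runs on import): with input_type="string" each sample is a sequence of feature
# names and every occurrence contributes a value of 1.
if __name__ == '__main__':
    hasher = FeatureHasher(n_features=8, input_type='string')
    X_demo = hasher.transform([['dog', 'cat', 'dog'], ['elephant']])
    print(X_demo.shape)  # (2, 8) scipy.sparse CSR matrix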
| bsd-3-clause |
iABC2XYZ/abc | BPM/BPM_CM1.2.py | 1 | 9214 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Peiyong Jiang
Author: Peiyong Jiang
[email protected]
Notes on this file:
1. Check whether there is beam. Since this is CM1, only acct3 is needed; when acct3 reads no beam, report that "acct3" has no beam.
2. Check whether each solenoid can be set; if a solenoid cannot be set, display that solenoid's name directly.
3. Check whether the BPM readings respond; if a BPM does not move, report which BPM is stuck.
4. Know the upper and lower limits of the solenoid currents.
5. The upstream MEBT must be tuned well beforehand and should only drift within a very small range.
6. BPM data: BPM5-BPM10, x11 and y11
7. Solenoid currents: s1-s5
"""
import time
import numpy as np
import os
import matplotlib.pyplot as plt
plt.close('all')
def GetACCT3():
acct3=np.random.random() # acct3=caget('ADS_SLIT_VALUE.CHAN6_VAL_M')
return acct3
def GetBeamCurrent():
acct1=np.random.random() # acct1=caget('ADS_SLIT_VALUE.CHAN4_VAL_M')
acct2=np.random.random() # acct2=caget('ADS_SLIT_VALUE.CHAN5_VAL_M')
acct3=np.random.random() # acct3=caget('ADS_SLIT_VALUE.CHAN6_VAL_M')
fc2=np.random.random() # fc2=caget('ADS_SLIT_VALUE.CHAN2_VAL')
return acct1,acct2,acct3,fc2
def CheckBeamCurrent(acct1,acct2,acct3,fc2,acct1Check=1,acct2Check=1,acct3Check=1,fc2Check=1):
flagCheckCurrent=0
if acct1<acct1Check:
print "Current:acct1"
print iRec
flagCheckCurrent=1
if acct2<acct2Check:
print "Current:acct2"
print iRec
flagCheckCurrent=1
if acct3<acct3Check:
print "Current:acct3"
print iRec
flagCheckCurrent=1
if fc2>fc2Check:
print "Current:fc2"
print iRec
flagCheckCurrent=1
return flagCheckCurrent
def GetDCCurrent(numDC,numCM=1):
iDCH=[]
iDCV=[]
for i in xrange(numDC):
nameDC_H=nameDC.replace('H_99','H_0'+str(i+1)).replace('X',str(numCM))
nameDC_V=nameDC.replace('H_99','V_0'+str(i+1)).replace('X',str(numCM))
iDCHTmp=np.random.random() # iDCHTmp=caget(nameDC_H)
iDCVTmp=np.random.random() # iDCVTmp=caget(nameDC_V)
iDCH.append(iDCHTmp)
iDCV.append(iDCVTmp)
return iDCH,iDCV
def CheckDCCurrent(iRec,numDC,numCM=1):
if iRec==0:
iDCH_0,iDCV_0=[argMax+1]*5,[argMax+1]*5
else:
iDCH_0,iDCV_0=iDCH,iDCV
iDCH,iDCV=GetDCCurrent(numDC)
flagCheckDCCurrent=0
for i in range(numDC):
if np.abs(iDCH[i]-iDCH_0[i])<1e-3:
print 'DC: H: '+str(i+1)
print iRec
flagCheckDCCurrent=1
if np.abs(iDCV[i]-iDCV_0[i])<1e-3:
print 'DC: V: '+str(i+1)
print iRec
flagCheckDCCurrent=1
return flagCheckDCCurrent
def GetBPM(idStart=5,idEnd=10):
xBPM=[]
yBPM=[]
for i in range(idStart,idEnd+1):
xNameBPM=nameBPM.replace('0-X',str(i)+'-X')
yNameBPM=nameBPM.replace('0-X',str(i)+'-Y')
xBPMTmp=np.random.random() # xBPMTmp=caget(xNameBPM)
yBPMTmp=np.random.random() # yBPMTmp=caget(yNameBPM)
xBPM.append(xBPMTmp)
yBPM.append(yBPMTmp)
return xBPM,yBPM
def CheckBPM(iRec,idStart=5,idEnd=10):
if iRec==0:
xBPM_0,yBPM_0=[100.]*(idEnd+1-idStart),[100.]*(idEnd+1-idStart)
else:
xBPM_0,yBPM_0=xBPM,yBPM
xBPM,yBPM=GetBPM(idStart,idEnd)
flagCheckBPM=0
for i in range(idEnd+1-idStart):
if np.abs(xBPM[i]-xBPM_0[i])<1e-3:
print 'BPM: X: '+str(i+idStart)
print iRec
flagCheckBPM=1
if np.abs(yBPM[i]-yBPM_0[i])<1e-3:
print 'BPM: Y: '+str(i+idStart)
print iRec
flagCheckBPM=1
return flagCheckBPM
def GenRandDCCurrent(numDC,argMin,argMax):
iDCH=np.random.random((numDC))*(argMax-argMin)+argMin
iDCV=np.random.random((numDC))*(argMax-argMin)+argMin
iDCHFlag=np.sign(np.random.random((numDC))-0.5)
iDCVFlag=np.sign(np.random.random((numDC))-0.5)
return iDCH,iDCV,iDCHFlag,iDCVFlag
def Reflect(iDCH,argMin,argMax,iDCHFlag):
print iDCHFlag
iDCHFlag[iDCH>argMax]=-iDCHFlag[iDCH>argMax]
iDCHFlag[iDCH<argMin]=-iDCHFlag[iDCH<argMin]
print iDCHFlag
print '-'*15
iDCH[iDCH>argMax]=argMax*2-iDCH[iDCH>argMax]
iDCH[iDCH<argMin]=argMin*2-iDCH[iDCH<argMin]
return iDCH,iDCHFlag
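# Illustration of Reflect(): with argMin=-65 and argMax=65 (the limits set below),
# a proposed corrector current of 70 is folded back to 2*65 - 70 = 60 and the
# random-walk direction flag for that element is flipped, so the walk bounces off
# the power-supply limits instead of drifting outside the allowed window.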
def UpdateRandDCCurrent(iDCH,iDCV,argMin,argMax,argAmp,iDCHFlag,iDCVFlag):
numDC=len(iDCH)
dDCH=(np.random.random((numDC)))*argAmp*iDCHFlag
dDCV=(np.random.random((numDC)))*argAmp*iDCVFlag
iDCH+=dDCH
iDCV+=dDCV
iDCH,iDCHFlag=Reflect(iDCH,argMin,argMax,iDCHFlag)
iDCV,iDCVFlag=Reflect(iDCV,argMin,argMax,iDCVFlag)
return iDCH,iDCV,iDCHFlag,iDCVFlag
def GenDCHCrruent(iRec,stepFreshGen,numDC,argMin,argMax,argAmp):
if iRec % stepFreshGen==0:
iDCH,iDCV,iDCHFlag,iDCVFlag=GenRandDCCurrent(numDC,argMin,argMax)
else:
iDCH,iDCV,iDCHFlag,iDCVFlag=UpdateRandDCCurrent(iDCH,iDCV,argMin,argMax,argAmp,iDCHFlag,iDCVFlag)
iDCH,iDCV=np.round(iDCH*100)/100,np.round(iDCV*100)/100
return iDCH,iDCV
def PutDCH(iDCH,iDCV,numDC,numCM=1):
nameDC='HCMX_PS:DCH_99:IMon'
for i in range(0,numDC):
nameDCH=nameDC.replace('X',str(numCM)).replace('H_99','H_0'+str(i+1)).replace('Mon','Set')
nameDCV=nameDC.replace('X',str(numCM)).replace('H_99','V_0'+str(i+1)).replace('Mon','Set')
#caput(nameDCH,iDCH[i-idBPMStart])
#caput(nameDCV,iDCH[i-idBPMStart])
print nameDCH
print nameDCV
def TimeSleep(iRec,stepFreshGen,timeSleep,timeDead=15):
if iRec % stepFreshGen==0:
time.sleep(timeDead)
time.sleep(timeSleep)
def GetSolenoidCurren(numDC,numCM=1):
iSolen=[]
for i in range(numDC):
nameSol=nameS.replace('X',str(numCM)).replace('99','0'+str(i+1))
iSolenTmp=np.random.random() # iSolenTmp=caget(nameSol)
iSolen.append(iSolenTmp)
return iSolen
def CheckSolenoid(iSolen,numDC,numCM=1):
if iRec==0:
iSolen_0=GetSolenoidCurren(numDC,numCM)
else:
iSolen_0=iSolen
iSolen=GetSolenoidCurren(numDC,numCM)
flagSolen=0
for i in range(numDC):
if np.abs(iSolen[i]-iSolen_0[i])>3:
print "Sol: "+str(i+1)
print iRec
flagSolen=1
return flagSolen
def STR(iDCHPut):
str_iDCHPut=str(np.round(np.array(iDCHPut)*100.)/100.)[1:-1].strip().replace('\n',' ').replace(',',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ')+' '
return str_iDCHPut
argMin=-65
argMax=65
argAmp=10
numDC=5
numCM=1
idBPMStart=5
idBPMEnd=10
stepFreshGen=50
timeSleep=0.5
testFlagCheckBeamCurrent=1
testFlagSolen=1
testFlagCheckDCCurrent=1
testFlagCheckBPM=1
testGetACCT3=1
nameS='HCMX_PS:SOL_99:IMon'
nameDC='HCMX_PS:DCH_99:IMon'
nameBPM='BPM:0-X11'
now2Write=time.asctime().replace(' ',' ').replace(' ','_')[4::]
nameRec='Rec_'+now2Write+'.dat'
iRec=0
with open(nameRec,'a+') as fid:
while True:
acct1,acct2,acct3,fc2=GetBeamCurrent()
flagCheckBeamCurrent=CheckBeamCurrent(acct1,acct2,acct3,fc2)
if flagCheckBeamCurrent==1 and testFlagCheckBeamCurrent==1:
pass #continue
iSolen=GetSolenoidCurren(numDC,numCM)
flagSolen=CheckSolenoid(iSolen,numDC,numCM)
if flagSolen==1 and testFlagSolen==1:
pass #continue
flagCheckDCCurrent=CheckDCCurrent(iRec,numDC)
if flagCheckDCCurrent==1 and testFlagCheckDCCurrent==1:
iDCH,iDCV = GenDCHCrruent(iRec,stepFreshGen,numDC,argMin,argMax,argAmp)
            PutDCH(iDCH,iDCV,numDC,numCM=1)
TimeSleep(iRec,stepFreshGen,timeSleep)
#continue
flagCheckBPM=CheckBPM(iRec,idBPMStart,idBPMEnd)
if flagCheckBPM==1 and testFlagCheckBPM==1:
iDCH,iDCV = GenDCHCrruent(iRec,stepFreshGen,numDC,argMin,argMax,argAmp)
            PutDCH(iDCH,iDCV,numDC,numCM=1)
TimeSleep(iRec,stepFreshGen,timeSleep)
#continue
iDCH,iDCV = GenDCHCrruent(iRec,stepFreshGen,numDC,argMin,argMax,argAmp)
PutDCH(iDCH,iDCV,numDC,numCM=1)
TimeSleep(iRec,stepFreshGen,timeSleep)
if testGetACCT3==1:
while GetACCT3()<1.:
print "ACCT 3"
time.sleep(0.55)
acct3=GetACCT3()
xBPM,yBPM =GetBPM(idBPMStart,idBPMEnd)
iDCHPut,iDCVPut=iDCH,iDCV
iDCHGet,iDCVGet=GetDCCurrent(numDC,numCM=1)
iSolenGet=GetSolenoidCurren(numDC,numCM)
str_acct3,str_xBPM,str_yBPM,str_iDCHPut,str_iDCVPut, str_iDCHGet, str_iDCVGet, str_iSolenGet \
=STR(acct3),STR(xBPM),STR(yBPM),STR(iDCHPut),STR(iDCVPut),STR(iDCHGet),STR(iDCVGet),STR(iSolenGet)
strWrite=str_iDCHPut+str_iDCVPut+str_xBPM+str_yBPM+str_acct3+str_iDCHGet+str_iDCVGet+str_iSolenGet
fid.writelines(strWrite+'\n')
iRec+=1
print iRec
break
| gpl-3.0 |
dougalsutherland/skl-groups | skl_groups/kernels/mmk.py | 1 | 4101 | from __future__ import division
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.externals.six.moves import xrange as range
from ..features import as_features
class MeanMapKernel(BaseEstimator, TransformerMixin):
'''
Computes a kernel between bags as the mean pairwise evaluation of a kernel
between the elements of the bags.
Also known as the maximum mean discrepancy (MMD) when used for a hypothesis
test for whether two samples come from the same distribution.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see :mod:`sklearn.metrics.pairwise`.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
See also
--------
:class:`skl_groups.summaries.BagMean` is the equivalent if you have an
explicit kernel map.
'''
_pairwise = False
_pairwise_output = True
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
def fit(self, X, y=None):
'''
Specify the data to which kernel values should be computed.
Parameters
----------
X : list of arrays or :class:`skl_groups.features.Features`
The bags to compute "to".
'''
self.features_ = as_features(X, stack=True, bare=True)
# TODO: could precompute things like squared norms if kernel == "rbf".
# Probably should add support to sklearn instead of hacking it here.
return self
def transform(self, X):
'''
Compute kernels from X to :attr:`features_`.
Parameters
----------
X : list of arrays or :class:`skl_groups.features.Features`
The bags to compute "from". Must have same dimension as
:attr:`features_`.
Returns
-------
K : array of shape ``[len(X), len(features_)]``
The kernel evaluations from X to :attr:`features_`.
'''
X = as_features(X, stack=True, bare=True)
Y = self.features_
if X.dim != Y.dim:
raise ValueError("MMK transform got dimension {} but had {} at fit"
.format(X.dim, Y.dim))
pointwise = pairwise_kernels(X.stacked_features, Y.stacked_features,
metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# TODO: is there a way to do this without a Python loop?
K = np.empty((len(X), len(Y)))
for i in range(len(X)):
for j in range(len(Y)):
K[i, j] = pointwise[X._boundaries[i]:X._boundaries[i+1],
Y._boundaries[j]:Y._boundaries[j+1]].mean()
return K
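# A short usage sketch (illustration only, random bags, guarded so importing the
# module is unaffected): fit on one list of bags, then evaluate the mean-map
# kernel from two new bags to the four fitted ones, giving a (2, 4) matrix.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    bags_fit = [rng.randn(rng.randint(10, 20), 3) for _ in range(4)]
    bags_new = [rng.randn(rng.randint(10, 20), 3) for _ in range(2)]
    mmk = MeanMapKernel(kernel='rbf', gamma=0.5)
    print(mmk.fit(bags_fit).transform(bags_new).shape)  # (2, 4)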
| bsd-3-clause |
hanfang/glmnet_python | glmnet_python/cvglmnetPlot.py | 1 | 4053 | # -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------
cvglmnetPlot.m: plot the cross-validation curve produced by cvglmnet
--------------------------------------------------------------------------
DESCRIPTION:
Plots the cross-validation curve, and upper and lower standard
deviation curves, as a function of the lambda values used.
USAGE:
cvglmnetPlot(cvfit);
cvglmnetPlot(cvfit, sign_lambda)
cvglmnetPlot(cvfit, sign_lambda, options)
INPUT ARGUMENTS:
cvobject fitted "cvglmnet" object
sign_lambda Either plot against log(lambda) (default) or its negative if
sign_lambda=-1.
varargin Other errorbar parameters.
DETAILS:
A plot is produced, and nothing is returned.
LICENSE: GPL-2
AUTHORS:
Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
Fortran code was written by Jerome Friedman
    R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
The original MATLAB wrapper was written by Hui Jiang,
and is updated and maintained by Junyang Qian.
This Python wrapper (adapted from the Matlab and R wrappers)
is written by Balakumar B.J., [email protected]
Department of Statistics, Stanford University, Stanford, California, USA.
REFERENCES:
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent,
http://www.jstatsoft.org/v33/i01/
Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010
SEE ALSO:
cvglmnet and glmnet.
EXAMPLES:
scipy.random.seed(1)
x=scipy.random.normal(size = (100,20))
y=scipy.random.normal(size = (100,1))
g2=scipy.random.choice(2,size = (100,1))*1.0
g4=scipy.random.choice(4,size = (100,1))*1.0
plt.figure()
fit1=cvglmnet(x = x.copy(),y = y.copy())
cvglmnetPlot(fit1)
plt.figure()
fit2=cvglmnet(x = x.copy(),y = g2.copy(), family = 'binomial')
cvglmnetPlot(fit2)
plt.figure()
fit3=cvglmnet(x = x.copy(),y = g2.copy(), family = 'binomial', ptype = 'class')
cvglmnetPlot(fit3)
"""
import scipy
import matplotlib.pyplot as plt
def cvglmnetPlot(cvobject, sign_lambda = 1.0, **options):
sloglam = sign_lambda*scipy.log(cvobject['lambdau'])
fig = plt.gcf()
ax1 = plt.gca()
#fig, ax1 = plt.subplots()
plt.errorbar(sloglam, cvobject['cvm'], cvobject['cvsd'], \
ecolor = (0.5, 0.5, 0.5), \
**options
)
plt.hold(True)
plt.plot(sloglam, cvobject['cvm'], linestyle = 'dashed',\
marker = 'o', markerfacecolor = 'r')
xlim1 = ax1.get_xlim()
ylim1 = ax1.get_ylim()
xval = sign_lambda*scipy.log(scipy.array([cvobject['lambda_min'], cvobject['lambda_min']]))
plt.plot(xval, ylim1, color = 'b', linestyle = 'dashed', \
linewidth = 1)
if cvobject['lambda_min'] != cvobject['lambda_1se']:
xval = sign_lambda*scipy.log([cvobject['lambda_1se'], cvobject['lambda_1se']])
plt.plot(xval, ylim1, color = 'b', linestyle = 'dashed', \
linewidth = 1)
ax2 = ax1.twiny()
ax2.xaxis.tick_top()
atdf = ax1.get_xticks()
indat = scipy.ones(atdf.shape, dtype = scipy.integer)
if sloglam[-1] >= sloglam[1]:
for j in range(len(sloglam)-1, -1, -1):
indat[atdf <= sloglam[j]] = j
else:
for j in range(len(sloglam)):
indat[atdf <= sloglam[j]] = j
prettydf = cvobject['nzero'][indat]
ax2.set(XLim=xlim1, XTicks = atdf, XTickLabels = prettydf)
ax2.grid()
ax1.yaxis.grid()
ax2.set_xlabel('Degrees of Freedom')
# plt.plot(xlim1, [ylim1[1], ylim1[1]], 'b')
# plt.plot([xlim1[1], xlim1[1]], ylim1, 'b')
if sign_lambda < 0:
ax1.set_xlabel('-log(Lambda)')
else:
ax1.set_xlabel('log(Lambda)')
ax1.set_ylabel(cvobject['name'])
#plt.show()
| gpl-2.0 |
xyguo/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
jaidevd/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
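# model() above is the logistic (sigmoid) function sigma(t) = 1 / (1 + exp(-t));
# applying it to the fitted decision function clf.coef_ * X_test + clf.intercept_
# below turns the linear score into P(y = 1 | x) under the logistic model.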
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
| bsd-3-clause |
kashif/scikit-learn | examples/svm/plot_svm_anova.py | 85 | 2024 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
phdowling/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
alantian/polyglot | docs/conf.py | 4 | 10864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Solution to RTD problem suggested by
# http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/
import mock
MOCK_MODULES = ['numpy', 'PyICU', 'icu']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import polyglot
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.pngmath',
'sphinx.ext.linkcode',
# 'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
# 'IPython.sphinxext.ipython_console_highlighting',
# 'IPython.sphinxext.ipython_directive'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'polyglot'
copyright = u'2014-2015, Rami Al-Rfou'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = polyglot.__version__
# The full version, including alpha/beta/rc tags.
release = polyglot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
#import klink
#html_theme = 'klink'
#html_theme_path = [klink.get_html_theme_path()]
#import alabaster
#html_theme = 'alabaster'
#html_theme_path = [alabaster.get_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'polyglotdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
'inputenc': '',
'utf8extra': '',
    'preamble': r'''
\usepackage{fontspec}
\setmainfont{Linux Libertine O}
%\setmonofont{DejaVu Sans Mono}
\setmonofont{Courier New}
%\setmonofont{FreeMono}
''',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index_latex', 'polyglot.tex',
u'polyglot Documentation',
u'Rami Al-Rfou', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'polyglot',
u'polyglot Documentation',
[u'Rami Al-Rfou'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'polyglot',
u'polyglot Documentation',
u'Rami Al-Rfou',
'polyglot',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('polyglot',
u'https://github.com/aboSamoor/polyglot'
'/blob/{revision}/'
'{package}/{path}#L{lineno}')
| gpl-3.0 |
cainiaocome/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
RichardJamesLopez/project_openstreet_data | py_project_openstreetmap_data_chart.py | 1 | 3569 | ### project_openstreetmap_data_chart
import sqlite3
import csv
from pprint import pprint
# From previous file
sqlite_file = "udacity_project_openstreetdata_rjl/bozeman_MT.osm.db"
# connecting
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
# Count the number of unique users that contributed to this file
unique_users = cur.execute("""SELECT COUNT(DISTINCT(e.uid))
FROM (SELECT uid FROM nodes UNION ALL SELECT uid FROM ways) e;""").fetchall()
pprint (unique_users)
# Now return to dissecting the data to search for venue types for possible visualization
cuisine_loc = cur.execute("""SELECT b.id, b.value, nodes.lat, nodes.lon
FROM (SELECT * FROM nodes_tags UNION ALL SELECT * FROM ways_tags) b
JOIN nodes ON b.id = nodes.id
WHERE b.key = 'cuisine'""").fetchall()
pprint(cuisine_loc)
len(cuisine_loc)
# Let's fetch the data for venue types so we can plot them together:
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# First test plot of the restaurant locations
plt.scatter([x[2] for x in cuisine_loc], [y[3] for y in cuisine_loc])
# Let's add some labels
plt.scatter([x[2] for x in cuisine_loc], [y[3] for y in cuisine_loc])
plt.xlabel('Latitude')
plt.ylabel('Longitude')
plt.title('Location of Restaurants in the Bozeman_MT area')
# For sizing parameters of plot:
# https://codeyarns.com/2014/10/27/how-to-change-size-of-matplotlib-plot/
fig_size = plt.rcParams["figure.figsize"]
fig_size
fig_size[0] = 12
fig_size[1] = 9
plt.rcParams['figure.figsize'] = fig_size
# Now the same for coffee shops (but first get a count of the cafes)
cafe = cur.execute("""SELECT b.id, b.value, nodes.lat, nodes.lon
FROM (SELECT * FROM nodes_tags UNION ALL SELECT * FROM ways_tags) b
JOIN nodes ON b.id = nodes.id
WHERE b.value = 'cafe'""").fetchall()
pprint (len(cafe))
# or using the same way that we counted the nodes and tags
cur.execute("""SELECT COUNT (*) FROM nodes_tags WHERE value = 'cafe'""")
cur.fetchall()
plt.scatter([x[2] for x in cafe], [y[3] for y in cafe], c='green')
plt.xlabel('Latitude')
plt.ylabel('Longitude')
plt.title('Location of Cafes in the Bozeman_MT area')
# Bring them together
plt.scatter([x[2] for x in cuisine_loc], [y[3] for y in cuisine_loc], c='blue', label="Restaurants")
plt.scatter([x[2] for x in cafe], [y[3] for y in cafe], c='red', label="Cafes")
plt.xlabel('Latitude')
plt.ylabel('Longitude')
plt.title('Restaurants and Cafes')
plt.legend(loc=2)
# Now for the ATMs:
atm = cur.execute("""SELECT b.id, b.value, nodes.lat, nodes.lon
FROM (SELECT * FROM nodes_tags UNION ALL SELECT * FROM ways_tags) b
JOIN nodes ON b.id = nodes.id
WHERE b.value = 'atm'""").fetchall()
# Plot #3
plt.scatter([x[2] for x in atm], [y[3] for y in atm], c='red')
plt.xlabel('Latitude')
plt.ylabel('Longitude')
plt.title('Location of ATMs in the Bozeman_MT area')
# Putting them all together, we get the following plot:
plt.scatter([x[2] for x in cuisine_loc], [y[3] for y in cuisine_loc], c='blue', label="Restaurants")
plt.scatter([x[2] for x in cafe], [y[3] for y in cafe], c='green', label="Cafes")
plt.scatter([x[2] for x in atm], [y[3] for y in atm], c='red', label="ATMs")
plt.xlabel('Latitude')
plt.ylabel('Longitude')
plt.title('Restaurants, Cafes, ATMs')
plt.legend(loc=2)
# Close the db connection
conn.close()
| mit |
arahuja/scikit-learn | doc/conf.py | 16 | 8442 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/feature_selection/plot_select_from_model_boston.py | 146 | 1527 | """
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Raise the threshold until the number of selected features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the meta-transformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
justinbois/bebi103_utils | bebi103/deprecated/pm.py | 2 | 21252 | import warnings
import joblib
import numpy as np
import pandas as pd
import pymc3 as pm
import pymc3.stats
import pymc3.model
import theano.tensor as tt
import tqdm
from .hotdists import *
def _log_like_trace(trace, model, progressbar=False):
"""Calculate the elementwise log-likelihood for the sampled trace.
Parameters
----------
trace : result of MCMC run
model : PyMC Model
Optional model. Default None, taken from context.
progressbar: bool
Whether or not to display a progress bar in the command line. The
bar shows the percentage of completion, the evaluation speed, and
the estimated time to completion
Returns
-------
logp : array of shape (n_samples, n_observations)
The contribution of the observations to the log likelihood of
the whole model.
Notes
-----
.. This is a copy of the pymc3.stats._log_post_trace() function for
PyMC3 version 3.2. That is a misnomer, since it is not the log
posterior for the trace, but rather the contributions to the
log posterior of each observation in the likelihood.
"""
cached = [(var, var.logp_elemwise) for var in model.observed_RVs]
def logp_vals_point(pt):
if len(model.observed_RVs) == 0:
return floatX(np.array([], dtype='d'))
logp_vals = []
for var, logp in cached:
logp = logp(pt)
if var.missing_values:
logp = logp[~var.observations.mask]
logp_vals.append(logp.ravel())
return np.concatenate(logp_vals)
try:
points = trace.points()
except AttributeError:
points = trace
points = tqdm.tqdm(points) if progressbar else points
try:
logp = (logp_vals_point(pt) for pt in points)
return np.stack(logp)
finally:
if progressbar:
points.close()
def _log_prior_trace(trace, model):
"""Calculate the elementwise log-prior for the sampled trace.
Parameters
----------
trace : result of MCMC run
model : PyMC Model
Optional model. Default None, taken from context.
Returns
-------
logp : array of shape (n_samples, n_observations)
The contribution of the log prior.
"""
cached = [var.logp for var in model.unobserved_RVs
if type(var) == pymc3.model.FreeRV]
def logp_vals_point(pt):
if len(model.unobserved_RVs) == 0:
return floatX(np.array([], dtype='d'))
return np.array([logp(pt) for logp in cached])
try:
points = trace.points()
except AttributeError:
points = trace
logp = (logp_vals_point(pt) for pt in points)
return np.stack(logp)
def _log_posterior_trace(trace, model):
"""
Log posterior of each point in a trace.
"""
return (_log_like_trace(trace, model).sum(axis=1)
+ _log_prior_trace(trace, model).sum(axis=1))
def trace_to_dataframe(trace, model=None, log_post=False):
"""
Convert a PyMC3 trace to a Pandas DataFrame
Parameters
----------
trace : PyMC3 trace
Trace returned from pm.sample()
model : PyMC3 model, default None
Model returned from pm.Model()
log_post : bool, default False
If True, also compute the log posterior.
Returns
-------
output : Pandas DataFrame
DataFrame with samples and various sampling statistics.
"""
df = pm.trace_to_dataframe(trace, chains=[0])
for stat in trace.stat_names:
if stat in df.columns:
            warnings.warn('`' + stat + '` is in the variable names.'
+ ' Not adding this statistic.')
else:
df[stat] = trace.get_sampler_stats(stat, chains=[0])
if 'chain' in df.columns:
        warnings.warn('`chain` is in the variable names.'
+ ' Not adding this statistic.')
else:
df['chain'] = np.array([0]*len(df), dtype=int)
if trace.nchains > 1:
for chain in trace.chains[1:]:
df_app = pm.trace_to_dataframe(trace, chains=[chain])
for stat in trace.stat_names:
if stat not in df_app.columns:
df_app[stat] = trace.get_sampler_stats(stat,
chains=[chain])
if 'chain' not in df_app.columns:
df_app['chain'] = np.array([chain]*len(df_app))
df = df.append(df_app, ignore_index=True)
if log_post:
# Extract the model from context if necessary
model = pm.modelcontext(model)
df['log_likelihood'] = _log_like_trace(trace, model).sum(axis=1)
df['log_prior'] = _log_prior_trace(trace, model).sum(axis=1)
df['log_posterior'] = df['log_likelihood'] + df['log_prior']
return df
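# A minimal sketch of how trace_to_dataframe() is typically used; the toy
# model and the observed data `x` are hypothetical stand-ins, and the example
# is wrapped in a helper so nothing runs at import time.
def _example_trace_to_dataframe(x):
    """Sample a toy Normal model and tidy the trace into a DataFrame."""
    with pm.Model() as model:
        mu = pm.Normal('mu', mu=0, sd=10)
        sigma = pm.HalfNormal('sigma', sd=1)
        pm.Normal('x_obs', mu=mu, sd=sigma, observed=x)
        trace = pm.sample(draws=1000, tune=1000)
    # One row per sample, with sampler statistics and the log posterior
    return trace_to_dataframe(trace, model=model, log_post=True)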
class Jeffreys(pm.Continuous):
"""
Jeffreys prior for a scale parameter.
Parameters
----------
lower : float, > 0
Minimum value the variable can take.
upper : float, > `lower`
Maximum value the variable can take.
Returns
-------
output : pymc3 distribution
Distribution for Jeffreys prior.
"""
def __init__(self, lower=None, upper=None, transform='interval',
*args, **kwargs):
# Check inputs
if lower is None or upper is None:
raise RuntimeError('`lower` and `upper` must be provided.')
if transform == 'interval':
transform = pm.distributions.transforms.interval(lower, upper)
super(Jeffreys, self).__init__(transform=transform, *args, **kwargs)
self.lower = lower = pm.theanof.floatX(tt.as_tensor_variable(lower))
self.upper = upper = pm.theanof.floatX(tt.as_tensor_variable(upper))
self.mean = (upper - lower) / tt.log(upper/lower)
self.median = tt.sqrt(lower * upper)
self.mode = lower
def logp(self, value):
lower = self.lower
upper = self.upper
return pm.distributions.dist_math.bound(
-tt.log(tt.log(upper/lower)) - tt.log(value),
value >= lower, value <= upper)
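# A minimal sketch of placing the Jeffreys prior above on a scale parameter;
# the bounds and the observed data `x` are hypothetical choices, and the
# model is only built, not sampled.
def _example_jeffreys_prior(x):
    """Normal model with a Jeffreys prior on the standard deviation."""
    with pm.Model() as model:
        mu = pm.Normal('mu', mu=0, sd=10)
        sigma = Jeffreys('sigma', lower=0.01, upper=100)
        pm.Normal('x_obs', mu=mu, sd=sigma, observed=x)
    return model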
class MarginalizedHomoscedasticNormal(pm.Continuous):
"""
Likelihood generated by marginalizing out a homoscedastic variance
from a Normal distribution.
Parameters
----------
mu : array
Mean of the distribution.
Returns
-------
output : pymc3 distribution
Distribution for a multivariate Gaussian with homoscedastic
error, normalized over sigma.
"""
def __init__(self, mu, *args, **kwargs):
super(MarginalizedHomoscedasticNormal, self).__init__(*args, **kwargs)
self.mu = mu = tt.as_tensor_variable(mu)
self.mean = mu
self.mode = mu
def logp(self, value):
n = value.shape[-1]
prefactor = ( pm.distributions.dist_math.gammaln(n/2)
- tt.log(2)
- 0.5 * n * tt.log(np.pi))
return prefactor - 0.5 * n * tt.log(tt.sum((value - self.mu)**2))
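# A minimal sketch of using the marginalized likelihood above so that the
# homoscedastic sigma never has to be sampled explicitly; the data `x` are a
# hypothetical 1-D array of measurements.
def _example_marginalized_normal(x):
    """Location-only model; sigma is integrated out analytically."""
    with pm.Model() as model:
        mu = pm.Flat('mu')
        MarginalizedHomoscedasticNormal('x_obs', mu=mu, observed=x)
    return model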
class GoodBad(pm.Continuous):
"""
Likelihood for the good-bad data model, in which each data point
is either "good" with a small variance or "bad" with a large
variance.
Parameters
----------
w : float
Probability that a data point is "good."
mu : float
Mean of the distribution.
sigma : float
Standard deviation for "good" data points.
sigma_bad : float
Standard deviation for "bad" data points.
Returns
-------
output : pymc3 distribution
Distribution for the good-bad data model.
"""
def __init__(self, mu, sigma, sigma_bad, w, *args, **kwargs):
super(GoodBad, self).__init__(*args, **kwargs)
self.mu = mu = tt.as_tensor_variable(mu)
self.sigma = tt.as_tensor_variable(sigma)
self.sigma_bad = tt.as_tensor_variable(sigma_bad)
self.w = tt.as_tensor_variable(w)
self.mean = mu
self.median = mu
self.mode = mu
def logp(self, value):
prefactor = -tt.log(2.0 * np.pi) / 2.0
ll_good = ( tt.log(self.w / self.sigma)
- ((value - self.mu) / self.sigma)**2 / 2.0)
ll_bad = ( tt.log((1.0 - self.w) / self.sigma_bad)
- ((value - self.mu) / self.sigma_bad)**2 / 2.0)
term = tt.switch(tt.gt(ll_good, ll_bad),
ll_good + tt.log(1 + tt.exp(ll_bad - ll_good)),
ll_bad + tt.log(1 + tt.exp(ll_good - ll_bad)))
return prefactor + term
def ReparametrizedNormal(name, mu=None, sd=None, shape=1):
"""
Create a reparametrized Normally distributed random variable.
Parameters
----------
name : str
Name of the variable.
mu : float
Mean of Normal distribution.
sd : float, > 0
Standard deviation of Normal distribution.
shape: int or tuple of ints, default 1
Shape of array of variables. If 1, then a single scalar.
Returns
-------
output : pymc3 distribution
Distribution for a reparametrized Normal distribution.
Notes
-----
.. The reparametrization procedure allows the sampler to sample
a standard normal distribution, and then do a deterministic
reparametrization to achieve sampling of the original desired
Normal distribution.
"""
# Check inputs
if type(name) != str:
raise RuntimeError('`name` must be a string.')
if mu is None or sd is None:
raise RuntimeError('`mu` and `sd` must be provided.')
var_reparam = pm.Normal(name + '_reparam', mu=0, sd=1, shape=shape)
var = pm.Deterministic(name, mu + var_reparam * sd)
return var
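# A minimal sketch of a non-centered hierarchy built with the helper above;
# the hyperpriors and the number of groups are hypothetical choices.
def _example_reparametrized_normal(n_groups=5):
    """Group-level means sampled as standard normals, then rescaled."""
    with pm.Model() as model:
        mu_pop = pm.Normal('mu_pop', mu=0, sd=10)
        sigma_pop = pm.HalfCauchy('sigma_pop', beta=1)
        # The sampler sees 'theta_reparam'; 'theta' is deterministic.
        ReparametrizedNormal('theta', mu=mu_pop, sd=sigma_pop, shape=n_groups)
    return model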
def ReparametrizedCauchy(name, alpha=None, beta=None, shape=1):
"""
Create a reparametrized Cauchy distributed random variable.
Parameters
----------
name : str
Name of the variable.
alpha : float
Mode of Cauchy distribution.
beta : float, > 0
Scale parameter of Cauchy distribution
shape: int or tuple of ints, default 1
Shape of array of variables. If 1, then a single scalar.
Returns
-------
output : pymc3 distribution
Reparametrized Cauchy distribution.
Notes
-----
.. The reparametrization procedure allows the sampler to sample
a Cauchy distribution with alpha = 0 and beta = 1, and then do a
deterministic reparametrization to achieve sampling of the
original desired Cauchy distribution.
"""
# Check inputs
if type(name) != str:
raise RuntimeError('`name` must be a string.')
if alpha is None or beta is None:
raise RuntimeError('`alpha` and `beta` must be provided.')
var_reparam = pm.Cauchy(name + '_reparam', alpha=0, beta=1, shape=shape)
var = pm.Deterministic(name, alpha + var_reparam * beta)
return var
class Ordered(pm.distributions.transforms.ElemwiseTransform):
"""
Class defining transform to order entries in an array.
Code from Adrian Seyboldt from PyMC3 discourse: https://discourse.pymc.io/t/mixture-models-and-breaking-class-symmetry/208/4
"""
name = 'ordered'
def forward(self, x):
out = tt.zeros(x.shape)
out = tt.inc_subtensor(out[0], x[0])
out = tt.inc_subtensor(out[1:], tt.log(x[1:] - x[:-1]))
return out
def forward_val(self, x, point=None):
x, = pm.distributions.distribution.draw_values([x], point=point)
return self.forward(x)
def backward(self, y):
out = tt.zeros(y.shape)
out = tt.inc_subtensor(out[0], y[0])
out = tt.inc_subtensor(out[1:], tt.exp(y[1:]))
return tt.cumsum(out)
def jacobian_det(self, y):
return tt.sum(y[1:])
class Composed(pm.distributions.transforms.Transform):
"""
Class to build a transform out of an elementwise transform.
Code from Adrian Seyboldt from PyMC3 discourse: https://discourse.pymc.io/t/mixture-models-and-breaking-class-symmetry/208/4
"""
def __init__(self, trafo1, trafo2):
self._trafo1 = trafo1
self._trafo2 = trafo2
self.name = '_'.join([trafo1.name, trafo2.name])
def forward(self, x):
return self._trafo2.forward(self._trafo1.forward(x))
def forward_val(self, x, point=None):
return self.forward(x)
def backward(self, y):
return self._trafo1.backward(self._trafo2.backward(y))
def jacobian_det(self, y):
y2 = self._trafo2.backward(y)
det1 = self._trafo1.jacobian_det(y2)
det2 = self._trafo2.jacobian_det(y)
return det1 + det2
def ordered_transform():
"""
Make an ordered transform.
Returns
-------
output : pm.distirbutions.transforms.Transform subclass instance
Transform to order entries in tensor.
Example
-------
To insist on ordering probabilities, p1 <= p2 <= p3,
>>> p = pymc3.Beta('p',
alpha=1,
beta=1,
shape=3,
transform=ordered_transform())
"""
return Composed(pm.distributions.transforms.LogOdds(), Ordered())
def hotdist(dist, name, beta_temp, *args, **kwargs):
"""
Instantiate a "hot" distribution. The "hot" distribution takes the
value returned by the logp method of `dist` and returns beta * logp.
Parameters
----------
dist : PyMC3 distribution
The name of a distribution you want to make hot. Examples:
pm.Normal, pm.Binomial, pm.MvNormal, pm.Dirichlet.
name : str
Name of the random variable.
beta_temp : float on interval [0, 1]
Beta value (inverse temperature) of the distribution.
Returns
-------
output : pymc3 distribution
Hot distribution.
"""
class HotDistribution(dist):
def __init__(self, beta_temp, *args, **kwargs):
super(HotDistribution, self).__init__(*args, **kwargs)
if not (0 <= beta_temp <= 1):
raise RuntimeError('Must have 0 ≤ beta_temp ≤ 1.')
self.beta_temp = beta_temp
def logp(self, value):
return self.beta_temp * dist.logp(self, value)
return HotDistribution(name, beta_temp, *args, **kwargs)
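# A minimal sketch of building a tempered likelihood with hotdist(); the data
# `x` and the priors are hypothetical, mirroring the sample_beta_ladder()
# docstring example below.
def _example_hot_likelihood(x, beta_temp=0.5):
    """Normal likelihood whose log-probability is scaled by beta_temp."""
    with pm.Model() as model:
        mu = pm.Flat('mu')
        sigma = pm.HalfCauchy('sigma', beta=1)
        hotdist(pm.Normal, 'x_obs', beta_temp, mu=mu, sd=sigma, observed=x)
    return model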
def beta_ladder(n=20, beta_min=1e-8):
return np.logspace(np.log10(beta_min), 0, n)
def _sample_beta(beta, model_fun, args, kwargs):
print(f'Sampling beta = {beta}....')
model = model_fun(beta, *args)
with model:
trace = pm.sample(**kwargs)
return trace, model, beta
def sample_beta_ladder(model_fun, betas, args=(), njobs=1, draws=500,
tune=500, progressbar=True, **kwargs):
"""
Draw MCMC samples for a distribution for various values of beta.
Parameters
----------
model_fun : function
Function that returns a PyMC3 model. Must have call signature
model_fun(beta_temp, *args), where `beta_temp` is the inverse
temperature.
betas : array_like
Array of beta values to sample.
args : tuple, default ()
Any additional arguments passed into `model_fun`.
njobs : int, default 1
Number of temperatures to run in parallel. This is *not* the
number of samplers to run in parallel for each temperature.
Each temperature is only sampled with a single walker.
draws : int, default 500
Number of samples to generate for each temperature.
tune : int, default 500
Number of tuning steps to take for each temperature.
progressbar : bool, default True
        If True, show progress bars of the samplers.
kwargs
All additional kwargs are passed to pm.sample().
Returns
-------
output : list of tuples
List of tuples. The first tuple is the traces for each beta
value. The second tuple is a tuple of compiled PyMC3 models.
The last is a tuple of beta values.
Example
-------
.. Draw samples out of a Normal distribution with a flat prior
on `mu` and a HalfCauchy prior on `sigma`.
x = np.random.normal(0, 1, size=100)
def norm_model(beta_temp, beta_cauchy, x):
with pm.Model() as model:
mu = pm.Flat('mu')
sigma = pm.HalfCauchy('sigma', beta=beta_cauchy)
x_obs = HotNormal('x_obs', beta_temp=beta_temp, mu=mu,
sd=sigma, observed=x)
return model
betas = np.logspace(-3, 0, 10)
tmb = sample_beta_ladder(norm_model, betas, args=(beta_cauchy, x))
"""
# Insert code here to pop draws, tune, and progressbar out of kwargs
if np.any(betas < 0) or np.any(betas > 1):
raise RuntimeError('All beta values must be on interval (0, 1].')
if not np.any(betas == 1):
warnings.warn(
'You probably want to sample beta = 1, the cold distribution.')
if np.any(betas == 0):
raise RuntimeError("Sampling beta = 0 not allowed;"
+ " you're just sampling the prior in that case.")
if len(betas) != len(np.unique(betas)):
raise RuntimeError('Repeated beta entry.')
kwargs['draws'] = draws
kwargs['tune'] = tune
kwargs['progressbar'] = progressbar
if njobs == 1:
return [_sample_beta(beta, model_fun, args, kwargs) for beta in betas]
else:
jobs = (joblib.delayed(_sample_beta)(beta, model_fun, args, kwargs)
for beta in betas)
return joblib.Parallel(n_jobs=njobs)(jobs)
def log_evidence_estimate(trace_model_beta):
"""
Compute an estimate of the log evidence.
Parameters
----------
trace_model_beta : list of (trace, model, beta) tuples
List of (trace, model, beta) tuples as would be returned by
sample_beta_ladder().
Returns
-------
output : float
        Approximate log evidence.
"""
# Extract traces, models, and betas
betas = []
traces = []
models = []
for tmb in trace_model_beta:
traces.append(tmb[0])
models.append(tmb[1])
betas.append(tmb[2])
betas = np.array(betas)
if np.any(betas <= 0) or np.any(betas > 1):
raise RuntimeError('All beta values must be between zero and one.')
if len(betas) != len(np.unique(betas)):
raise RuntimeError('Repeated beta entry.')
# Sort
inds = np.argsort(betas)
    betas = [betas[i] for i in inds]
    traces = [traces[i] for i in inds]
    models = [models[i] for i in inds]
# Compute average log likelihood
mean_log_like = []
for beta, trace, model in zip(betas, traces, models):
mean_log_like.append(_log_like_trace(trace, model).sum(axis=1).mean()
/ beta)
# Add zero value
betas = np.concatenate(((0,), betas))
mean_log_like = np.concatenate(((mean_log_like[0],), mean_log_like))
# Perform integral
return np.trapz(mean_log_like, x=betas)
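# A minimal sketch of estimating the log evidence by thermodynamic
# integration; `model_fun` is a hypothetical model factory with the call
# signature expected by sample_beta_ladder() above, and the ladder settings
# are illustrative.
def _example_log_evidence(model_fun, args=()):
    """Sample a ladder of tempered posteriors and integrate over beta."""
    betas = beta_ladder(n=10, beta_min=1e-3)
    tmb = sample_beta_ladder(model_fun, betas, args=args, draws=500, tune=500)
    return log_evidence_estimate(tmb)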
def chol_to_cov(chol, cov_prefix):
"""
Convert flattened Cholesky matrix to covariance.
Parameters
----------
chol : array_like
Lexicographically flattened Cholesky decomposition as returned
from trace.get_values(chol), where trace is a PyMC3 MultiTrace
instance.
    cov_prefix : str
        Prefix for the name of the covariance variable. Results are
stored as prefix__i__j, where i and j are the row and column
indices, respectively.
Returns
-------
output : Pandas DataFrame
DataFrame with values of samples of the components of the
covariance matrix.
"""
chol = np.array(chol)
n = int(np.round((-1 + np.sqrt(8*chol.shape[1] + 1)) / 2))
sigma = np.zeros_like(chol)
inds = np.tril_indices(n)
for i, r in enumerate(chol):
L = np.zeros((n, n))
L[inds] = r
sig = np.dot(L, L.T)
sigma[i] = sig[inds]
cols = ['{0:s}__{1:d}__{2:d}'.format(cov_prefix, i, j)
for i, j in zip(*inds)]
return pd.DataFrame(columns=cols, data=sigma)
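# A minimal sketch of converting packed Cholesky samples to covariance
# entries; the trace and the variable name 'chol_cov' (e.g. from
# pm.LKJCholeskyCov) are hypothetical.
def _example_chol_to_cov(trace):
    """Return a DataFrame of covariance-matrix samples named cov__i__j."""
    chol_samples = trace.get_values('chol_cov')
    return chol_to_cov(chol_samples, 'cov')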
def hpd(x, mass_frac) :
"""
Returns highest probability density region given by
a set of samples.
Parameters
----------
x : array
1D array of MCMC samples for a single variable
mass_frac : float with 0 < mass_frac <= 1
The fraction of the probability to be included in
the HPD. For example, `massfrac` = 0.95 gives a
95% HPD.
Returns
-------
output : array, shape (2,)
The bounds of the HPD
"""
# Get sorted list
d = np.sort(np.copy(x))
# Number of total samples taken
n = len(x)
# Get number of samples that should be included in HPD
n_samples = np.floor(mass_frac * n).astype(int)
# Get width (in units of data) of all intervals with n_samples samples
int_width = d[n_samples:] - d[:n-n_samples]
# Pick out minimal interval
min_int = np.argmin(int_width)
# Return interval
return np.array([d[min_int], d[min_int+n_samples]])
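# A minimal sketch of computing a 95% HPD credible interval from posterior
# samples; the DataFrame and its column name 'mu' are hypothetical.
def _example_hpd(df_samples):
    """Return the 95% highest posterior density interval for 'mu'."""
    return hpd(df_samples['mu'].values, 0.95)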
| mit |
plotly/python-api | packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_api/test_v2/test_utils.py | 2 | 9482 | from __future__ import absolute_import
import json as _json
from requests.exceptions import ConnectionError
from plotly import version
from chart_studio.api.utils import to_native_utf8_string
from chart_studio.api.v2 import utils
from chart_studio.exceptions import PlotlyRequestError
from chart_studio.session import sign_in
from chart_studio.tests.test_plot_ly.test_api import PlotlyApiTestCase
class MakeParamsTest(PlotlyApiTestCase):
def test_make_params(self):
params = utils.make_params(foo="FOO", bar=None)
self.assertEqual(params, {"foo": "FOO"})
def test_make_params_empty(self):
params = utils.make_params(foo=None, bar=None)
self.assertEqual(params, {})
class BuildUrlTest(PlotlyApiTestCase):
def test_build_url(self):
url = utils.build_url("cats")
self.assertEqual(url, "{}/v2/cats".format(self.plotly_api_domain))
def test_build_url_id(self):
url = utils.build_url("cats", id="MsKitty")
self.assertEqual(url, "{}/v2/cats/MsKitty".format(self.plotly_api_domain))
def test_build_url_route(self):
url = utils.build_url("cats", route="about")
self.assertEqual(url, "{}/v2/cats/about".format(self.plotly_api_domain))
def test_build_url_id_route(self):
url = utils.build_url("cats", id="MsKitty", route="de-claw")
self.assertEqual(
url, "{}/v2/cats/MsKitty/de-claw".format(self.plotly_api_domain)
)
class ValidateResponseTest(PlotlyApiTestCase):
def test_validate_ok(self):
try:
utils.validate_response(self.get_response())
except PlotlyRequestError:
self.fail("Expected this to pass!")
def test_validate_not_ok(self):
bad_status_codes = (400, 404, 500)
for bad_status_code in bad_status_codes:
response = self.get_response(status_code=bad_status_code)
self.assertRaises(PlotlyRequestError, utils.validate_response, response)
def test_validate_no_content(self):
# We shouldn't flake if the response has no content.
response = self.get_response(content=b"", status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, "No Content")
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content.decode("utf-8"), "")
else:
self.fail("Expected this to raise!")
def test_validate_non_json_content(self):
response = self.get_response(content=b"foobar", status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, "foobar")
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, b"foobar")
else:
self.fail("Expected this to raise!")
def test_validate_json_content_array(self):
content = self.to_bytes(_json.dumps([1, 2, 3]))
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, to_native_utf8_string(content))
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
def test_validate_json_content_dict_no_errors(self):
content = self.to_bytes(_json.dumps({"foo": "bar"}))
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, to_native_utf8_string(content))
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
def test_validate_json_content_dict_one_error_bad(self):
content = self.to_bytes(_json.dumps({"errors": [{}]}))
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, to_native_utf8_string(content))
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
content = self.to_bytes(_json.dumps({"errors": [{"message": ""}]}))
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, to_native_utf8_string(content))
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
def test_validate_json_content_dict_one_error_ok(self):
content = self.to_bytes(_json.dumps({"errors": [{"message": "not ok!"}]}))
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, "not ok!")
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
def test_validate_json_content_dict_multiple_errors(self):
content = self.to_bytes(
_json.dumps({"errors": [{"message": "not ok!"}, {"message": "bad job..."}]})
)
response = self.get_response(content=content, status_code=400)
try:
utils.validate_response(response)
except PlotlyRequestError as e:
self.assertEqual(e.message, "not ok!\nbad job...")
self.assertEqual(e.status_code, 400)
self.assertEqual(e.content, content)
else:
self.fail("Expected this to raise!")
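# The cases above pin down how an error message is expected to be surfaced from a
# failed response: prefer the "errors" list when every entry carries a non-empty
# "message" (joined with newlines), otherwise fall back to the raw body, or
# "No Content" for an empty body. A minimal illustrative sketch of that logic --
# an assumption for readability, not the actual chart_studio implementation:
def _sketch_error_message(content):
    import json
    text = content.decode("utf-8")
    if not text:
        return "No Content"
    try:
        parsed = json.loads(text)
    except ValueError:
        return text
    if isinstance(parsed, dict) and parsed.get("errors"):
        messages = [error.get("message") for error in parsed["errors"]]
        if all(messages):
            return "\n".join(messages)
    return text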
class GetHeadersTest(PlotlyApiTestCase):
def test_normal_auth(self):
headers = utils.get_headers()
expected_headers = {
"plotly-client-platform": "python {}".format(version.stable_semver()),
"authorization": "Basic Zm9vOmJhcg==",
"content-type": "application/json",
}
self.assertEqual(headers, expected_headers)
def test_proxy_auth(self):
sign_in(self.username, self.api_key, plotly_proxy_authorization=True)
headers = utils.get_headers()
expected_headers = {
"plotly-client-platform": "python {}".format(version.stable_semver()),
"authorization": "Basic Y25ldDpob29wbGE=",
"plotly-authorization": "Basic Zm9vOmJhcg==",
"content-type": "application/json",
}
self.assertEqual(headers, expected_headers)
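# For reference, the hard-coded header values above are plain HTTP Basic credentials:
# "Zm9vOmJhcg==" is base64 of "foo:bar" and "Y25ldDpob29wbGE=" is base64 of
# "cnet:hoopla" (the username/api-key and proxy credentials assumed by this test
# setup). A hedged sketch of how such a header can be built:
def _sketch_basic_auth(username, api_key):
    import base64
    credentials = "{}:{}".format(username, api_key).encode("utf-8")
    return "Basic " + base64.b64encode(credentials).decode("ascii")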
class RequestTest(PlotlyApiTestCase):
def setUp(self):
super(RequestTest, self).setUp()
# Mock the actual api call, we don't want to do network tests here.
self.request_mock = self.mock("chart_studio.api.v2.utils.requests.request")
self.request_mock.return_value = self.get_response()
# Mock the validation function since we can test that elsewhere.
self.validate_response_mock = self.mock(
"chart_studio.api.v2.utils.validate_response"
)
self.method = "get"
self.url = "https://foo.bar.does.not.exist.anywhere"
def test_request_with_params(self):
        # urlencode transforms `True` --> `'True'`, which isn't super helpful.
        # Our backend accepts the JS `true`, so we want `True` --> `'true'`.
params = {"foo": True, "bar": "True", "baz": False, "zap": 0}
utils.request(self.method, self.url, params=params)
args, kwargs = self.request_mock.call_args
method, url = args
expected_params = {"foo": "true", "bar": "True", "baz": "false", "zap": 0}
self.assertEqual(method, self.method)
self.assertEqual(url, self.url)
self.assertEqual(kwargs["params"], expected_params)
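    # A sketch of the substitution this test expects -- only Python booleans are
    # rewritten to their lowercase JSON spelling, everything else passes through
    # unchanged (illustration only, not the actual implementation):
    #     params = {k: ("true" if v is True else "false" if v is False else v)
    #               for k, v in params.items()}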
def test_request_with_non_native_objects(self):
# We always send along json, but it may contain non-native objects like
# a pandas array or a Column reference. Make sure that's handled in one
# central place.
class Duck(object):
def to_plotly_json(self):
return "what else floats?"
utils.request(self.method, self.url, json={"foo": [Duck(), Duck()]})
args, kwargs = self.request_mock.call_args
method, url = args
expected_data = '{"foo": ["what else floats?", "what else floats?"]}'
self.assertEqual(method, self.method)
self.assertEqual(url, self.url)
self.assertEqual(kwargs["data"], expected_data)
self.assertNotIn("json", kwargs)
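    # The Duck above illustrates the duck-typing hook: anything exposing
    # to_plotly_json() can be serialized. A minimal hedged sketch of an encoder that
    # honours it (the real serializer lives elsewhere in plotly and is more involved):
    #     class _SketchEncoder(_json.JSONEncoder):
    #         def default(self, obj):
    #             if hasattr(obj, "to_plotly_json"):
    #                 return obj.to_plotly_json()
    #             return _json.JSONEncoder.default(self, obj)
    #     _json.dumps({"foo": ["what else floats?"]}, cls=_SketchEncoder)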
def test_request_with_ConnectionError(self):
        # requests can flake out and not return a response object; we want to
        # make sure we remain consistent with our errors.
self.request_mock.side_effect = ConnectionError()
self.assertRaises(PlotlyRequestError, utils.request, self.method, self.url)
def test_request_validate_response(self):
# Finally, we check details elsewhere, but make sure we do validate.
utils.request(self.method, self.url)
assert self.request_mock.call_count == 1
| mit |
jjx02230808/project0223 | doc/conf.py | 26 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2015, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
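# For reference, sphinx.ext.linkcode calls ``linkcode_resolve(domain, info)`` and
# expects a URL string (or None) back. A hand-rolled resolver would look roughly
# like the sketch below; the real helper returned by make_linkcode_resolve also
# pins the git revision and line numbers (illustration only, kept as a comment so
# it does not shadow the assignment above):
#     def linkcode_resolve(domain, info):
#         if domain != 'py' or not info.get('module'):
#             return None
#         filename = info['module'].replace('.', '/')
#         return ('https://github.com/scikit-learn/scikit-learn/'
#                 'blob/master/%s.py' % filename)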
| bsd-3-clause |
xclxxl414/rqalpha | rqalpha/mod/rqalpha_mod_alphaStar_factors/mod.py | 1 | 2179 | #coding=utf-8
"""
@author: evilXu
@file: mod.py
@time: 2017/11/14 16:07
@description:
"""
from rqalpha.interface import AbstractMod
from .factor_data import FactorDataInterface
from rqalpha.utils.logger import system_log
import pandas as pd
class FactorDataMod(AbstractMod):
def __init__(self):
self._iData = None
self._inject_api()
def start_up(self, env, mod_config):
system_log.debug("FactorDataMod.start_up,config:{0}",mod_config)
_initDate = pd.Timestamp(mod_config.factor_data_init_date).date() if mod_config.factor_data_init_date is not None else None
self._iData = FactorDataInterface(path = mod_config.factor_data_path,defaultInitDate=_initDate)
def tear_down(self, code, exception=None):
pass
# print(">>> AlphaHDataMode.tear_down")
def _inject_api(self):
from rqalpha import export_as_api
from rqalpha.execution_context import ExecutionContext
from rqalpha.const import EXECUTION_PHASE
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
EXECUTION_PHASE.BEFORE_TRADING,
EXECUTION_PHASE.ON_BAR,
EXECUTION_PHASE.AFTER_TRADING,
EXECUTION_PHASE.SCHEDULED)
def get_factors(fname= "",sdate=None,eDate = None):
'''
            Data layout follows get_fundamentals
pandas + sqlalchemy
numpy + sqlalchemy
hdf5 + sqlalchemy
:param fname:
:param sdate:
:param eDate:
:return:
'''
'''
<class 'pandas.core.frame.DataFrame'>
2017-10-09 INFO 601766.XSHG 601898.XSHG 600998.XSHG 600887.XSHG 601992.XSHG \
revenue 8.8717e+10 3.7104e+10 3.62284e+10 3.34935e+10 2.94658e+10
pe_ratio 27.6174 26.5919 25.741 29.0545 25.0025
'''
return self._iData.getData(fname,sdate,eDate) | apache-2.0 |
datapythonista/pandas | pandas/tests/frame/test_logical_ops.py | 4 | 6172 | import operator
import re
import numpy as np
import pytest
from pandas import (
CategoricalIndex,
DataFrame,
Interval,
Series,
isnull,
)
import pandas._testing as tm
class TestDataFrameLogicalOperators:
# &, |, ^
@pytest.mark.parametrize(
"left, right, op, expected",
[
(
[True, False, np.nan],
[True, False, True],
operator.and_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.and_,
[True, False, False],
),
(
[True, False, np.nan],
[True, False, True],
operator.or_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.or_,
[True, False, True],
),
],
)
def test_logical_operators_nans(self, left, right, op, expected, frame_or_series):
# GH#13896
result = op(frame_or_series(left), frame_or_series(right))
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_logical_ops_empty_frame(self):
# GH#5808
# empty frames, non-mixed dtype
df = DataFrame(index=[1])
result = df & df
tm.assert_frame_equal(result, df)
result = df | df
tm.assert_frame_equal(result, df)
df2 = DataFrame(index=[1, 2])
result = df & df2
tm.assert_frame_equal(result, df2)
dfa = DataFrame(index=[1], columns=["A"])
result = dfa & dfa
expected = DataFrame(False, index=[1], columns=["A"])
tm.assert_frame_equal(result, expected)
def test_logical_ops_bool_frame(self):
# GH#5808
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_bool & df1a_bool
tm.assert_frame_equal(result, df1a_bool)
result = df1a_bool | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
def test_logical_ops_int_frame(self):
# GH#5808
df1a_int = DataFrame(1, index=[1], columns=["A"])
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_int | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
# Check that this matches Series behavior
res_ser = df1a_int["A"] | df1a_bool["A"]
tm.assert_series_equal(res_ser, df1a_bool["A"])
def test_logical_ops_invalid(self):
# GH#5808
df1 = DataFrame(1.0, index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
df1 = DataFrame("foo", index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(
op(df1.values, df2.values), index=df1.index, columns=df1.columns
)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
df1 = {
"a": {"a": True, "b": False, "c": False, "d": True, "e": True},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": False, "b": False, "c": True, "d": False, "e": False},
"d": {"a": True, "b": False, "c": False, "d": True, "e": True},
"e": {"a": True, "b": False, "c": False, "d": True, "e": True},
}
df2 = {
"a": {"a": True, "b": False, "c": True, "d": False, "e": False},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": True, "b": False, "c": True, "d": False, "e": False},
"d": {"a": False, "b": False, "c": False, "d": True, "e": False},
"e": {"a": False, "b": False, "c": False, "d": False, "e": True},
}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
_check_unary_op(operator.inv) # TODO: belongs elsewhere
def test_logical_with_nas(self):
d = DataFrame({"a": [np.nan, False], "b": [True, True]})
# GH4947
# bool comparisons should return bool
result = d["a"] | d["b"]
expected = Series([False, True])
tm.assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d["a"].fillna(False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = d["a"].fillna(False, downcast=False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
def test_logical_ops_categorical_columns(self):
# GH#38367
intervals = [Interval(1, 2), Interval(3, 4)]
data = DataFrame(
[[1, np.nan], [2, np.nan]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
mask = DataFrame(
[[False, False], [False, False]], columns=data.columns, dtype=bool
)
result = mask | isnull(data)
expected = DataFrame(
[[False, True], [False, True]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
elenita1221/BDA_py_demos | demos_ch4/demo4_1.py | 19 | 5306 | """Bayesian Data Analysis, 3rd ed
Chapter 4, demo 1
Normal approximaton for Bioassay model.
"""
from __future__ import division
import numpy as np
from scipy import optimize, stats
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Bioassay data, (BDA3 page 86)
x = np.array([-0.86, -0.30, -0.05, 0.73])
n = np.array([5, 5, 5, 5])
y = np.array([0, 1, 3, 5])
# compute the posterior density in grid
# - usually should be computed in logarithms!
# - with alternative prior, check that range and spacing of A and B
# are sensible
ngrid = 100
A = np.linspace(-4, 8, ngrid)
B = np.linspace(-10, 40, ngrid)
ilogit_abx = 1 / (np.exp(-(A[:,None] + B[:,None,None] * x)) + 1)
p = np.prod(ilogit_abx**y * (1 - ilogit_abx)**(n - y), axis=2)
# alternative "bad" way of calcuting the above two lines in a for loop
'''
p = np.empty((len(B),len(A))) # allocate space
for i in range(len(A)):
for j in range(len(B)):
ilogit_abx_ij = (1 / (np.exp(-(A[i] + B[j] * x)) + 1))
        p[j,i] = np.prod(ilogit_abx_ij**y * (1 - ilogit_abx_ij)**(n - y))
'''
# sample from the grid
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/np.sum(p)),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# samples of LD50
samp_ld50 = -samp_A / samp_B
# Find the mode by minimising negative log posterior. Compute gradients and
# Hessian analytically, and use Newton's method for optimisation. You may use
# optimisation routines below for checking your results. See help for
# scipy.optimize.minimize.
# Define the optimised function
def bioassayfun(w):
a = w[0]
b = w[1]
et = np.exp(a + b * x)
z = et / (1 + et)
e = - np.sum(y * np.log(z) + (n - y) * np.log(1 - z))
return e
# initial guess
w0 = np.array([0.0, 0.0])
# optimise
optim_res = optimize.minimize(bioassayfun, w0)
# extract desired results
w = optim_res['x']
S = optim_res['hess_inv']
# compute the normal approximation density in grid
# this is just for the illustration
# Construct a grid array of shape (ngrid, ngrid, 2) from A and B. Although
# Numpy's concatenation functions do not support broadcasting, a clever trick
# can be applied to overcome this without unnecessary memory copies
# (see Numpy's documentation for strides for more information):
A_broadcasted = np.lib.stride_tricks.as_strided(
A, shape=(ngrid,ngrid), strides=(0,A.strides[0]))
B_broadcasted = np.lib.stride_tricks.as_strided(
B, shape=(ngrid,ngrid), strides=(B.strides[0],0))
grid = np.dstack((A_broadcasted, B_broadcasted))
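# (An equivalent, if slightly more memory-hungry, construction without stride
#  tricks: np.meshgrid materialises the broadcasted copies instead of aliasing A
#  and B, e.g.
#      AA, BB = np.meshgrid(A, B)
#      grid = np.dstack((AA, BB))
#  which yields the same (ngrid, ngrid, 2) array.)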
p_norm = stats.multivariate_normal.pdf(x=grid, mean=w, cov=S)
# draw samples from the distribution
samp_norm = stats.multivariate_normal.rvs(mean=w, cov=S, size=1000)
# ====== Plotting
fig = plt.figure(figsize=(12,10))
fig.subplots_adjust(wspace=0.4, hspace=0.25)
# plot the posterior density
plt.subplot(2,3,1)
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the samples
plt.subplot(2,3,2)
plt.scatter(samp_A, samp_B, 10, c='#377eb8', linewidth=0)
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
plt.text(0,-7,'p(beta>0)={:.2f}'.format(np.mean(samp_B>0)))
# plot the histogram of LD50
plt.subplot(2,3,3)
plt.hist(samp_ld50, np.linspace(-0.8, 0.8, 31))
plt.xlim([-0.8, 0.8])
plt.xlabel(r'LD50 = -$\alpha/\beta$')
plt.yticks(())
plt.xticks(np.linspace(-0.8, 0.8, 5))
# plot the posterior density for normal approx.
plt.subplot(2,3,4)
plt.imshow(p_norm, origin='lower', aspect='auto',
extent=(A[0], A[-1], B[0], B[-1]))
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the samples from the normal approx.
plt.subplot(2,3,5)
plt.scatter(samp_norm[:,0], samp_norm[:,1], 10, c='#377eb8', linewidth=0)
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# Normal approximation does not take into account that posteriori
# is not symmetric and that there is very low denisty for negative
# beta values. Based on samples from the normal approximation
# it is estimated that there is about 4% probability that beta is negative!
plt.text(0,-7,'p(beta>0)={:.2f}'.format(np.mean(samp_norm[:,1]>0)))
# Plot the histogram of LD50
plt.subplot(2,3,6)
# Since we have strong prior belief that beta should not be negative we can
# improve our normal approximation by conditioning on beta>0.
bpi = samp_norm[:,1] > 0
samp_ld50_norm = - samp_norm[bpi,0] / samp_norm[bpi,1]
plt.hist(samp_ld50_norm, np.linspace(-0.8, 0.8, 31))
plt.xlim([-0.8, 0.8])
plt.xlabel(r'LD50 = -$\alpha/\beta$')
plt.yticks(())
plt.xticks(np.linspace(-0.8, 0.8, 5))
# Add super title
plt.suptitle('Normal approximaton for Bioassay model', fontsize=18)
plt.show()
| gpl-3.0 |
AtsushiHashimoto/fujino_mthesis | tools/learning/python3/script_exp_clustering_evaluation.py | 1 | 2014 | # _*_ coding: utf-8 -*-
# Python 3.x
import os
import argparse
import exp_clustering_evaluation as exp
import pandas as pd
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('-input_dir', help=u'input directory which contains features.npy',
default = None)
parser.add_argument('-output_dir', help=u'output directory',
default = None)
parser.add_argument('-label_dir', help=u'label directory',
default = None)
    parser.add_argument('-annotation_dir', help=u'directory containing img_list_train*_annotation.tsv',
default = "")
parser.add_argument('-step_dir', help=u'image directory',
default = None)
parser.add_argument('-metric', help=u'metric of similarities',
default = "linear")
    parser.add_argument('-epoch', help=u'number of iterations per epoch', type=int,
default = 0)
    parser.add_argument('-n_epoch', help=u'number of epochs to evaluate', type=int,
default = 0)
params = parser.parse_args()
return vars(params)
def main(input_dir, output_dir, label_dir, annotation_dir, step_dir, metric, epoch, n_epoch):
params = {}
params["label_dir"] = label_dir
params["annotation_dir"] = annotation_dir
params["step_dir"] = step_dir
params["metric"] = metric
result = []
for i in range(1,n_epoch+1):
#params["input_dir"] = os.path.join(input_dir, "iter%d"%(epoch*i))
params["input_dir"] = os.path.join(input_dir, "%d"%(epoch*i))
params["output_dir"] = os.path.join(output_dir, "fp_estimation_%d_%s" % (epoch*i, metric))
r = exp.main(**params)
result.append([i] + r)
result = pd.DataFrame(result)
result.to_csv(os.path.join(output_dir, "result.csv"), index=False, header=False)
if __name__ == "__main__":
params = parse()
main(**params)
| bsd-2-clause |
d-xc/face_authentication_viewer | src/classifier.py | 5 | 10591 | #!/usr/bin/env python2
#
# Example to classify faces.
# Brandon Amos
# 2015/10/11
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(imgPath, multiple=False):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
if multiple:
bbs = align.getAllFaceBoundingBoxes(rgbImg)
else:
bb1 = align.getLargestFaceBoundingBox(rgbImg)
bbs = [bb1]
if len(bbs) == 0 or (not multiple and bb1 is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
reps = []
for bb in bbs:
start = time.time()
alignedFace = align.align(
args.imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
print("This bbox is centered at {}, {}".format(bb.center().x, bb.center().y))
start = time.time()
rep = net.forward(alignedFace)
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
reps.append((bb.center().x, rep))
sreps = sorted(reps, key=lambda x: x[0])
return sreps
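# Typical consumption of getRep (illustration only; assumes the module has been run
# as a script so the global `args`, `align` and `net` objects exist): each returned
# entry pairs a face's horizontal position with its embedding, sorted left to right.
#     reps = getRep("person.jpg", multiple=True)
#     for x_center, embedding in reps:
#         print(x_center, embedding.shape)  # e.g. (128,) for nn4.small2.v1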
def train(args):
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(args.workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
if args.classifier == 'LinearSvm':
clf = SVC(C=1, kernel='linear', probability=True)
elif args.classifier == 'GridSearchSvm':
print("""
Warning: In our experiences, using a grid search over SVM hyper-parameters only
gives marginally better performance than a linear SVM with C=1 and
is not worth the extra computations of performing a grid search.
""")
param_grid = [
{'C': [1, 10, 100, 1000],
'kernel': ['linear']},
{'C': [1, 10, 100, 1000],
'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
clf = GridSearchCV(SVC(C=1, probability=True), param_grid, cv=5)
    elif args.classifier == 'GMM':  # Tends to underperform here
clf = GMM(n_components=nClasses)
# ref:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
elif args.classifier == 'RadialSvm': # Radial Basis Function kernel
# works better with C = 1 and gamma = 2
clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
    elif args.classifier == 'DecisionTree':  # Tends to underperform here
clf = DecisionTreeClassifier(max_depth=20)
elif args.classifier == 'GaussianNB':
clf = GaussianNB()
# ref: https://jessesw.com/Deep-Learning/
elif args.classifier == 'DBN':
from nolearn.dbn import DBN
clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], # i/p nodes, hidden nodes, o/p nodes
learn_rates=0.3,
# Smaller steps mean a possibly more accurate result, but the
# training will take longer
learn_rate_decays=0.9,
# a factor the initial learning rate will be multiplied by
# after each iteration of the training
                  epochs=300,  # number of iterations
# dropouts = 0.25, # Express the percentage of nodes that
# will be randomly dropped as a decimal.
verbose=1)
if args.ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
fName = "{}/classifier.pkl".format(args.workDir)
print("Saving classifier to '{}'".format(fName))
with open(fName, 'w') as f:
pickle.dump((le, clf), f)
def infer(args, multiple=False):
with open(args.classifierModel, 'r') as f:
(le, clf) = pickle.load(f)
for img in args.imgs:
print("\n=== {} ===".format(img))
reps = getRep(img, multiple)
if len(reps) > 1:
print("List of faces in image from left to right")
for r in reps:
rep = r[1].reshape(1, -1)
bbx = r[0]
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
if multiple:
print("Predict {} @ x={} with {:.2f} confidence.".format(person, bbx,
confidence))
else:
print("Predict {} with {:.2f} confidence.".format(person, confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers(dest='mode', help="Mode")
trainParser = subparsers.add_parser('train',
help="Train a new classifier.")
trainParser.add_argument('--ldaDim', type=int, default=-1)
trainParser.add_argument(
'--classifier',
type=str,
choices=[
'LinearSvm',
'GridSearchSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN'],
help='The type of classifier to use.',
default='LinearSvm')
trainParser.add_argument(
'workDir',
type=str,
help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
inferParser = subparsers.add_parser(
'infer', help='Predict who an image contains from a trained classifier.')
inferParser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
inferParser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
inferParser.add_argument('--multi', help="Infer multiple faces in image",
action="store_true")
args = parser.parse_args()
if args.verbose:
print("Argument parsing and import libraries took {} seconds.".format(
time.time() - start))
if args.mode == 'infer' and args.classifierModel.endswith(".t7"):
raise Exception("""
Torch network model passed as the classification model,
which should be a Python pickle (.pkl)
See the documentation for the distinction between the Torch
network and classification models:
http://cmusatyalab.github.io/openface/demo-3-classifier/
http://cmusatyalab.github.io/openface/training-new-models/
Use `--networkModel` to set a non-standard Torch network model.""")
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
if args.verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args, args.multi)
| apache-2.0 |
BennerLab/atg | atg/data/ensembl.py | 1 | 7239 | """
Find species data in Ensembl, recording genome and annotation URLs.
"""
import os
import sys
import pandas
import ftplib
import string
import atg.config
import atg.data.retrieve
ENSEMBL_SPECIES_INFORMATION = 'ftp://ftp.ensemblgenomes.org/pub/release-35/species.txt'
ENSEMBL_DNA_BASE_LOCATION = string.Template('pub/release-35/$division/fasta$collection/$species/dna/')
ENSEMBL_GTF_BASE_LOCATION = string.Template('pub/release-35/$division/gtf$collection/$species/$assembly.'
'$version.gtf.gz')
class EnsemblSpecies:
"""
A class for fetching and managing species data from Ensembl Genomes, which include many organisms not found on
the main Ensembl site. Files for these organisms are stored in individual subfolders in e.g.
~/ATGData/ensemblgenomes/.
"""
def __init__(self):
self.data_root = os.path.expanduser(atg.config.settings['Data']['Root'])
ensembl_genome_file = os.path.join(self.data_root, 'ensembl_species.txt')
if not os.path.exists(ensembl_genome_file):
atg.data.retrieve.fetch_url(ENSEMBL_SPECIES_INFORMATION, ensembl_genome_file)
self.ensembl_species_df = pandas.read_csv(ensembl_genome_file, index_col=False, sep='\t')
def get_species_information(self, species):
"""
:param species: genus and species (as named by Ensembl), e.g. zea_mays
:return: dictionary containing URLs to genome fasta and gene annotation (GTF), if found
"""
if sum(self.ensembl_species_df.species.isin([species])) == 0:
return {'species': species}
# pull out first matching record
ensembl_record = self.ensembl_species_df.loc[self.ensembl_species_df['species'] == species].iloc[0]
ensembl_division = ensembl_record.loc['division'].lstrip('Ensembl').lower()
# could access assembly ID or accession from record, but the Ensembl files don't use one consistently
ensembl_core_db = ensembl_record.loc['core_db']
if "collection" in ensembl_core_db:
collection_path = '/' + ensembl_core_db.split('_core_')[0]
else:
collection_path = ''
with ftplib.FTP('ftp.ensemblgenomes.org') as ftp:
ftp.login()
genome_listing = ftp.nlst(ENSEMBL_DNA_BASE_LOCATION.safe_substitute(division=ensembl_division,
species=species,
collection=collection_path))
genome_location = ''
annotation_location = ''
genome_assembly_version = ''
# find toplevel unmasked genome
for filename in genome_listing:
if 'dna.toplevel' in filename:
genome_location = filename
break
if genome_location != '':
genome_filename = genome_location.split('/')[-1]
                # strip the exact suffix; str.rstrip would treat it as a character set
                genome_assembly = genome_filename[:-len('.dna.toplevel.fa.gz')]
genome_assembly_version = genome_assembly.split('.', maxsplit=1)[1]
annotation_listing = ftp.nlst(ENSEMBL_GTF_BASE_LOCATION.safe_substitute(division=ensembl_division,
species=species,
assembly=genome_assembly,
collection=collection_path,
version=35))
if len(annotation_listing) == 0:
annotation_location = ''
elif len(annotation_listing) == 1:
annotation_location = annotation_listing[0]
else:
annotation_location = 'multiple'
ftp.close()
return {'species': species, 'genome': genome_location, 'annotation': annotation_location,
'version': genome_assembly_version}
def collect_species_information(self, species_list):
"""
Given a list of species names, create a dataframe containing all information
:param species_list:
:return: dataframe
"""
record_list = []
for species in species_list:
record_list.append(self.get_species_information(species))
return pandas.DataFrame.from_records(record_list)
def retrieve_species_data(self, species):
"""
Download data from Ensembl.
:param species:
:return: True if successful
"""
species_information = self.get_species_information(species)
if len(species_information) == 1:
return False
ensembl_species_path = os.path.join(self.data_root, 'ensemblgenomes', species)
os.makedirs(ensembl_species_path, exist_ok=True)
for filetype in ('genome', 'annotation'):
            filename = os.path.split(species_information[filetype])[-1]
            if filename.endswith('.gz'):
                filename = filename[:-3]  # remove .gz extension if present
ensembl_url = 'ftp://ftp.ensemblgenomes.org/' + species_information[filetype]
output_filename = os.path.join(ensembl_species_path, filename)
atg.data.retrieve.fetch_url(ensembl_url, output_filename)
return True
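# Minimal interactive usage sketch for the class above (assumes the ATG data root is
# configured and the Ensembl Genomes FTP site is reachable; illustration only):
#     tracker = EnsemblSpecies()
#     info = tracker.get_species_information('zea_mays')
#     print(info.get('genome'), info.get('annotation'), info.get('version'))
#     tracker.retrieve_species_data('zea_mays')   # downloads genome FASTA + GTF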
def retrieve_ensembl_species(namespace):
# get list of species from file or namespace
if namespace.list:
species_list = pandas.read_csv(namespace.species_name[0], index_col=False, header=None).iloc[:, 0].tolist()
else:
species_list = namespace.species_name
tracker = EnsemblSpecies()
# output species information as table, or download
if namespace.table:
species_df = tracker.collect_species_information(species_list)
species_df.to_csv(sys.stdout, sep="\t", index=False, columns=['species', 'genome', 'annotation', 'version'])
else:
for species in species_list:
retrieval_success = tracker.retrieve_species_data(species)
if retrieval_success:
print('%s retrieved successfully.' % species)
else:
print('%s information not retrieved.' % species)
def setup_subparsers(subparsers):
retrieval_parser = subparsers.add_parser('species', help="Fetch an organism by genus and species")
retrieval_parser.add_argument('species_name', nargs="+",
help="one or more genus/species for an organism in Ensembl, e.g. zea_mays")
retrieval_parser.add_argument('-l', '--list', action="store_true", help="species are provided in a text file given"
"as the only argument")
retrieval_parser.add_argument('-t', '--table', action="store_true",
help="instead of downloading data, write the species information to stdout")
# retrieval_parser.add_argument('-o', '--overwrite', action="store_true", help="Overwrite existing files")
retrieval_parser.set_defaults(func=retrieve_ensembl_species)
| gpl-3.0 |
wangmiao1981/spark | python/pyspark/pandas/data_type_ops/binary_ops.py | 5 | 2976 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Union, cast
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.types import BinaryType, BooleanType, StringType
class BinaryOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with BinaryType.
"""
@property
def pretty_name(self) -> str:
return "binaries"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BinaryType):
return column_op(F.concat)(left, right)
elif isinstance(right, bytes):
return column_op(F.concat)(left, SF.lit(right))
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bytes):
return cast(
SeriesOrIndex, left._with_new_scol(F.concat(SF.lit(right), left.spark.column))
)
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype)
else:
return _as_other_type(index_ops, dtype, spark_type)
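# Rough behavioural sketch of the operators implemented above (assumes a running
# Spark session with pandas-on-Spark available; illustration only):
#     import pyspark.pandas as ps
#     s = ps.Series([b"1", b"2", b"3"])
#     s + b"!"    # element-wise concatenation -> [b"1!", b"2!", b"3!"]
#     b"!" + s    # radd path                  -> [b"!1", b"!2", b"!3"]
#     s + 1       # TypeError: concatenation is only defined for binaries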
| apache-2.0 |
astrofle/CRRLpy | examples/fit_stacks_1c.py | 1 | 6066 | #!/usr/bin/env python
from crrlpy import frec_calc as fc
from crrlpy import crrls
from lmfit import Model
from matplotlib.ticker import MaxNLocator
from crrlpy.models import rrlmod
from astropy.table import Table
import glob
import re
import pylab as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
rc('font', weight='bold')
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
def parse_fit_pars(data, mc, fit, n, f0, residuals):
"""
Converts the fitted line parameters to a Table.
"""
#data = np.zeros((14))
# Store data
data[0] = n
data[1] = f0
data[2] = fit.params['v{0}_center'.format(mc)].value
data[3] = fit.params['v{0}_center'.format(mc)].stderr
data[4] = fit.params['v{0}_amplitude'.format(mc)].value*crrls.dv2df(f0*1e6, 1e3)
data[5] = fit.params['v{0}_amplitude'.format(mc)].stderr*crrls.dv2df(f0*1e6, 1e3)
dD = 2*fit.params['v{0}_sigma'.format(mc)].value
dL = 2*fit.params['v{0}_gamma'.format(mc)].value
dv = crrls.line_width(dD, dL)
data[6] = dv
ddD = 2*fit.params['v{0}_sigma'.format(mc)].stderr
ddL = 2*fit.params['v{0}_gamma'.format(mc)].stderr
ddv = crrls.line_width_err(dD, dL, ddD, ddL)
data[7] = ddv
data[8] = crrls.voigt_peak(fit.params['v{0}_amplitude'.format(mc)].value,
fit.params['v{0}_sigma'.format(mc)].value,
fit.params['v{0}_gamma'.format(mc)].value)
data[9] = crrls.voigt_peak_err(data[8],
fit.params['v{0}_amplitude'.format(mc)].value,
fit.params['v{0}_amplitude'.format(mc)].stderr,
fit.params['v{0}_sigma'.format(mc)].value,
fit.params['v{0}_sigma'.format(mc)].stderr)
data[10] = 2*fit.params['v{0}_sigma'.format(mc)].value
data[11] = 2*fit.params['v{0}_sigma'.format(mc)].stderr
data[12] = 2*fit.params['v{0}_gamma'.format(mc)].value
data[13] = 2*fit.params['v{0}_gamma'.format(mc)].stderr
data[14] = crrls.get_rms(residuals)
return data
def save_log(data, log):
"""
"""
table = Table(rows=data, names=('n', 'f0 (MHz)', 'center (km/s)', 'center_err (km/s)',
'itau (Hz)', 'itau_err (Hz)', 'FWHM (km/s)', 'FWHM_err (km/s)',
'tau', 'tau_err', 'FWHM_gauss (km/s)', 'FWHM_gauss_err (km/s)',
'FWHM_lorentz (km/s)', 'FWHM_lorentz_err (km/s)', 'residuals'),
dtype=('i3', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4'))
table.write(log, format='ascii.fixed_width')
if __name__ == '__main__':
dD_fix0 = 3.4/2.#crrls.sigma2FWHM(1.4456523-0*0.0267016)/2.
trans = 'alpha'
frng = 'all'
stacks = glob.glob('CI{0}_only_n*.ascii'.format(trans))
crrls.natural_sort(stacks)
prop = {'n':['812-863',
'760-806',
'713-748',
'668-709',
'623-665',
'580-621'],
'ns':[37,36,36,36,37,37]}
vel = []
tau = []
wei = []
fit3 = []
res3 = []
n = []
f0 = []
data0 = np.empty((len(stacks), 15))
pdf = PdfPages('C{0}_3c.pdf'.format(trans))
for i,stack in enumerate(stacks):
data = np.loadtxt(stack)
vel.append(data[:,0])
tau.append(data[:,1])
wei.append(data[:,2])
nnow = int(re.findall('\d+', stack)[0])
n.append(nnow)
dn = fc.set_dn(trans)
specie, trans, nn, freq = fc.make_line_list('CI', 1500, dn)
nii = crrls.best_match_indx2(n[i], nn)
f0.append(freq[nii])
tmin = min(tau[i])
weight = np.power(wei[i], 2)
v1 = Model(crrls.Voigt, prefix='v0_')
pars3 = v1.make_params()
mod3 = v1
pars3['v0_gamma'].set(value=0.1, vary=True, expr='', min=0.0)
pars3['v0_center'].set(value=-47., vary=True, max=-30, min=-49)
pars3['v0_amplitude'].set(value=-0.1, vary=True, max=-1e-8)
pars3['v0_sigma'].set(value=dD_fix0, vary=False)
fit3.append(mod3.fit(tau[i], pars3, x=vel[i], weights=weight))
# Plot things
res3.append(tau[i] - fit3[i].best_fit)
voigt0 = crrls.Voigt(vel[i],
fit3[i].params['v0_sigma'].value,
fit3[i].params['v0_gamma'].value,
fit3[i].params['v0_center'].value,
fit3[i].params['v0_amplitude'].value)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1, adjustable='datalim')
ax.plot(vel[i], tau[i], 'k-', drawstyle='steps', lw=1)
ax.plot(vel[i], voigt0, 'g-')
ax.plot(vel[i], fit3[i].best_fit, 'b-', lw=0.5)
ax.plot(vel[i], res3[i], 'b:', lw=1)
ax.plot(vel[i], [0]*len(vel[i]), 'k--')
ax.text(0.8, 0.125, r"{0:.2f} MHz".format(f0[i]),
size="large", transform=ax.transAxes, alpha=1)
ax.text(0.8, 0.075, r"C$\{0}$({1})".format(trans, nnow),
size="large", transform=ax.transAxes, alpha=1)
ax.set_xlim(min(vel[i]),max(vel[i]))
ax.set_ylim(min(tau[i])-max(res3[i]),max(tau[i])+max(res3[i]))
#if (i+1)%2 != 0:
ax.set_ylabel(r"$\tau_{\nu}$", fontweight='bold', fontsize=20)
ax.set_xlabel(r"Radio velocity (km s$^{-1}$)", fontweight='bold')
pdf.savefig(fig)
plt.close(fig)
## Store data
parse_fit_pars(data0[i], 0, fit3[i], nnow, f0[i], res3[i])
pdf.close()
log = 'CI{0}_-47kms_nomod_1c.log'.format(trans)
save_log(data0, log) | mit |
openwfm/wrfxpy | src/vis/timeseries.py | 1 | 4127 | import logging
import subprocess
import pandas as pd
import numpy as np
import os.path as osp
from vis.var_wisdom import get_wisdom, is_fire_var, strip_end
from clamp2mesh import nearest_idx
class Timeseries(object):
"""
Timeseries of WRF data.
"""
def __init__(self, output_path, prod_name, tslist, num_doms):
"""
Initialize timeseries with output parameters.
:param output_path: path where timeseries files are stored
:param prod_name: name of manifest json file and prefix of all output files
:param tslist: dictionary with time series information
:param num_doms: number of domains
"""
logging.info("Timeseries: output_path=%s prod_name=%s" % (output_path, prod_name))
self.output_path = output_path
self.product_name = prod_name
self.stations = tslist['stations'].copy()
self.variables = {var: get_wisdom(var).copy() for var in tslist['vars']}
logging.info("Timeseries: stations=%s" % [st['name'] for st in self.stations.values()])
logging.info("Timeseries: variables=%s" % list(self.variables.keys()))
self.num_doms = num_doms
# initialize the CSV files for each station
self.initialize_stations()
def initialize_stations(self):
static_cols = ['station_name','station_lon','station_lat',
'datetime', 'domain', 'grid_i', 'grid_j',
'grid_lon', 'grid_lat', 'grid_fire_i',
                       'grid_fire_j', 'grid_fire_lon', 'grid_fire_lat']  # keep names in sync with write_timestep
var_cols = list(self.variables.keys())
for st in self.stations.keys():
self.stations[st].update({'local_path': {}})
for dom_id in range(1,self.num_doms+1):
st_path = osp.join(self.output_path, self.product_name + '-%02d-' % dom_id + st + '.csv')
self.stations[st]['local_path'].update({str(dom_id): st_path})
cols = static_cols + var_cols
df = pd.DataFrame({c: [] for c in cols})
df.to_csv(st_path,index = False)
def write_timestep(self,d,dom_id,tndx,ts_esmf):
ts_paths = []
logging.info('write_timestep: time series at time %s and domain %d' % (ts_esmf,dom_id))
lats,lons = (d.variables['XLAT'][0,:,:], d.variables['XLONG'][0,:,:])
if 'FXLONG' in d.variables:
            # FXLAT/FXLONG are the refined fire-grid coordinates (XLAT/XLONG is the atmospheric grid)
            lats_fire,lons_fire = (d.variables['FXLAT'][0,:,:], d.variables['FXLONG'][0,:,:])
fm,fn = strip_end(d)
lats_fire,lons_fire = (lats_fire[:fm,:fn], lons_fire[:fm,:fn])
for k_st,station in self.stations.items():
idx = nearest_idx(lons,lats,station['lon'],station['lat'])
timestep = {
'station_name': station['name'],
'station_lon': station['lon'],
'station_lat': station['lat'],
'datetime': ts_esmf,
'domain': dom_id,
'grid_i': idx[0],
'grid_j': idx[1],
'grid_lon': lons[idx],
'grid_lat': lats[idx]
}
if 'FXLONG' in d.variables:
idx_fire = nearest_idx(lons_fire,lats_fire,station['lon'],station['lat'])
timestep.update({
'grid_fire_i': idx_fire[0],
'grid_fire_j': idx_fire[1],
'grid_fire_lon': lons_fire[idx_fire],
'grid_fire_lat': lats_fire[idx_fire]
})
for k_v,var in self.variables.items():
array = var['retrieve_as'](d,tndx)
if is_fire_var(var):
array = array[:fm,:fn]
val = array[idx_fire]
else:
val = array[idx]
timestep.update({k_v: val})
df = pd.read_csv(station['local_path'][str(dom_id)])
df = df.append(timestep,ignore_index=True)
df.to_csv(station['local_path'][str(dom_id)],index=False)
ts_paths.append(osp.basename(station['local_path'][str(dom_id)]))
return ts_paths
| mit |
mjudsp/Tsallis | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)  # use the fiducial D rather than the last DD from the loop above
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
huaxz1986/git_book | chapters/Linear/lda.py | 1 | 4697 | # -*- coding: utf-8 -*-
"""
    Generalized Linear Models
    ~~~~~~~~~~~~~~~~~~~~~~~~~~
    Linear Discriminant Analysis
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, discriminant_analysis,cross_validation
def load_data():
'''
    Load the dataset for the classification problem.
    :return: a tuple whose elements are, in order: training samples, test samples, training labels, test labels
'''
    iris=datasets.load_iris() # use the iris dataset bundled with scikit-learn
X_train=iris.data
y_train=iris.target
return cross_validation.train_test_split(X_train, y_train,test_size=0.25,
        random_state=0,stratify=y_train)# stratified split into training and test sets; the test set is 1/4 of the original data
def test_LinearDiscriminantAnalysis(*data):
'''
    Test the basic usage of LinearDiscriminantAnalysis.
    :param data: variadic argument; a tuple whose elements are, in order: training samples, test samples, training labels, test labels
:return: None
'''
X_train,X_test,y_train,y_test=data
lda = discriminant_analysis.LinearDiscriminantAnalysis()
lda.fit(X_train, y_train)
print('Coefficients:%s, intercept %s'%(lda.coef_,lda.intercept_))
print('Score: %.2f' % lda.score(X_test, y_test))
def plot_LDA(converted_X,y):
'''
    Plot the data after the LDA transform.
    :param converted_X: samples after the LDA transform
    :param y: labels of the samples
:return: None
'''
from mpl_toolkits.mplot3d import Axes3D
fig=plt.figure()
ax=Axes3D(fig)
colors='rgb'
markers='o*s'
for target,color,marker in zip([0,1,2],colors,markers):
pos=(y==target).ravel()
X=converted_X[pos,:]
ax.scatter(X[:,0], X[:,1], X[:,2],color=color,marker=marker,
label="Label %d"%target)
ax.legend(loc="best")
fig.suptitle("Iris After LDA")
plt.show()
def run_plot_LDA():
'''
    Run plot_LDA, using the dataset produced by load_data().
:return: None
'''
X_train,X_test,y_train,y_test=load_data()
X=np.vstack((X_train,X_test))
Y=np.vstack((y_train.reshape(y_train.size,1),y_test.reshape(y_test.size,1)))
lda = discriminant_analysis.LinearDiscriminantAnalysis()
lda.fit(X, Y)
converted_X=np.dot(X,np.transpose(lda.coef_))+lda.intercept_
plot_LDA(converted_X,Y)
def test_LinearDiscriminantAnalysis_solver(*data):
'''
    Test how the predictive performance of LinearDiscriminantAnalysis changes with the solver parameter
    :param data: variadic argument. It is a tuple whose elements are, in order: the training samples, the test samples, the training labels, the test labels
:return: None
'''
X_train,X_test,y_train,y_test=data
solvers=['svd','lsqr','eigen']
for solver in solvers:
if(solver=='svd'):
lda = discriminant_analysis.LinearDiscriminantAnalysis(solver=solver)
else:
lda = discriminant_analysis.LinearDiscriminantAnalysis(solver=solver,
shrinkage=None)
lda.fit(X_train, y_train)
print('Score at solver=%s: %.2f' %(solver, lda.score(X_test, y_test)))
def test_LinearDiscriminantAnalysis_shrinkage(*data):
'''
    Test how the predictive performance of LinearDiscriminantAnalysis changes with the shrinkage parameter
    :param data: variadic argument. It is a tuple whose elements are, in order: the training samples, the test samples, the training labels, the test labels
:return: None
'''
X_train,X_test,y_train,y_test=data
shrinkages=np.linspace(0.0,1.0,num=20)
scores=[]
for shrinkage in shrinkages:
lda = discriminant_analysis.LinearDiscriminantAnalysis(solver='lsqr',
shrinkage=shrinkage)
lda.fit(X_train, y_train)
scores.append(lda.score(X_test, y_test))
    ## plotting
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot(shrinkages,scores)
ax.set_xlabel(r"shrinkage")
ax.set_ylabel(r"score")
ax.set_ylim(0,1.05)
ax.set_title("LinearDiscriminantAnalysis")
plt.show()
if __name__=='__main__':
    X_train,X_test,y_train,y_test=load_data() # generate the dataset for classification
    test_LinearDiscriminantAnalysis(X_train,X_test,y_train,y_test) # call test_LinearDiscriminantAnalysis
    # run_plot_LDA() # call run_plot_LDA
    # test_LinearDiscriminantAnalysis_solver(X_train,X_test,y_train,y_test) # call test_LinearDiscriminantAnalysis_solver
    # test_LinearDiscriminantAnalysis_shrinkage(X_train,X_test,y_train,y_test) # call test_LinearDiscriminantAnalysis_shrinkage | gpl-3.0 |
pinkavaj/gnuradio | gr-digital/examples/example_fll.py | 49 | 5715 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
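        # (descriptive note) the extra FLL output ports tapped above feed the
        # vector sinks named for what they record: port 1 -> vsnk_frq
        # (frequency), port 2 -> vsnk_phs (phase), port 3 -> vsnk_err (loop error).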
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_fll(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_err = scipy.array(put.vsnk_err.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data. There are 2 filters of
# ntaps long and the channel introduces another 4 sample delay.
data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Vimos/scikit-learn | examples/linear_model/plot_sgd_iris.py | 58 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
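    # line() solves coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0 for x1,
    # i.e. it traces the one-vs-all decision boundary of class c.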
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
louispotok/pandas | asv_bench/benchmarks/replace.py | 3 | 1654 | import numpy as np
import pandas as pd
from .pandas_vb_common import setup # noqa
class FillNa(object):
goal_time = 0.2
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10**6
rng = pd.date_range('1/1/2000', periods=N, freq='min')
data = np.random.randn(N)
data[::2] = np.nan
self.ts = pd.Series(data, index=rng)
def time_fillna(self, inplace):
self.ts.fillna(0.0, inplace=inplace)
def time_replace(self, inplace):
self.ts.replace(np.nan, 0.0, inplace=inplace)
class ReplaceDict(object):
goal_time = 0.2
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10**5
start_value = 10**5
self.to_rep = dict(enumerate(np.arange(N) + start_value))
self.s = pd.Series(np.random.randint(N, size=10**3))
def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
class Convert(object):
goal_time = 0.5
params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta'])
param_names = ['constructor', 'replace_data']
def setup(self, constructor, replace_data):
N = 10**3
data = {'Series': pd.Series(np.random.randint(N, size=N)),
'DataFrame': pd.DataFrame({'A': np.random.randint(N, size=N),
'B': np.random.randint(N, size=N)})}
self.to_replace = {i: getattr(pd, replace_data) for i in range(N)}
self.data = data[constructor]
def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
| bsd-3-clause |
smblance/ggplot | ggplot/themes/theme.py | 12 | 5935 | """
Theme elements:
* element_line
* element_rect
* element_text
* element_title
These elements define what operations can be performed. The specific targets,
eg. line, rect, text, title and their derivatives axis_title or axis_title_x
specify the scope of the theme application.
"""
from copy import deepcopy
from .element_target import element_target_factory, merge_element_targets
class theme(object):
"""This is an abstract base class for themes.
    In general, only complete themes should subclass this class.
Notes
-----
When subclassing there are really only two methods that need to be
implemented.
__init__: This should call super().__init__ which will define
self._rcParams. Subclasses should customize self._rcParams after calling
super().__init__. That will ensure that the rcParams are applied at
the appropriate time.
The other method is apply_theme(ax). This method takes an axes object that
has been created during the plot process. The theme should modify the
    axes accordingly.
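    A minimal sketch of this pattern (a hypothetical subclass, not part of
    this module) might look like::
        class theme_example(theme):
            def __init__(self):
                super(theme_example, self).__init__(complete=True)
                self._rcParams['axes.grid'] = False
            def apply_theme(self, ax):
                ax.set_axisbelow(True)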
"""
def __init__(self, complete=False, **kwargs):
"""
Provide ggplot2 themeing capabilities.
Parameters
-----------
complete : bool
Themes that are complete will override any existing themes.
themes that are not complete (ie. partial) will add to or
override specific elements of the current theme.
eg. theme_matplotlib() + theme_xkcd() will be completely
determined by theme_xkcd, but
theme_matplotlib() + theme(axis_text_x=element_text(angle=45)) will
only modify the x axis text.
        **kwargs: theme_element
kwargs are theme_elements based on http://docs.ggplot2.org/current/theme.html.
Currently only a subset of the elements are implemented. In addition,
Python does not allow using '.' in argument names, so we are using '_'
instead.
For example, ggplot2 axis.ticks.y will be axis_ticks_y in Python ggplot.
"""
self.element_themes = []
self.complete = complete
self._rcParams = {}
for target_name, theme_element in kwargs.items():
self.element_themes.append(element_target_factory(target_name,
theme_element))
def apply_theme(self, ax):
"""apply_theme will be called with an axes object after plot has completed.
Complete themes should implement this method if post plot themeing is
required.
"""
pass
def get_rcParams(self):
"""Get an rcParams dict for this theme.
Notes
-----
        Subclasses should not need to override this method as long as
self._rcParams is constructed properly.
rcParams are used during plotting. Sometimes the same theme can be
achieved by setting rcParams before plotting or a post_plot_callback
        after plotting. The choice of how to implement it is a matter of
convenience in that case.
        There are certain things that can only be themed after plotting. There
may not be an rcParam to control the theme or the act of plotting
may cause an entity to come into existence before it can be themed.
"""
rcParams = deepcopy(self._rcParams)
if self.element_themes:
for element_theme in self.element_themes:
rcparams = element_theme.get_rcParams()
rcParams.update(rcparams)
return rcParams
def post_plot_callback(self, ax):
"""Apply this theme, then apply additional modifications in order.
This method should not be overridden. Subclasses should override
        the apply_theme method. This implementation will ensure that
        a theme that includes partial themes will be themed properly.
"""
self.apply_theme(ax)
# does this need to be ordered first?
for element_theme in self.element_themes:
element_theme.post_plot_callback(ax)
def add_theme(self, other):
"""Add themes together.
Subclasses should not override this method.
This will be called when adding two instances of class 'theme'
together.
A complete theme will annihilate any previous themes. Partial themes
can be added together and can be added to a complete theme.
"""
if other.complete:
return other
else:
theme_copy = deepcopy(self)
theme_copy.element_themes = merge_element_targets(
deepcopy(self.element_themes),
deepcopy(other.element_themes))
return theme_copy
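    # For quick reference (restating the docstrings above): theme_matplotlib()
    # + theme_xkcd() is completely determined by theme_xkcd, since a complete
    # theme annihilates whatever came before, while theme_matplotlib() +
    # theme(axis_text_x=element_text(angle=45)) returns a copy of
    # theme_matplotlib with only that partial element theme merged in.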
def __add__(self, other):
if isinstance(other, theme):
return self.add_theme(other)
else:
raise TypeError()
def __radd__(self, other):
"""Subclasses should not override this method.
This will be called in one of two ways:
gg + theme which is translated to self=theme, other=gg
or
theme1 + theme2 which is translated into self=theme2, other=theme1
"""
if not isinstance(other, theme):
gg_copy = deepcopy(other)
if self.complete:
gg_copy.theme = self
else:
gg_copy.theme = other.theme.add_theme(self)
return gg_copy
        # other + self is theme + self
else:
# adding theme and theme here
# other + self
# if self is complete return self
if self.complete:
return self
# else make a copy of other combined with self.
else:
theme_copy = deepcopy(other)
theme_copy.element_themes.append(self)
return theme_copy
| bsd-2-clause |
Technariumas/pieva | create_palette.py | 2 | 1123 | #!/usr/bin/python
#create colour palettes with sns
#reguires seaborn (http://www.stanford.edu/~mwaskom/software/seaborn)
from __future__ import division
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.image as mpimg
import sys
def make_SNS_palette(paletteFilename):
#flat = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
#pal = sns.color_palette(flat, 256)
pal = sns.color_palette("husl", 256)
cm = mpl.colors.ListedColormap(list(pal))
r = cm((np.arange(256)))
r = 255.999*r[:, 0:3]
np.savetxt(paletteFilename, r, delimiter=",")
def makeBitmapPalette(paletteFilename):
#Warning -- TIFF images supported only. If using 16-bit BMPs, which may work, adjust the np.savetxt() call
img=mpimg.imread(paletteFilename)[0]
if img.dtype == np.uint8:
img = img.astype(np.uint32)
elif img.dtype == np.float32:
img = (img * 255).astype(np.uint32)
np.savetxt(paletteFilename[0:-5], img, delimiter=",")
paletteFilename = sys.argv[1]
makeBitmapPalette("palettes/"+paletteFilename)
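#usage sketch (assuming the directory layout implied above):
#   python create_palette.py mypalette.tiff
#reads palettes/mypalette.tiff, keeps its first pixel row, and writes it as a
#CSV palette to palettes/mypalette (the 5-character ".tiff" suffix is stripped
#by the [0:-5] slice in makeBitmapPalette)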
| gpl-2.0 |
ndingwall/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 15 | 5685 |
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils._testing import (ignore_warnings,
fails_if_pypy)
pytestmark = fails_if_pypy
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert "dict" == h.input_type
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": "string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features=n_features, input_type="string",
alternate_sign=False)
X = h.transform(it)
assert X.shape[0] == len(raw_X)
assert X.shape[1] == n_features
assert X[0].sum() == 4
assert X[1].sum() == 3
assert X.nnz == 6
def test_hashing_transform_seed():
# check the influence of the seed when computing the hashes
# import is here to avoid importing on pypy
from sklearn.feature_extraction._hashing_fast import (
transform as _hashing_transform)
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices, indptr, _ = _hashing_transform(raw_X_, 2 ** 7, str,
False)
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2 ** 7, str,
False, seed=0)
assert_array_equal(indices, indices_0)
assert_array_equal(indptr, indptr_0)
raw_X_ = (((f, 1) for f in x) for x in raw_X)
indices_1, _, _ = _hashing_transform(raw_X_, 2 ** 7, str,
False, seed=1)
with pytest.raises(AssertionError):
assert_array_equal(indices, indices_1)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert [1, 2] == x1_nz
assert [1, 3, 4] == x2_nz
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": "abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert [1, 1] == x1_nz
assert [1, 1, 4] == x2_nz
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert [1] == x1_nz
assert [1] == x2_nz
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
with pytest.raises(ValueError):
FeatureHasher(input_type="gobbledygook")
with pytest.raises(ValueError):
FeatureHasher(n_features=-1)
with pytest.raises(ValueError):
FeatureHasher(n_features=0)
with pytest.raises(TypeError):
FeatureHasher(n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
with pytest.raises(ValueError):
h.transform([])
with pytest.raises(Exception):
h.transform([[5.5]])
with pytest.raises(Exception):
h.transform([[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
with pytest.raises(TypeError):
hasher.fit()
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert X.data.shape == (0,)
@ignore_warnings(category=FutureWarning)
def test_hasher_alternate_sign():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(alternate_sign=True,
input_type='string').fit_transform(X)
assert Xt.data.min() < 0 and Xt.data.max() > 0
Xt = FeatureHasher(alternate_sign=False,
input_type='string').fit_transform(X)
assert Xt.data.min() > 0
def test_hash_collisions():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(alternate_sign=True, n_features=1,
input_type='string').fit_transform(X)
# check that some of the hashed tokens are added
# with an opposite sign and cancel out
assert abs(Xt.data[0]) < len(X[0])
Xt = FeatureHasher(alternate_sign=False, n_features=1,
input_type='string').fit_transform(X)
assert Xt.data[0] == len(X[0])
| bsd-3-clause |
jjhelmus/scipy | scipy/interpolate/_cubic.py | 37 | 29281 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using the PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using the
        # PCHIP algorithm:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
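# Usage sketch (hypothetical values, not part of the library): with
# x = np.linspace(0, 10, 11) and y = np.sin(x), pchip_interpolate(x, y, xnew)
# returns the interpolated values at xnew, and der=[0, 1] returns a list with
# both the values and the first derivatives, matching the branches above.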
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points for purposes of plotting.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
        # This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
    * 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
    * 'clamped': The first derivatives at curve ends are zero. Assuming
      a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
    * 'natural': The second derivatives at curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` allowing to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
    Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
    When the 'not-a-knot' boundary condition is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and violates only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
| bsd-3-clause |
wrshoemaker/ffpopsim | examples/genealogies_with_selection.py | 2 | 2468 | import FFPopSim as h
import numpy as np
from matplotlib import pyplot as plt
import random as rd
from Bio import Phylo
print "This script is meant to illustrate and explore the effect of\n\
positive selection on genealogies in asexual and sexual populations. \n\n\
Simulations are performed using an infinite sites model with L segregating\n\
sites at which mutations with identical beneficial effect are injected.\n\n"
#suggested values
#neutral asexual: N=100 s=0.00001 r=0.0
#selected asexual: N=10000 s=0.01 r=0.0
#selected sexual: N=1000 s=0.01 r=1.0
L = 1000 #number of segregating sites
s = 1e-2 #single site effect
N = 10000 #population size
r = 0.0 #outcrossing rate
sample_size=30 #number of individuals whose genealogy is looked at
nsamples = 3 #number of trees
burnin = 2000 #either ~5*N or 5/s, depending on whether coalescence is dominated by drift or draft
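#with the defaults above (N=10000, s=0.01) that rule of thumb gives
#5/s = 500 generations for the selected (draft) case versus 5*N = 50000 for
#neutral drift; burnin=2000 sits much closer to the draft time scale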
dt = 1000 #time between samples
#set up population, switch on infinite sites mode
pop=h.haploid_highd(L, all_polymorphic=True)
#set the population size via the carrying capacity
pop.carrying_capacity= N
#set the crossover rate, outcrossing_rate and recombination model
pop.outcrossing_rate = r
pop.recombination_model = h.CROSSOVERS
pop.crossover_rate = 1.0/pop.L
#set the effect sizes of the mutations that are injected (the same at each site in this case)
pop.set_fitness_additive(np.ones(L)*s)
#track the genealogy at a central locus L/2 (which one doesn't matter in the asexual case)
pop.track_locus_genealogy([L/2])
#initialize the populations
pop.set_wildtype(pop.carrying_capacity)
print "Population parameters:"
pop.status()
#burn in
print "\nEquilibrate:"
while pop.generation<burnin:
print "Burn in: at", pop.generation, "out of", burnin, "generations"
pop.evolve(100)
print "\nPlot coalescent trees:"
fig=plt.figure(figsize=(7,10))
fig.suptitle("".join(map(str,['N=',N,' r=',r,' L=',L, ' s=',s])), fontsize=18)
for si in xrange(nsamples):
print "sample",si,"out of",nsamples
#evolve a while before sampling the next tree
pop.evolve(dt)
#draw a sample from the population, convert its genealogy to a BioPython tree object and plot
tree = pop.genealogy.get_tree(L/2)
subtree = tree.create_subtree_from_keys(rd.sample(tree.leafs,sample_size)).to_Biopython_tree()
subtree.ladderize()
plt.subplot(3,1,si+1)
Phylo.draw(subtree,label_func=lambda x:"")
plt.draw()
plt.savefig("".join(map(str,['tree_', 'N=',N,'_r=',r,'_L=',L, '_s=',s,'.pdf'])))
| gpl-3.0 |
rsivapr/scikit-learn | examples/decomposition/plot_pca_3d.py | 8 | 2410 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pylab as pl
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = pl.figure(fig_num, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density, marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
pl.show()
| bsd-3-clause |
COOLMASON/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
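# Quick sketch of the interpolation above (hypothetical values, not CPS data):
# with xs=[0, 10] and ps=[0.0, 1.0], Prob(5) interpolates to 0.5 and
# Value(0.5) maps back to 5.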
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
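    # for a normal distribution the interquartile range is about 1.349 sigma
    # (2 * 0.6745 sigma), so dividing the IQR by 1.349 yields a robust
    # estimate of sigma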
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
thejevans/pointSourceAnalysis | March2018/plot_ang_res_vs_bin_size.py | 1 | 4558 | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
import calculate
import sys
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import pickle
import time, socket
from glob import glob
import operator
if sys.argv[2][-3:] == 'npy': #distributed computing case
print ('########################################################################################')
print ('#### This job is running on {0}'.format(socket.gethostname()))
print ('########################################################################################')
print (' ')
start_time = time.time()
bin_diameter = float(sys.argv[4])
bin_radians = np.radians(bin_diameter)
index = -1 * float(sys.argv[5])
mc = np.load(sys.argv[2])
bg = np.load(sys.argv[3])
time_window = 365 * 24 * 60 * 60
source = {'name':'Milagro 1908',
'sigma':np.radians(0.06),
'ra':np.radians(287.05),
'dec':np.radians(6.39)}
mc = mc[calculate.signal_cut(source, bin_radians, mc)]
print ('#### mc selection done')
mean, minimum, maximum = calculate.ang_res(mc)
print ('#### ang res done')
print ('#### bin {:.1f} done'.format(bin_diameter))
print ('#### numpy file loaded {0} ...'.format(sys.argv[2]))
print ('#### numpy file loaded {0} ...'.format(sys.argv[3]))
print ('#### pickle file dump to {0} ...'.format(sys.argv[1]))
print ('####')
f = open(sys.argv[1], 'wb')
pickle.dump((bin_diameter, index, mean, minimum, maximum), f)
f.close()
end_time = (time.time() - start_time)/60.
print ('#### ... it took {0} minutes'.format(end_time))
print ('#### DONE :D')
elif sys.argv[2][-1:] == '/': #recombine case
filenames = sorted(glob(''.join([sys.argv[2], '*.pkl'])))
data = []
for x in filenames:
f = open(x, 'rb')
data.append(pickle.load(f))
f.close()
f = open(sys.argv[1], 'wb')
pickle.dump(data, f)
f.close()
elif sys.argv[2][-3:] == 'pkl': #plot case
f = open(sys.argv[2], 'rb')
data = pickle.load(f)
f.close()
munged_data = []
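    # regroup the (bin diameter, index, mean, min, max) tuples by spectral
    # index so each entry carries the full bin-diameter sweep for one E^index
    # weighting; the three plots below then draw one curve per index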
for x in set(zip(*data)[1]):
temp = ([],x,[],[],[])
for y in data:
if y[1] == x:
temp[0].append(y[0])
temp[2].append(np.degrees(y[2]))
temp[3].append(np.degrees(y[3]))
temp[4].append(np.degrees(y[4]))
munged_data.append(temp)
munged_data.sort(key=operator.itemgetter(1))
#mean
max_xs = []
for x in munged_data[::-1]:
plt.step(x[0], x[2], label = r'$E^{'+str(x[1])+r'}$')
max_index = x[2].index(max(x[2]))
max_xs.append(x[0][max_index])
plt.grid(True)
plt.xlabel(r'bin diameter $[^{\circ}]$')
plt.ylabel(r'mean delta angle $[^{\circ}]$')
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.legend(loc='best')
ax = plt.gca()
ax2 = ax.twiny()
ax2.set_xticks(max_xs)
ax2.set_xlim(ax.get_xlim())
ax2.tick_params(axis='x', labelsize=8)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.savefig(''.join([sys.argv[1], '_mean.pdf']))
plt.close()
#max
max_xs = []
for x in munged_data[::-1]:
plt.step(x[0], x[4], label = r'$E^{'+str(x[1])+r'}$')
max_index = x[4].index(max(x[4]))
max_xs.append(x[0][max_index])
plt.grid(True)
plt.xlabel(r'bin diameter $[^{\circ}]$')
plt.ylabel(r'max delta angle $[^{\circ}]$')
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.legend(loc='best')
ax = plt.gca()
ax2 = ax.twiny()
ax2.set_xticks(max_xs)
ax2.set_xlim(ax.get_xlim())
ax2.tick_params(axis='x', labelsize=8)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.savefig(''.join([sys.argv[1], '_max.pdf']))
plt.close()
#min
max_xs = []
for x in munged_data[::-1]:
plt.step(x[0], x[3], label = r'$E^{'+str(x[1])+r'}$')
max_index = x[3].index(max(x[3]))
max_xs.append(x[0][max_index])
plt.grid(True)
plt.xlabel(r'bin diameter $[^{\circ}]$')
plt.ylabel(r'min delta angle $[^{\circ}]$')
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.legend(loc='best')
ax = plt.gca()
ax2 = ax.twiny()
ax2.set_xticks(max_xs)
ax2.set_xlim(ax.get_xlim())
ax2.tick_params(axis='x', labelsize=8)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.savefig(''.join([sys.argv[1], '_min.pdf']))
plt.close()
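# Usage sketch inferred from the argv handling above (the file names here are hypothetical):
#   distributed case : plot_ang_res_vs_bin_size.py out.pkl mc.npy bg.npy <bin_diameter_deg> <spectral_index>
#   recombine case   : plot_ang_res_vs_bin_size.py combined.pkl path/to/pickles/
#   plot case        : plot_ang_res_vs_bin_size.py plot_prefix combined.pkl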
| gpl-3.0 |
shenyp09/nucnet-tools-code | my_examples/my_analysis/result/abund_with_same_a.py | 1 | 2431 | #!/usr/bin/env python
#
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sys import argv
from mpl_toolkits.axes_grid.inset_locator import inset_axes
dirbase1 = "0823_0"
dirbase2 = "0823_1"
input1 = os.listdir(dirbase1 + "/" + dirbase1 + "_ABUND")
input2 = os.listdir(dirbase2 + "/" + dirbase2 + "_ABUND")
try:
input1.remove('.DS_Store')
input2.remove('.DS_Store')
except Exception, e:
pass
properties = np.array(np.loadtxt("properties.txt", dtype={'names': ('label', 'time', 'T(GK)', 'rho(cgs)'),
'formats': (np.int, np.float, np.float, np.float)}).tolist()).transpose()
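# properties.txt is assumed to hold whitespace-separated columns matching the dtype above
# (integer label, time, T(GK), rho(cgs)), one row per snapshot, e.g. (hypothetical values):
# 0 1.0e-05 9.5 1.0e+04
# 1 2.0e-05 9.1 8.0e+03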
# print properties
time = []
temperature = []
density = []
# for fname in finput:
for ii in xrange(0, len(input1)):
fname1 = input1[ii]
fname2 = input2[ii]
time.append(properties[1][ii])
temperature.append(properties[2][ii])
density.append(properties[3][ii])
# print fname
c = np.loadtxt("./dot_abund/" + fname, dtype={'names': ('a', 'abund', 'mass frac', 'norm abund'),
'formats': (np.int, np.float, np.float, np.float)})
c = np.array(c.tolist()).transpose()
plt.clf()
plt.yscale('log', nonposy='clip')
plt.ylabel("Abundance per nucleon")
plt.xlabel("A")
axes = plt.gca()
axes.set_ylim([1e-10, 1])
plt.plot(c[0], c[1], ls='-', color='black',
marker='.', mec='blue', mew=2., ms=10.)
# this is an inset axes over the main axes
inset = inset_axes(axes,
width="40%", # width = 30% of parent_bbox
height=1, # height : 1 inch
loc=1)
# n, bins, patches = plt.hist(s, 400, normed=1)
plt.ylabel('T(GK)')
plt.xlabel('time(s)')
# print properties[1]
plt.plot(time, temperature)
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
ax = plt.gca()
ax.set_xlim([1e-5, 1e16])
ax.set_ylim([1e-7, 1])
plt.xticks(rotation=45)
ax2 = plt.twinx() # this is the important function
ax2.plot(time, density)
ax2.set_yscale('log')
ax2.set_xlim([1e-5, 1e16])
ax2.set_ylim([1e-12, 1e4])
ax2.set_ylabel('$\\rho$(cgs)')
# n, bins, patches = plt.hist(s, 400, normed=1)
# plt.xticks(100)
# plt.yticks([])
plt.savefig("dot_abund_png/" + fname + ".png")
| gpl-3.0 |
chenyyx/scikit-learn-doc-zh | doc/zh/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
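# A sketch of how the mocked fetcher would typically be exercised in the doctests
# (fetch_mldata is the era-appropriate sklearn API; the call below is illustrative only):
# >>> from sklearn.datasets import fetch_mldata
# >>> mnist = fetch_mldata('MNIST original', data_home=custom_data_home)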
| gpl-3.0 |
shyamalschandra/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
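Examples
--------
A minimal shape-only sketch (the values themselves depend on the random state):
>>> from sklearn.datasets.samples_generator import make_classification
>>> X, y = make_classification(random_state=0)
>>> X.shape
(100, 20)
>>> y.shape
(100,)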
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
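Examples
--------
A shape-only sketch with the defaults (dense indicator output; values depend on the random state):
>>> from sklearn.datasets.samples_generator import make_multilabel_classification
>>> X, Y = make_multilabel_classification(random_state=0)
>>> X.shape
(100, 20)
>>> Y.shape
(100, 5)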
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
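Examples
--------
A shape-only sketch (coefficients and targets depend on the random state):
>>> from sklearn.datasets.samples_generator import make_regression
>>> X, y = make_regression(n_samples=50, n_features=4, random_state=0)
>>> X.shape
(50, 4)
>>> y.shape
(50,)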
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
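Examples
--------
A shape-only sketch (point coordinates depend on the random state):
>>> from sklearn.datasets.samples_generator import make_circles
>>> X, y = make_circles(n_samples=100, random_state=0)
>>> X.shape
(100, 2)
>>> set(y) == {0, 1}
True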
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
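Examples
--------
A shape-only sketch (sample ordering depends on the random state when shuffled):
>>> from sklearn.datasets.samples_generator import make_moons
>>> X, y = make_moons(n_samples=100, random_state=0)
>>> X.shape
(100, 2)
>>> y.shape
(100,)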
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
# the outer arc is stacked first, so it gets label 0 and the inner arc label 1
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
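Examples
--------
A shape-only sketch (inputs are uniform on [0, 1]; values depend on the random state):
>>> from sklearn.datasets.samples_generator import make_friedman1
>>> X, y = make_friedman1(n_samples=50, n_features=5, random_state=0)
>>> X.shape
(50, 5)
>>> y.shape
(50,)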
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
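Examples
--------
A shape-only sketch (entries depend on the random state):
>>> from sklearn.datasets.samples_generator import make_spd_matrix
>>> make_spd_matrix(n_dim=4, random_state=0).shape
(4, 4)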
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
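Examples
--------
A shape-only sketch (point positions depend on the random state):
>>> from sklearn.datasets.samples_generator import make_swiss_roll
>>> X, t = make_swiss_roll(n_samples=100, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)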
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
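Examples
--------
A shape-only sketch (samples depend on the random state):
>>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(n_samples=90, n_classes=3, random_state=0)
>>> X.shape
(90, 2)
>>> y.shape
(90,)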
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
jhelie/order_param | order_param.py | 1 | 106529 | ################################################################################################################################################
# IMPORT MODULES
################################################################################################################################################
#import general python tools
import argparse
import operator
from operator import itemgetter
import sys, os, shutil
import os.path
import math
#import python extensions/packages to manipulate arrays
import numpy #to manipulate arrays
import scipy #mathematical tools and recipes
import MDAnalysis
#import graph building module
import matplotlib as mpl
mpl.use('Agg')
import pylab as plt
import matplotlib.colors as mcolors
import matplotlib.cm as cm #colours library
import matplotlib.ticker
from matplotlib.ticker import MaxNLocator
from matplotlib.font_manager import FontProperties
fontP=FontProperties()
#import MDAnalysis
import MDAnalysis
from MDAnalysis import *
import MDAnalysis.analysis
import MDAnalysis.analysis.leaflet
import MDAnalysis.analysis.distances
#set MDAnalysis to use periodic boundary conditions
MDAnalysis.core.flags['use_periodic_selections'] = True
MDAnalysis.core.flags['use_KDTree_routines'] = False
################################################################################################################################################
# RETRIEVE USER INPUTS
################################################################################################################################################
#create parser
#=============
version_nb="0.1.2"
parser = argparse.ArgumentParser(prog='order_param', usage='', add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter, description=\
'''
**********************************************
v''' + version_nb + '''
author: Jean Helie ([email protected])
git: https://github.com/jhelie/order_param
**********************************************
[ Description ]
This script computes the second rank order parameter as defined by:
P2 = 0.5*(3*<cos**2(theta)> - 1)
where theta is the angle between the bond and the bilayer normal.
P2 = 1 perfect alignment with the bilayer normal
P2 = 0 random orientation
P2 = -0.5 anti-alignment
The script produces the following outputs:
- (time evolution of) P2 for each lipid species in each leaflet
- (time evolution of) P2 for each flipflopping lipid (if any, see note 3)
[ Requirements ]
The following python module(s) are needed:
- MDAnalysis
[ Notes ]
1. It's a good idea to pre-process the xtc first:
- use trjconv with the -pbc mol option
- only output the relevant lipids (e.g. no water but no cholesterol either)
2. The z axis is considered to be the bilayer normal. The more your system deforms, the further from
the actual bilayer normal the z axis will be.
3. In case lipids flipflop during the trajectory, a file listing them can be supplied via the -i flag.
This file can be the output of the ff_detect script and should follow the format:
'resname,resid,starting_leaflet' format on each line e.g. 'POPC,145,lower'
If flipflopping lipids are not identified they may add some significant noise to the results
4. The code can easily be updated to add more lipids, for now the following tails can be dealt with:
- Martini: DHPC,DHPE,DLPC,DLPE,DAPC,DUPC,DPPC,DPPE,DPPS,DPPG,DSPC,DSPE,POPC,POPE,POPS,POPG,PPCS,PIP2,PIP3,GM3
5. The order parameter calculated for each (handled) lipid species can be visualised with VMD.
This can be done either with pdb files (output frequency controlled via the -w flag) or with the
xtc trajectory.
- pdb file: the order parameter info is stored in the beta factor column. Just open
the pdb with VMD and choose Draw Style > Coloring Method > Beta
- xtc file: the order parameter info is stored in a .txt file in /3_VMD/ and you can load it into
the user field in the xtc by sourcing the script 'set_user_fields.tcl' and running the
procedure 'set_order_param'
6. The colour associated with each lipid species can be defined by supplying a colour file containing
'resname,colour' on each line (a line with a colour MUST be defined for all species).
Colours can be specified using single letter code (e.g. 'r'), hex code or the name of colormap.
In case a colormap is used, its name must be specified as the colour for each lipid species - type
'order_param --colour_maps' to see a list of the standard colour maps.
If no colour is used the 'jet' colour map is used by default.
[ Usage ]
Option Default Description
-----------------------------------------------------
-f : structure file [.gro]
-x : trajectory file [.xtc] (optional)
-c : colour definition file, see note 6
-o : name of output folder
-b : beginning time (ns) (the bilayer must exist by then!)
-e : ending time (ns)
-t 10 : process every t-frames
-w : write annotated pdbs every [w] processed frames (optional, see note 5)
--smooth : nb of points to use for data smoothing (optional)
Lipids identification
-----------------------------------------------------
--flipflops : input file with flipflopping lipids, see note 3
--forcefield : forcefield options, see note 3
--no-opt : do not attempt to optimise leaflet identification (useful for huge system)
Other options
-----------------------------------------------------
--colour_maps : show list of standard colour maps, see note 6
--version : show version number and exit
-h, --help : show this menu and exit
''')
#data options
parser.add_argument('-f', nargs=1, dest='grofilename', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-x', nargs=1, dest='xtcfilename', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-c', nargs=1, dest='colour_file', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-o', nargs=1, dest='output_folder', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-b', nargs=1, dest='t_start', default=[-1], type=int, help=argparse.SUPPRESS)
parser.add_argument('-e', nargs=1, dest='t_end', default=[10000000000000], type=int, help=argparse.SUPPRESS)
parser.add_argument('-t', nargs=1, dest='frames_dt', default=[10], type=int, help=argparse.SUPPRESS)
parser.add_argument('-w', nargs=1, dest='frames_write_dt', default=[1000000000000000], type=int, help=argparse.SUPPRESS)
parser.add_argument('--smooth', nargs=1, dest='nb_smoothing', default=[0], type=int, help=argparse.SUPPRESS)
#lipids identification
parser.add_argument('--flipflops', nargs=1, dest='selection_file_ff', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('--forcefield', dest='forcefield_opt', choices=['martini'], default='martini', help=argparse.SUPPRESS)
parser.add_argument('--no-opt', dest='cutoff_leaflet', action='store_false', help=argparse.SUPPRESS)
#other options
parser.add_argument('--colour_maps', dest='show_colour_map', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--version', action='version', version='%(prog)s v' + version_nb, help=argparse.SUPPRESS)
parser.add_argument('-h','--help', action='help', help=argparse.SUPPRESS)
#store inputs
#============
args=parser.parse_args()
args.grofilename=args.grofilename[0]
args.xtcfilename=args.xtcfilename[0]
args.colour_file=args.colour_file[0]
args.output_folder=args.output_folder[0]
args.frames_dt=args.frames_dt[0]
args.frames_write_dt=args.frames_write_dt[0]
args.t_start=args.t_start[0]
args.t_end=args.t_end[0]
args.selection_file_ff=args.selection_file_ff[0]
args.nb_smoothing=args.nb_smoothing[0]
#show colour maps
#----------------
if args.show_colour_map:
print ""
print "The following standard matplotlib color maps can be used:"
print ""
print "Spectral, summer, coolwarm, pink_r, Set1, Set2, Set3, brg_r, Dark2, hot, PuOr_r, afmhot_r, terrain_r,"
print "PuBuGn_r, RdPu, gist_ncar_r, gist_yarg_r, Dark2_r, YlGnBu, RdYlBu, hot_r, gist_rainbow_r, gist_stern, "
print "gnuplot_r, cool_r, cool, gray, copper_r, Greens_r, GnBu, gist_ncar, spring_r, gist_rainbow, RdYlBu_r, "
print "gist_heat_r, OrRd_r, CMRmap, bone, gist_stern_r, RdYlGn, Pastel2_r, spring, terrain, YlOrRd_r, Set2_r, "
print "winter_r, PuBu, RdGy_r, spectral, flag_r, jet_r, RdPu_r, Purples_r, gist_yarg, BuGn, Paired_r, hsv_r, "
print "bwr, cubehelix, YlOrRd, Greens, PRGn, gist_heat, spectral_r, Paired, hsv, Oranges_r, prism_r, Pastel2, "
print "Pastel1_r, Pastel1, gray_r, PuRd_r, Spectral_r, gnuplot2_r, BuPu, YlGnBu_r, copper, gist_earth_r, "
print "Set3_r, OrRd, PuBu_r, ocean_r, brg, gnuplot2, jet, bone_r, gist_earth, Oranges, RdYlGn_r, PiYG,"
print "CMRmap_r, YlGn, binary_r, gist_gray_r, Accent, BuPu_r, gist_gray, flag, seismic_r, RdBu_r, BrBG, Reds,"
print "BuGn_r, summer_r, GnBu_r, BrBG_r, Reds_r, RdGy, PuRd, Accent_r, Blues, Greys, autumn, cubehelix_r, "
print "nipy_spectral_r, PRGn_r, Greys_r, pink, binary, winter, gnuplot, RdBu, prism, YlOrBr, coolwarm_r,"
print "rainbow_r, rainbow, PiYG_r, YlGn_r, Blues_r, YlOrBr_r, seismic, Purples, bwr_r, autumn_r, ocean,"
print "Set1_r, PuOr, PuBuGn, nipy_spectral, afmhot."
print ""
sys.exit(0)
#sanity check
#============
if not os.path.isfile(args.grofilename):
print "Error: file " + str(args.grofilename) + " not found."
sys.exit(1)
if args.colour_file!="no" and not os.path.isfile(args.colour_file):
print "Error: file " + str(args.colour_file) + " not found."
sys.exit(1)
if args.selection_file_ff!="no" and not os.path.isfile(args.selection_file_ff):
print "Error: file " + str(args.selection_file_ff) + " not found."
sys.exit(1)
if args.xtcfilename=="no":
if '-t' in sys.argv:
print "Error: -t option specified but no xtc file specified."
sys.exit(1)
elif '-b' in sys.argv:
print "Error: -b option specified but no xtc file specified."
sys.exit(1)
elif '-e' in sys.argv:
print "Error: -e option specified but no xtc file specified."
sys.exit(1)
elif '--smooth' in sys.argv:
print "Error: --smooth option specified but no xtc file specified."
sys.exit(1)
elif not os.path.isfile(args.xtcfilename):
print "Error: file " + str(args.xtcfilename) + " not found."
sys.exit(1)
#create folders and log file
#===========================
if args.output_folder=="no":
if args.xtcfilename=="no":
args.output_folder="order_param_" + args.grofilename[:-4]
else:
args.output_folder="order_param_" + args.xtcfilename[:-4]
if os.path.isdir(args.output_folder):
print "Error: folder " + str(args.output_folder) + " already exists, choose a different output name via -o."
sys.exit(1)
else:
#create folders
#--------------
os.mkdir(args.output_folder)
	#1 non flipflopping lipids
os.mkdir(args.output_folder + "/1_nff")
if args.xtcfilename!="no":
os.mkdir(args.output_folder + "/1_nff/xvg")
os.mkdir(args.output_folder + "/1_nff/png")
if args.nb_smoothing>1:
os.mkdir(args.output_folder + "/1_nff/smoothed")
os.mkdir(args.output_folder + "/1_nff/smoothed/png")
os.mkdir(args.output_folder + "/1_nff/smoothed/xvg")
#2 snapshots
os.mkdir(args.output_folder + "/2_snapshots")
#3 vmd
if args.xtcfilename!="no":
os.mkdir(args.output_folder + "/3_VMD")
#4 flipflopping lipids
if args.selection_file_ff!="no":
os.mkdir(args.output_folder + "/4_ff")
if args.xtcfilename!="no":
os.mkdir(args.output_folder + "/4_ff/xvg")
os.mkdir(args.output_folder + "/4_ff/png")
if args.nb_smoothing>1:
os.mkdir(args.output_folder + "/4_ff/smoothed")
os.mkdir(args.output_folder + "/4_ff/smoothed/png")
os.mkdir(args.output_folder + "/4_ff/smoothed/xvg")
#create log
#----------
filename_log=os.getcwd() + '/' + str(args.output_folder) + '/order_param.log'
output_log=open(filename_log, 'w')
output_log.write("[order_param v" + str(version_nb) + "]\n")
output_log.write("\nThis folder and its content were created using the following command:\n\n")
tmp_log="python order_param.py"
for c in sys.argv[1:]:
tmp_log+=" " + c
output_log.write(tmp_log + "\n")
output_log.close()
#copy input files
#----------------
if args.colour_file!="no":
shutil.copy2(args.colour_file,args.output_folder + "/")
if args.selection_file_ff!="no":
shutil.copy2(args.selection_file_ff,args.output_folder + "/")
################################################################################################################################################
# DATA LOADING
################################################################################################################################################
# Load universe
#==============
if args.xtcfilename=="no":
print "\nLoading file..."
U=Universe(args.grofilename)
all_atoms=U.selectAtoms("all")
nb_atoms=all_atoms.numberOfAtoms()
nb_frames_xtc=1
nb_frames_processed=1
else:
print "\nLoading trajectory..."
U=Universe(args.grofilename, args.xtcfilename)
all_atoms=U.selectAtoms("all")
nb_atoms=all_atoms.numberOfAtoms()
nb_frames_xtc=U.trajectory.numframes
nb_frames_processed=0
U.trajectory.rewind()
# Identify ff lipids
#===================
lipids_ff_nb=0
lipids_ff_info={}
lipids_ff_species=[]
lipids_ff_leaflet=[]
lipids_ff_u2l_index=[]
lipids_ff_l2u_index=[]
lipids_selection_ff={}
lipids_selection_ff_VMD_string={}
leaflet_selection_string={}
leaflet_selection_string[args.forcefield_opt]="name PO4 or name PO3 or name B1A" #martini
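#these are the headgroup beads used as reference particles to split the bilayer into leaflets:
#PO4 for most Martini phospholipids, PO3 for PIP2/PIP3 and B1A for GM3 (cf. the bond definitions below)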
#case: read specified ff lipids selection file
if args.selection_file_ff!="no":
print "\nReading selection file for flipflopping lipids..."
with open(args.selection_file_ff) as f:
lines = f.readlines()
lipids_ff_nb=len(lines)
print " -found " + str(lipids_ff_nb) + " flipflopping lipids"
sele_all_nff_string=leaflet_selection_string[args.forcefield_opt] + " and not ("
for l in range(0,lipids_ff_nb):
try:
			#read the 3 comma separated fields
l_type=lines[l].split(',')[0]
l_indx=int(lines[l].split(',')[1])
l_start=lines[l].split(',')[2][0:-1]
#build leaflet dictionary
if l_start not in lipids_ff_leaflet:
lipids_ff_leaflet.append(l_start)
#create index list of u2l and l2u ff lipids
if l_start=="upper":
lipids_ff_u2l_index.append(l)
elif l_start=="lower":
lipids_ff_l2u_index.append(l)
else:
print "unknown starting leaflet '" + str(l_start) + "'."
sys.exit(1)
			#build species dictionary
if l_type not in lipids_ff_species:
lipids_ff_species.append(l_type)
#build MDAnalysis atom group
lipids_ff_info[l]=[l_type,l_indx,l_start]
lipids_selection_ff[l]=U.selectAtoms("resname " + str(l_type) + " and resnum " + str(l_indx))
if lipids_selection_ff[l].numberOfAtoms()==0:
sys.exit(1)
#build VMD selection string
lipids_selection_ff_VMD_string[l]="resname " + str(lipids_ff_info[l][0]) + " and resid " + str(lipids_ff_info[l][1])
#build selection string to select all PO4 without the flipflopping ones
if l==0:
sele_all_nff_string+="(resname " + str(l_type) + " and resnum " + str(l_indx) + ")"
else:
sele_all_nff_string+=" or (resname " + str(l_type) + " and resnum " + str(l_indx) + ")"
except:
print "Error: invalid flipflopping lipid selection string on line " + str(l+1) + ": '" + lines[l][:-1] + "'"
sys.exit(1)
sele_all_nff_string+=")"
#case: no ff lipids selection file specified
else:
sele_all_nff_string=leaflet_selection_string[args.forcefield_opt]
# Identify nff leaflets
#======================
print "\nIdentifying leaflets..."
lipids_nff_sele={}
lipids_nff_sele_nb={}
for l in ["lower","upper","both"]:
lipids_nff_sele[l]={}
lipids_nff_sele_nb[l]={}
#identify lipids leaflet groups
if args.cutoff_leaflet:
print " -optimising cutoff..."
cutoff_value=MDAnalysis.analysis.leaflet.optimize_cutoff(U, sele_all_nff_string)
L=MDAnalysis.analysis.leaflet.LeafletFinder(U, sele_all_nff_string, cutoff_value[0])
else:
L=MDAnalysis.analysis.leaflet.LeafletFinder(U, sele_all_nff_string)
#process groups
if numpy.shape(L.groups())[0]<2:
print "Error: imposssible to identify 2 leaflets."
sys.exit(1)
else:
if L.group(0).centerOfGeometry()[2] > L.group(1).centerOfGeometry()[2]:
lipids_nff_sele["upper"]["all"]=L.group(0).residues.atoms
lipids_nff_sele["lower"]["all"]=L.group(1).residues.atoms
else:
lipids_nff_sele["upper"]["all"]=L.group(1).residues.atoms
lipids_nff_sele["lower"]["all"]=L.group(0).residues.atoms
for l in ["lower","upper"]:
lipids_nff_sele_nb[l]["all"]=lipids_nff_sele[l]["all"].numberOfResidues()
if numpy.shape(L.groups())[0]==2:
print " -found 2 leaflets: ", lipids_nff_sele["upper"]["all"].numberOfResidues(), '(upper) and ', lipids_nff_sele["lower"]["all"].numberOfResidues(), '(lower) lipids'
else:
other_lipids=0
for g in range(2, numpy.shape(L.groups())[0]):
other_lipids+=L.group(g).numberOfResidues()
print " -found " + str(numpy.shape(L.groups())[0]) + " groups: " + str(lipids_nff_sele["upper"]["all"].numberOfResidues()) + "(upper), " + str(lipids_nff_sele["lower"]["all"].numberOfResidues()) + "(lower) and " + str(other_lipids) + " (others) lipids respectively"
lipids_nff_sele["both"]["all"]=lipids_nff_sele["lower"]["all"]+lipids_nff_sele["upper"]["all"]
lipids_nff_sele_nb_atoms=lipids_nff_sele["both"]["all"].numberOfAtoms()
# Identify lipid species
#=======================
print "\nIdentifying membrane composition..."
#species identification
lipids_nff_species={}
for l in ["lower","upper"]:
lipids_nff_species[l]=list(numpy.unique(lipids_nff_sele[l]["all"].resnames()))
lipids_nff_species["both"]=numpy.unique(lipids_nff_species["lower"]+lipids_nff_species["upper"])
#count species
lipids_nff_species_nb={}
for l in ["lower","upper","both"]:
lipids_nff_species_nb[l]=numpy.size(lipids_nff_species[l])
#selection creation
for l in ["lower","upper"]:
for s in lipids_nff_species[l]:
		#current species
lipids_nff_sele[l][s]=lipids_nff_sele[l]["all"].selectAtoms("resname " + str(s))
lipids_nff_sele_nb[l][s]=lipids_nff_sele[l][s].numberOfResidues()
#species ratios
membrane_comp={}
lipids_nff_ratio={}
for l in ["lower","upper"]:
membrane_comp[l]=" -" + str(l) + ":"
lipids_nff_ratio[l]={}
for s in lipids_nff_species[l]:
lipids_nff_ratio[l][s]=round(lipids_nff_sele_nb[l][s]/float(lipids_nff_sele_nb[l]["all"])*100,1)
membrane_comp[l]+=" " + s + " (" + str(lipids_nff_ratio[l][s]) + "%)"
print membrane_comp[l]
################################################################################################################################################
# LIPIDS DICTIONARIES
################################################################################################################################################
#define all lipids taken into account
#====================================
#lipids handled
lipids_possible={}
lipids_possible[args.forcefield_opt]=['DHPC','DHPE','DLPC','DLPE','DAPC','DUPC','DPPC','DPPE','DPPS','DPPG','DSPC','DSPE','POPC','POPE','POPS','POPG','PPCS','PIP2','PIP3','GM3']
#case: martini
#-------------
if args.forcefield_opt=="martini":
#define possible head, link and tail sequences
head_PC=" NC3-PO4"
head_PE=" NH3-PO4"
head_PS=" CNO-PO4"
head_PG=" GL0-PO4"
head_I2=" PO1-RP1 PO2-RP1 PO2-RP2 RP1-RP2 RP1-RP3 RP2-RP3 RP3-PO3"
head_I3=" PO0-RP2 PO1-RP1 PO2-RP1 PO2-RP2 RP1-RP2 RP1-RP3 RP2-RP3 RP3-PO3"
head_GM=" very-big"
link1=" PO4-GL1 GL1-GL2 GL1-C1A GL2-C1B"
link2=" PO4-GL1 GL1-GL2 GL1-D1A GL2-D1B"
link3=" PO4-AM1 AM1-AM2 AM1-C1A AM2-D1B"
link4=" PO3-GL1 GL1-GL2 GL1-C1A GL2-C1B"
link5=" B1A-AM2 AM1-AM2 AM1-C1A AM2-D1B"
tail_DH=" C1A-C2A C1B-C2B"
tail_DL=" C1A-C2A C2A-C3A C1B-C2B C2B-C3B"
tail_DP=" C1A-C2A C2A-C3A C3A-C4A C1B-C2B C2B-C3B C3B-C4B"
tail_DU=" C1A-D2A D2A-D3A D3A-C4A C1B-D2B D2B-D3B D3B-C4B"
tail_DS=" C1A-C2A C2A-C3A C3A-C4A C4A-C5A C1B-C2B C2B-C3B C3B-C4B C4B-C5B"
tail_DO=" C1A-C2A C2A-D3A D3A-C4A C4A-C5A C1B-C2B C2B-D3B D3B-C4B C4B-C5B"
tail_DA=" D1A-D2A D2A-D3A D3A-D4A D4A-C5A D1B-D2B D2B-D3B D3B-D4B D4B-C5B"
tail_PO=" C1A-C2A C2A-C3A C3A-C4A C1B-C2B C2B-D3B D3B-C4B C4B-C5B"
tail_PI=" C1A-C2A C2A-C3A C3A-C4A C1B-C2B C2B-C3B C3B-C4B C4B-C5B"
tail_PP=" C1A-C2A C2A-C3A C3A-C4A D1B-C2B C2B-C3B C3B-C4B"
#define lipids composition
bond_names={}
bond_names['DHPC']=head_PC + link1 + tail_DH
bond_names['DHPE']=head_PE + link1 + tail_DH
bond_names['DLPC']=head_PC + link1 + tail_DL
bond_names['DLPE']=head_PE + link1 + tail_DL
bond_names['DAPC']=head_PC + link2 + tail_DA
bond_names['DUPC']=head_PC + link1 + tail_DU
bond_names['DPPC']=head_PC + link1 + tail_DP
bond_names['DPPE']=head_PE + link1 + tail_DP
bond_names['DPPS']=head_PS + link1 + tail_DP
bond_names['DPPG']=head_PG + link1 + tail_DP
bond_names['DSPC']=head_PC + link1 + tail_DS
bond_names['DSPE']=head_PE + link1 + tail_DS
bond_names['POPC']=head_PC + link1 + tail_PO
bond_names['POPE']=head_PE + link1 + tail_PO
bond_names['POPS']=head_PS + link1 + tail_PO
bond_names['POPG']=head_PG + link1 + tail_PO
bond_names['PPCS']=head_PC + link3 + tail_PP
bond_names['PIP2']=head_I2 + link4 + tail_PI
bond_names['PIP3']=head_I3 + link4 + tail_PI
bond_names['GM3']=head_GM + link5 + tail_PP
#define tail boundaries (start_A, length_A, start_B, length_B)
tail_boundaries={}
tail_boundaries['DHPC']=[5,1,6,1]
tail_boundaries['DHPE']=[5,1,6,1]
tail_boundaries['DLPC']=[5,2,7,2]
tail_boundaries['DLPE']=[5,2,7,2]
tail_boundaries['DAPC']=[5,4,9,4]
tail_boundaries['DUPC']=[5,4,9,4]
tail_boundaries['DPPC']=[5,3,8,3]
tail_boundaries['DPPG']=[5,3,8,3]
tail_boundaries['DPPE']=[5,3,8,3]
tail_boundaries['DPPS']=[5,3,8,3]
tail_boundaries['DOPC']=[5,4,9,4]
tail_boundaries['DOPG']=[5,4,9,4]
tail_boundaries['DOPE']=[5,4,9,4]
tail_boundaries['DOPS']=[5,4,9,4]
tail_boundaries['DSPC']=[5,4,9,4]
tail_boundaries['DSPE']=[5,4,9,4]
tail_boundaries['POPC']=[5,3,8,4]
tail_boundaries['POPE']=[5,3,8,4]
tail_boundaries['POPS']=[5,3,8,4]
tail_boundaries['POPG']=[5,3,8,4]
tail_boundaries['PPCS']=[5,3,8,3]
tail_boundaries['PIP2']=[11,3,14,4]
tail_boundaries['PIP3']=[12,3,15,4]
tail_boundaries['GM3']=[5,3,8,3]
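	#e.g. for POPC the bonds listed in bond_names are: NC3-PO4, PO4-GL1, GL1-GL2, GL1-C1A, GL2-C1B
	#(head + linker, indices 0-4), then C1A-C2A, C2A-C3A, C3A-C4A (tail A, indices 5-7) and C1B-C2B,
	#C2B-D3B, D3B-C4B, C4B-C5B (tail B, indices 8-11) - hence tail_boundaries['POPC']=[5,3,8,4]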
#deal with those actually present
#================================
#create list of lipids to take into account
#------------------------------------------
lipids_handled={}
for l in ["lower","upper"]:
lipids_handled[l]=[]
for s in lipids_nff_species[l]:
if s in lipids_possible[args.forcefield_opt]:
lipids_handled[l].append(s)
if len(lipids_handled["lower"])==0 and len(lipids_handled["upper"])==0:
print "Error: none of the lipid species can be taken into account - double check the forcefield option, see order_param -h."
sys.exit(1)
lipids_handled["both"]=numpy.unique(lipids_handled["lower"]+lipids_handled["upper"])
#display them
#------------
tmp_lip=lipids_handled["both"][0]
for s in lipids_handled["both"][1:]:
tmp_lip+=","+str(s)
print "\nLipids handled: ", tmp_lip
print "\nInitialising data structures..."
#create VMD selection string for each species
#-------------------------------------------
lipids_selection_nff={}
lipids_selection_nff_VMD_string={}
for l in ["lower","upper"]:
lipids_selection_nff[l]={}
lipids_selection_nff_VMD_string[l]={}
for s in lipids_handled[l]:
lipids_selection_nff[l][s]={}
lipids_selection_nff_VMD_string[l][s]={}
for r_index in range(0,lipids_nff_sele_nb[l][s]):
lipids_selection_nff[l][s][r_index]=lipids_nff_sele[l][s].selectAtoms("resnum " + str(lipids_nff_sele[l][s].resnums()[r_index]))
lipids_selection_nff_VMD_string[l][s][r_index]="resname " + str(s) + " and resid " + str(lipids_nff_sele[l][s].resnums()[r_index])
#create bond list for each lipid species
#--------------------------------------
bonds={}
for s in lipids_handled["both"]:
bonds[s]=[]
for bond_name in bond_names[s].split():
bonds[s].append(bond_name.split("-"))
#associate colours to lipids
#===========================
#color maps dictionaries
colours_lipids_nb=0
colours_lipids={}
colours_lipids_list=[]
colours_lipids_map="jet"
colormaps_possible=['Spectral', 'summer', 'coolwarm', 'pink_r', 'Set1', 'Set2', 'Set3', 'brg_r', 'Dark2', 'hot', 'PuOr_r', 'afmhot_r', 'terrain_r', 'PuBuGn_r', 'RdPu', 'gist_ncar_r', 'gist_yarg_r', 'Dark2_r', 'YlGnBu', 'RdYlBu', 'hot_r', 'gist_rainbow_r', 'gist_stern', 'gnuplot_r', 'cool_r', 'cool', 'gray', 'copper_r', 'Greens_r', 'GnBu', 'gist_ncar', 'spring_r', 'gist_rainbow', 'RdYlBu_r', 'gist_heat_r', 'OrRd_r', 'CMRmap', 'bone', 'gist_stern_r', 'RdYlGn', 'Pastel2_r', 'spring', 'terrain', 'YlOrRd_r', 'Set2_r', 'winter_r', 'PuBu', 'RdGy_r', 'spectral', 'flag_r', 'jet_r', 'RdPu_r', 'Purples_r', 'gist_yarg', 'BuGn', 'Paired_r', 'hsv_r', 'bwr', 'cubehelix', 'YlOrRd', 'Greens', 'PRGn', 'gist_heat', 'spectral_r', 'Paired', 'hsv', 'Oranges_r', 'prism_r', 'Pastel2', 'Pastel1_r', 'Pastel1', 'gray_r', 'PuRd_r', 'Spectral_r', 'gnuplot2_r', 'BuPu', 'YlGnBu_r', 'copper', 'gist_earth_r', 'Set3_r', 'OrRd', 'PuBu_r', 'ocean_r', 'brg', 'gnuplot2', 'jet', 'bone_r', 'gist_earth', 'Oranges', 'RdYlGn_r', 'PiYG', 'CMRmap_r', 'YlGn', 'binary_r', 'gist_gray_r', 'Accent', 'BuPu_r', 'gist_gray', 'flag', 'seismic_r', 'RdBu_r', 'BrBG', 'Reds', 'BuGn_r', 'summer_r', 'GnBu_r', 'BrBG_r', 'Reds_r', 'RdGy', 'PuRd', 'Accent_r', 'Blues', 'Greys', 'autumn', 'cubehelix_r', 'nipy_spectral_r', 'PRGn_r', 'Greys_r', 'pink', 'binary', 'winter', 'gnuplot', 'RdBu', 'prism', 'YlOrBr', 'coolwarm_r', 'rainbow_r', 'rainbow', 'PiYG_r', 'YlGn_r', 'Blues_r', 'YlOrBr_r', 'seismic', 'Purples', 'bwr_r', 'autumn_r', 'ocean', 'Set1_r', 'PuOr', 'PuBuGn', 'nipy_spectral', 'afmhot']
#case: colour definition file
#---------------------------
if args.colour_file!="no":
print "\nReading colour definition file..."
with open(args.colour_file) as f:
lines = f.readlines()
colours_lipids_nb=len(lines)
for line_index in range(0,colours_lipids_nb):
l_content=lines[line_index].split(',')
		colours_lipids[l_content[0]]=l_content[1][:-1] #to get rid of the trailing newline char
#display results
print " -found the following colours definition:"
for s in colours_lipids.keys():
print " -" + str(s) + ": " + str(colours_lipids[s])
#check if a custom color map has been specified or not
if colours_lipids_nb>1 and len(numpy.unique(colours_lipids.values()))==1:
if numpy.unique(colours_lipids.values())[0] in colormaps_possible:
colours_lipids_map=numpy.unique(colours_lipids.values())[0]
else:
print "Error: either the same color was specified for all species or the color map '" + str(numpy.unique(colours_lipids.values())[0]) + "' is not valid."
sys.exit(1)
else:
colours_lipids_map="custom"
#check that all detected species have a colour specified
for s in lipids_handled["both"]:
if s not in colours_lipids.keys():
print "Error: no colour specified for " + str(s) + "."
sys.exit(1)
#case: generate colours from a colour map
#------------------------------------------
if colours_lipids_map!="custom":
tmp_cmap=cm.get_cmap(colours_lipids_map)
colours_lipids_value=tmp_cmap(numpy.linspace(0, 1, len(lipids_handled["both"])))
for l_index in range(0, len(lipids_handled["both"])):
colours_lipids[lipids_handled["both"][l_index]]=colours_lipids_value[l_index]
################################################################################################################################################
# DATA STRUCTURE: order parameters
################################################################################################################################################
#time
#----
time_stamp={}
time_sorted=[]
time_smooth=[]
#non flipflopping lipids
#-----------------------
#avg over lipids within a frame: full data
op_tailA_avg_frame={}
op_tailB_avg_frame={}
op_both_avg_frame={}
op_tailA_std_frame={}
op_tailB_std_frame={}
op_both_std_frame={}
#avg over lipids within a frame: sorted data
op_tailA_avg_frame_sorted={}
op_tailB_avg_frame_sorted={}
op_both_avg_frame_sorted={}
op_tailA_std_frame_sorted={}
op_tailB_std_frame_sorted={}
op_both_std_frame_sorted={}
for l in ["lower","upper"]:
op_tailA_avg_frame_sorted[l]={}
op_tailB_avg_frame_sorted[l]={}
op_both_avg_frame_sorted[l]={}
op_tailA_std_frame_sorted[l]={}
op_tailB_std_frame_sorted[l]={}
op_both_std_frame_sorted[l]={}
for s in lipids_handled[l]:
op_tailA_avg_frame_sorted[l][s]=[]
op_tailB_avg_frame_sorted[l][s]=[]
op_both_avg_frame_sorted[l][s]=[]
op_tailA_std_frame_sorted[l][s]=[]
op_tailB_std_frame_sorted[l][s]=[]
op_both_std_frame_sorted[l][s]=[]
#avg over lipids within a frame: smoothed data
op_tailA_avg_frame_smooth={}
op_tailB_avg_frame_smooth={}
op_both_avg_frame_smooth={}
op_tailA_std_frame_smooth={}
op_tailB_std_frame_smooth={}
op_both_std_frame_smooth={}
for l in ["lower","upper"]:
op_tailA_avg_frame_smooth[l]={}
op_tailB_avg_frame_smooth[l]={}
op_both_avg_frame_smooth[l]={}
op_tailA_std_frame_smooth[l]={}
op_tailB_std_frame_smooth[l]={}
op_both_std_frame_smooth[l]={}
#avg over time of frame avg
op_tailA_avg={}
op_tailB_avg={}
op_both_avg={}
op_tailA_std={}
op_tailB_std={}
op_both_std={}
for l in ["lower","upper"]:
op_tailA_avg[l]={}
op_tailB_avg[l]={}
op_both_avg[l]={}
op_tailA_std[l]={}
op_tailB_std[l]={}
op_both_std[l]={}
op_tailA_avg_frame[l]={}
op_tailB_avg_frame[l]={}
op_both_avg_frame[l]={}
op_tailA_std_frame[l]={}
op_tailB_std_frame[l]={}
op_both_std_frame[l]={}
for s in lipids_handled[l]:
op_tailA_avg_frame[l][s]={}
op_tailB_avg_frame[l][s]={}
op_both_avg_frame[l][s]={}
op_tailA_std_frame[l][s]={}
op_tailB_std_frame[l][s]={}
op_both_std_frame[l][s]={}
#store evolution of op for each lipid
lipids_nff_op={}
for l in ["lower","upper"]:
lipids_nff_op[l]={}
for s in lipids_handled[l]:
lipids_nff_op[l][s]={}
for r_index in range(0,lipids_nff_sele_nb[l][s]):
lipids_nff_op[l][s][r_index]=[]
#flipflopping lipids
#-------------------
#order parameter: full data
op_ff_tailA={}
op_ff_tailB={}
op_ff_both={}
for l in range(0,lipids_ff_nb):
op_ff_tailA[l]={}
op_ff_tailB[l]={}
op_ff_both[l]={}
#order parameter: sorted data
op_ff_tailA_sorted={}
op_ff_tailB_sorted={}
op_ff_both_sorted={}
for l in range(0,lipids_ff_nb):
op_ff_tailA_sorted[l]=[]
op_ff_tailB_sorted[l]=[]
op_ff_both_sorted[l]=[]
#order parameter: smoothed data
op_ff_tailA_smooth={}
op_ff_tailB_smooth={}
op_ff_both_smooth={}
#z coordinate: full data
z_lower={} #store z coord of lower leaflet for each frame
z_upper={} #store z coord of upper leaflet for each frame
z_ff={} #store z coord of the PO4 particle of each ff lipid
for l in range(0,lipids_ff_nb):
z_ff[l]={}
#z coordinate: sorted data
z_upper_sorted=[]
z_lower_sorted=[]
z_ff_sorted={}
for l in range(0,lipids_ff_nb):
z_ff_sorted[l]=[]
#z coordinate: smoothed data
z_lower_smooth=[]
z_upper_smooth=[]
z_ff_smooth={}
################################################################################################################################################
# FUNCTIONS: core
################################################################################################################################################
def get_z_coords(frame_nb):
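	#store, for the current frame, the z coordinate of the PO4 plane of each leaflet and of the PO4
	#bead of each flipflopping lipid, all expressed relative to the instantaneous bilayer midplane
	#(the mid-point between the PO4 centres of geometry of the two leaflets)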
z_middle_instant=lipids_nff_sele["lower"]["all"].selectAtoms("name PO4").centerOfGeometry()[2]+(lipids_nff_sele["upper"]["all"].selectAtoms("name PO4").centerOfGeometry()[2]-lipids_nff_sele["lower"]["all"].selectAtoms("name PO4").centerOfGeometry()[2])/float(2)
z_lower[frame_nb]=lipids_nff_sele["lower"]["all"].selectAtoms("name PO4").centerOfGeometry()[2]-z_middle_instant
z_upper[frame_nb]=lipids_nff_sele["upper"]["all"].selectAtoms("name PO4").centerOfGeometry()[2]-z_middle_instant
for l in range(0,lipids_ff_nb):
z_ff[l][frame_nb]=lipids_selection_ff[l].selectAtoms("name PO4").centerOfGeometry()[2]-z_middle_instant
return
def calculate_order_parameters(frame_nb):
#non flipflopping lipids
#=======================
for l in ["lower","upper"]:
for s in lipids_handled[l]:
#retrieve tail boundaries for current lipid type
tail_A_start=tail_boundaries[s][0]
tail_B_start=tail_boundaries[s][2]
tail_A_length=tail_boundaries[s][1]
tail_B_length=tail_boundaries[s][3]
#retrieve coordinates of lipids
tmp_coord=lipids_nff_sele[l][s].coordinates()
tmp_op=[]
#calculate 'order param' for each bond (1st bond to initialise array)
tmp_bond_array=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),1))
v=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),3))
v_norm2=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),1))
v[:,0]=lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][0])).coordinates()[:,0]-lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][1])).coordinates()[:,0]
v[:,1]=lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][0])).coordinates()[:,1]-lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][1])).coordinates()[:,1]
v[:,2]=lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][0])).coordinates()[:,2]-lipids_nff_sele[l][s].selectAtoms("name " + str(bonds[s][tail_boundaries[s][0]][1])).coordinates()[:,2]
v_norm2[:,0] = v[:,0]**2 + v[:,1]**2 + v[:,2]**2
tmp_bond_array[:,0]=0.5*(3*(v[:,2]**2)/v_norm2[:,0]-1)
for bond in bonds[s][tail_boundaries[s][0]+1:]:
v=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),3))
v_norm2=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),1))
tmp_op_bond=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),1))
v[:,0]=lipids_nff_sele[l][s].selectAtoms("name " + str(bond[0])).coordinates()[:,0]-lipids_nff_sele[l][s].selectAtoms("name " + str(bond[1])).coordinates()[:,0]
v[:,1]=lipids_nff_sele[l][s].selectAtoms("name " + str(bond[0])).coordinates()[:,1]-lipids_nff_sele[l][s].selectAtoms("name " + str(bond[1])).coordinates()[:,1]
v[:,2]=lipids_nff_sele[l][s].selectAtoms("name " + str(bond[0])).coordinates()[:,2]-lipids_nff_sele[l][s].selectAtoms("name " + str(bond[1])).coordinates()[:,2]
v_norm2[:,0] = v[:,0]**2 + v[:,1]**2 + v[:,2]**2
tmp_op_bond[:,0]=0.5*(3*(v[:,2]**2)/v_norm2[:,0]-1)
tmp_bond_array=numpy.concatenate((tmp_bond_array, tmp_op_bond), axis=1)
#calculate op (tail A, tail B, avg of both) for each lipid
tmp_op_tails_array=numpy.zeros((lipids_nff_sele[l][s].numberOfResidues(),2))
tmp_op_tails_array[:,0]=numpy.average(tmp_bond_array[:,0:tail_A_length],axis=1)
tmp_op_tails_array[:,1]=numpy.average(tmp_bond_array[:,tail_A_length:tail_A_length+tail_B_length],axis=1)
tmp_op_array=numpy.average(tmp_op_tails_array, axis=1)
			#calculate averages for the whole lipid species
op_tailA_avg_frame[l][s][frame_nb]=numpy.average(tmp_op_tails_array[:,0])
op_tailB_avg_frame[l][s][frame_nb]=numpy.average(tmp_op_tails_array[:,1])
op_both_avg_frame[l][s][frame_nb]=numpy.average(tmp_op_array)
op_tailA_std_frame[l][s][frame_nb]=numpy.std(tmp_op_tails_array[:,0])
op_tailB_std_frame[l][s][frame_nb]=numpy.std(tmp_op_tails_array[:,1])
op_both_std_frame[l][s][frame_nb]=numpy.std(tmp_op_array)
#store order parameter for each residue
for r_index in range(0,lipids_nff_sele_nb[l][s]):
lipids_nff_op[l][s][r_index].append(tmp_op_array[r_index])
#flipflopping lipids
#===================
if args.selection_file_ff!="no":
for l in range(0, lipids_ff_nb):
tmp_bond_array=[]
tmp_specie=lipids_ff_info[l][0]
for bond in bonds[tmp_specie][tail_boundaries[tmp_specie][0]:]:
vx=lipids_selection_ff[l].selectAtoms("name " + str(bond[0])).coordinates()[0,0]-lipids_selection_ff[l].selectAtoms("name " + str(bond[1])).coordinates()[0,0]
vy=lipids_selection_ff[l].selectAtoms("name " + str(bond[0])).coordinates()[0,1]-lipids_selection_ff[l].selectAtoms("name " + str(bond[1])).coordinates()[0,1]
vz=lipids_selection_ff[l].selectAtoms("name " + str(bond[0])).coordinates()[0,2]-lipids_selection_ff[l].selectAtoms("name " + str(bond[1])).coordinates()[0,2]
v_norm2=vx**2 + vy**2 + vz**2
tmp_bond_array.append(0.5*(3*(vz**2)/float(v_norm2)-1))
			tail_A_length=tail_boundaries[tmp_specie][1]
			tail_B_length=tail_boundaries[tmp_specie][3]
			op_ff_tailA[l][frame_nb]=numpy.average(tmp_bond_array[0:tail_A_length])
			op_ff_tailB[l][frame_nb]=numpy.average(tmp_bond_array[tail_A_length:tail_A_length+tail_B_length])
			op_ff_both[l][frame_nb]=numpy.average([op_ff_tailA[l][frame_nb], op_ff_tailB[l][frame_nb]])
return
def rolling_avg(loc_list):
loc_arr=numpy.asarray(loc_list)
shape=(loc_arr.shape[-1]-args.nb_smoothing+1,args.nb_smoothing)
strides=(loc_arr.strides[-1],loc_arr.strides[-1])
return numpy.average(numpy.lib.stride_tricks.as_strided(loc_arr, shape=shape, strides=strides), -1)
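#illustration of the running average above: with args.nb_smoothing = 3, rolling_avg([1,2,3,4,5])
#returns array([2., 3., 4.]), i.e. the mean over a sliding window of nb_smoothing consecutive
#points, so smoothed series are nb_smoothing-1 points shorter than the raw ones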
def calculate_stats():
for l in ["lower","upper"]:
#define data structure
#---------------------
op_tailA_avg[l]["all"]=[]
op_tailA_std[l]["all"]=[]
op_tailB_avg[l]["all"]=[]
op_tailB_std[l]["all"]=[]
op_both_avg[l]["all"]=[]
op_both_std[l]["all"]=[]
		#store species averages
#--------------------
#case: gro file
if args.xtcfilename=="no":
for s in lipids_handled[l]:
op_tailA_avg[l][s]=op_tailA_avg_frame[l][s][1]
op_tailA_avg[l]["all"].append(op_tailA_avg_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
op_tailB_avg[l][s]=op_tailB_avg_frame[l][s][1]
op_tailB_avg[l]["all"].append(op_tailB_avg_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
op_both_avg[l][s]=op_both_avg_frame[l][s][1]
op_both_avg[l]["all"].append(op_both_avg_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
op_tailA_std[l][s]=op_tailA_std_frame[l][s][1]
op_tailA_std[l]["all"].append(op_tailA_std_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
op_tailB_std[l][s]=op_tailB_std_frame[l][s][1]
op_tailB_std[l]["all"].append(op_tailB_std_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
op_both_std[l][s]=op_both_std_frame[l][s][1]
op_both_std[l]["all"].append(op_both_std_frame[l][s][1]*lipids_nff_sele[l][s].numberOfResidues())
#case: xtc file
else:
for s in lipids_handled[l]:
op_tailA_avg[l][s]=numpy.average(op_tailA_avg_frame[l][s].values())
op_tailA_avg[l]["all"].append(numpy.average(op_tailA_avg_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
op_tailB_avg[l][s]=numpy.average(op_tailB_avg_frame[l][s].values())
op_tailB_avg[l]["all"].append(numpy.average(op_tailB_avg_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
op_both_avg[l][s]=numpy.average(op_both_avg_frame[l][s].values())
op_both_avg[l]["all"].append(numpy.average(op_both_avg_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
op_tailA_std[l][s]=numpy.average(op_tailA_std_frame[l][s].values())
op_tailA_std[l]["all"].append(numpy.average(op_tailA_std_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
op_tailB_std[l][s]=numpy.average(op_tailB_std_frame[l][s].values())
op_tailB_std[l]["all"].append(numpy.average(op_tailB_std_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
op_both_std[l][s]=numpy.average(op_both_std_frame[l][s].values())
op_both_std[l]["all"].append(numpy.average(op_both_std_frame[l][s].values())*lipids_nff_sele[l][s].numberOfResidues())
#calculate leaflet average
#-------------------------
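		#the per-species values appended above are weighted by the number of residues of each species,
		#so dividing by the total number of lipids in the leaflet gives a residue-weighted leaflet average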
op_tailA_avg[l]["all"]=numpy.sum(op_tailA_avg[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
op_tailB_avg[l]["all"]=numpy.sum(op_tailB_avg[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
op_both_avg[l]["all"]=numpy.sum(op_both_avg[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
op_tailA_std[l]["all"]=numpy.sum(op_tailA_std[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
op_tailB_std[l]["all"]=numpy.sum(op_tailB_std[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
op_both_std[l]["all"]=numpy.sum(op_both_std[l]["all"])/float(lipids_nff_sele_nb["upper"]["all"])
return
def smooth_data():
global time_smooth
global z_upper_smooth
global z_lower_smooth
#time and nff lipids
#===================
#sort data into ordered lists
for frame in sorted(time_stamp.keys()):
time_sorted.append(time_stamp[frame])
for l in ["lower","upper"]:
for s in lipids_handled[l]:
op_tailA_avg_frame_sorted[l][s].append(op_tailA_avg_frame[l][s][frame])
op_tailB_avg_frame_sorted[l][s].append(op_tailB_avg_frame[l][s][frame])
op_both_avg_frame_sorted[l][s].append(op_both_avg_frame[l][s][frame])
op_tailA_std_frame_sorted[l][s].append(op_tailA_std_frame[l][s][frame])
op_tailB_std_frame_sorted[l][s].append(op_tailB_std_frame[l][s][frame])
op_both_std_frame_sorted[l][s].append(op_both_std_frame[l][s][frame])
#calculate running average on sorted lists
time_smooth=rolling_avg(time_sorted)
for l in ["lower","upper"]:
for s in lipids_handled[l]:
op_tailA_avg_frame_smooth[l][s]=rolling_avg(op_tailA_avg_frame_sorted[l][s])
op_tailB_avg_frame_smooth[l][s]=rolling_avg(op_tailB_avg_frame_sorted[l][s])
op_both_avg_frame_smooth[l][s]=rolling_avg(op_both_avg_frame_sorted[l][s])
op_tailA_std_frame_smooth[l][s]=rolling_avg(op_tailA_std_frame_sorted[l][s])
op_tailB_std_frame_smooth[l][s]=rolling_avg(op_tailB_std_frame_sorted[l][s])
op_both_std_frame_smooth[l][s]=rolling_avg(op_both_std_frame_sorted[l][s])
#flipflopping lipids
#===================
if args.selection_file_ff!="no":
#sort data into ordered lists
for frame in sorted(time_stamp.keys()):
z_upper_sorted.append(z_upper[frame])
z_lower_sorted.append(z_lower[frame])
for l in range(0,lipids_ff_nb):
z_ff_sorted[l].append(z_ff[l][frame])
op_ff_tailA_sorted[l].append(op_ff_tailA[l][frame])
op_ff_tailB_sorted[l].append(op_ff_tailB[l][frame])
op_ff_both_sorted[l].append(op_ff_both[l][frame])
#calculate running average on sorted lists
z_upper_smooth=rolling_avg(z_upper_sorted)
z_lower_smooth=rolling_avg(z_lower_sorted)
for l in range(0,lipids_ff_nb):
z_ff_smooth[l]=rolling_avg(z_ff_sorted[l])
op_ff_tailA_smooth[l]=rolling_avg(op_ff_tailA_sorted[l])
op_ff_tailB_smooth[l]=rolling_avg(op_ff_tailB_sorted[l])
op_ff_both_smooth[l]=rolling_avg(op_ff_both_sorted[l])
return
################################################################################################################################################
# FUNCTIONS: outputs
################################################################################################################################################
#non flipflopping lipids
def write_op_nff_xvg():
#lipids in upper leaflet
#=======================
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/xvg/1_3_order_param_nff_upper.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/xvg/1_3_order_param_nff_upper.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 1_3_order_param_nff_upper.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of lipid tails order parameters in upper leaflet\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ autoscale ONREAD xaxes\n")
output_xvg.write("@ TYPE XY\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(2*len(lipids_handled["upper"])*3) + "\n")
for s_index in range(0,len(lipids_handled["upper"])):
output_xvg.write("@ s" + str(3*s_index) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail A (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+1) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail B (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+2) + " legend \"" + str(lipids_handled["upper"][s_index]) + " both (avg)\"\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str((3*s_index)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail A (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str((3*s_index+1)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail B (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str((3*s_index+2)+1) +"," + str(lipids_handled["upper"][s_index]) + " both (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
for s_index in range(0,len(lipids_handled["upper"])):
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail A (std)\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index+1) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail (std)B\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index+2) + " legend \"" + str(lipids_handled["upper"][s_index]) + " both (std)\"\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail A (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index+1)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail B (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index+2)+1) +"," + str(lipids_handled["upper"][s_index]) + " both (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.close()
for frame in sorted(time_stamp.iterkeys()):
results=str(time_stamp[frame])
for s in lipids_handled["upper"]:
results+=" " + str(round(op_tailA_avg_frame["upper"][s][frame],2)) + " " + str(round(op_tailB_avg_frame["upper"][s][frame],2)) + " " + str(round(op_both_avg_frame["upper"][s][frame],2))
for s in lipids_handled["upper"]:
results+=" " + str(round(op_tailA_std_frame["upper"][s][frame],2)) + " " + str(round(op_tailB_std_frame["upper"][s][frame],2)) + " " + str(round(op_both_std_frame["upper"][s][frame],2))
output_xvg.write(results + "\n")
output_xvg.close()
#lipids in lower leaflet
#=======================
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/xvg/1_3_order_param_nff_lower.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/xvg/1_3_order_param_nff_lower.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 1_3_order_param_nff_lower.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of lipid tails order parameters in lower leaflet\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ autoscale ONREAD xaxes\n")
output_xvg.write("@ TYPE XY\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(2*len(lipids_handled["lower"])*3) + "\n")
for s_index in range(0,len(lipids_handled["lower"])):
output_xvg.write("@ s" + str(3*s_index) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail A (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+1) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail B (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+2) + " legend \"" + str(lipids_handled["lower"][s_index]) + " both (avg)\"\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str((3*s_index)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail A (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str((3*s_index+1)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail B (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str((3*s_index+2)+1) +"," + str(lipids_handled["lower"][s_index]) + " both (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
for s_index in range(0,len(lipids_handled["lower"])):
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail A (std)\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index+1) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail (std)B\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index+2) + " legend \"" + str(lipids_handled["lower"][s_index]) + " both (std)\"\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail A (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index+1)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail B (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index+2)+1) +"," + str(lipids_handled["lower"][s_index]) + " both (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.close()
for frame in sorted(time_stamp.iterkeys()):
results=str(time_stamp[frame])
for s in lipids_handled["lower"]:
results+=" " + str(round(op_tailA_avg_frame["lower"][s][frame],2)) + " " + str(round(op_tailB_avg_frame["lower"][s][frame],2)) + " " + str(round(op_both_avg_frame["lower"][s][frame],2))
for s in lipids_handled["lower"]:
results+=" " + str(round(op_tailA_std_frame["lower"][s][frame],2)) + " " + str(round(op_tailB_std_frame["lower"][s][frame],2)) + " " + str(round(op_both_std_frame["lower"][s][frame],2))
output_xvg.write(results + "\n")
output_xvg.close()
return
def write_op_nff_xvg_smoothed():
#lipids in upper leaflet
#=======================
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/xvg/1_5_order_param_nff_upper_smoothed.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/xvg/1_5_order_param_nff_upper_smoothed.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 1_5_order_param_nff_upper_smoothed.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of lipid tails order parameters in upper leaflet\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ autoscale ONREAD xaxes\n")
output_xvg.write("@ TYPE XY\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(2*len(lipids_handled["upper"])*3) + "\n")
for s_index in range(0,len(lipids_handled["upper"])):
output_xvg.write("@ s" + str(3*s_index) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail A (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+1) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail B (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+2) + " legend \"" + str(lipids_handled["upper"][s_index]) + " both (avg)\"\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str((3*s_index)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail A (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str((3*s_index+1)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail B (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str((3*s_index+2)+1) +"," + str(lipids_handled["upper"][s_index]) + " both (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
for s_index in range(0,len(lipids_handled["upper"])):
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail A (std)\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index+1) + " legend \"" + str(lipids_handled["upper"][s_index]) + " tail (std)B\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["upper"])+3*s_index+2) + " legend \"" + str(lipids_handled["upper"][s_index]) + " both (std)\"\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail A (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index+1)+1) +"," + str(lipids_handled["upper"][s_index]) + " tail B (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_upper_smoothed.xvg," + str(3*len(lipids_handled["upper"])+(3*s_index+2)+1) +"," + str(lipids_handled["upper"][s_index]) + " both (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["upper"][s_index])]) + "\n")
output_txt.close()
for t_index in range(0, numpy.size(time_smooth)):
results=str(time_smooth[t_index])
for s in lipids_handled["upper"]:
results+=" " + str(round(op_tailA_avg_frame_smooth["upper"][s][t_index],2)) + " " + str(round(op_tailB_avg_frame_smooth["upper"][s][t_index],2)) + " " + str(round(op_both_avg_frame_smooth["upper"][s][t_index],2))
for s in lipids_handled["upper"]:
results+=" " + str(round(op_tailA_std_frame_smooth["upper"][s][t_index],2)) + " " + str(round(op_tailB_std_frame_smooth["upper"][s][t_index],2)) + " " + str(round(op_both_std_frame_smooth["upper"][s][t_index],2))
		output_xvg.write(results + "\n")
output_xvg.close()
#lipids in lower leaflet
#=======================
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/xvg/1_5_order_param_nff_lower_smoothed.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/xvg/1_5_order_param_nff_lower_smoothed.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 1_5_order_param_nff_lower_smoothed.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of lipid tails order parameters in lower leaflet\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ autoscale ONREAD xaxes\n")
output_xvg.write("@ TYPE XY\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(2*len(lipids_handled["lower"])*3) + "\n")
for s_index in range(0,len(lipids_handled["lower"])):
output_xvg.write("@ s" + str(3*s_index) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail A (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+1) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail B (avg)\"\n")
output_xvg.write("@ s" + str(3*s_index+2) + " legend \"" + str(lipids_handled["lower"][s_index]) + " both (avg)\"\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str((3*s_index)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail A (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str((3*s_index+1)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail B (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str((3*s_index+2)+1) +"," + str(lipids_handled["lower"][s_index]) + " both (avg)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
for s_index in range(0,len(lipids_handled["lower"])):
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail A (std)\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index+1) + " legend \"" + str(lipids_handled["lower"][s_index]) + " tail (std)B\"\n")
output_xvg.write("@ s" + str(3*len(lipids_handled["lower"])+3*s_index+2) + " legend \"" + str(lipids_handled["lower"][s_index]) + " both (std)\"\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail A (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index+1)+1) +"," + str(lipids_handled["lower"][s_index]) + " tail B (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.write("1_3_order_param_nff_lower_smoothed.xvg," + str(3*len(lipids_handled["lower"])+(3*s_index+2)+1) +"," + str(lipids_handled["lower"][s_index]) + " both (std)," + mcolors.rgb2hex(colours_lipids[str(lipids_handled["lower"][s_index])]) + "\n")
output_txt.close()
for t_index in range(0, numpy.size(time_smooth)):
results=str(time_smooth[t_index])
for s in lipids_handled["lower"]:
results+=" " + str(round(op_tailA_avg_frame_smooth["lower"][s][t_index],2)) + " " + str(round(op_tailB_avg_frame_smooth["lower"][s][t_index],2)) + " " + str(round(op_both_avg_frame_smooth["lower"][s][t_index],2))
for s in lipids_handled["lower"]:
results+=" " + str(round(op_tailA_std_frame_smooth["lower"][s][t_index],2)) + " " + str(round(op_tailB_std_frame_smooth["lower"][s][t_index],2)) + " " + str(round(op_both_std_frame_smooth["lower"][s][t_index],2))
output_xvg.write(results + "\n")
output_xvg.close()
return
def graph_op_nff_xvg():
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/png/1_2_order_param_nff.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/1_2_order_param_nff.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Evolution of lipid tails order parameter")
#create data
#-----------
tmp_time=[]
tmp_op_both_avg_frame={}
tmp_op_both_std_frame={}
for l in ["lower","upper"]:
tmp_op_both_avg_frame[l]={}
tmp_op_both_std_frame[l]={}
for s in lipids_handled[l]:
tmp_op_both_avg_frame[l][s]=[]
tmp_op_both_std_frame[l][s]=[]
for frame in sorted(time_stamp.iterkeys()):
tmp_time.append(time_stamp[frame])
for l in ["lower","upper"]:
for s in lipids_handled[l]:
tmp_op_both_avg_frame[l][s].append(op_both_avg_frame[l][s][frame])
tmp_op_both_std_frame[l][s].append(op_both_std_frame[l][s][frame])
	#plot data: upper leaflet
#-----------------------
ax1 = fig.add_subplot(211)
p_upper={}
for s in lipids_handled["upper"]:
p_upper[s]=plt.plot(tmp_time, tmp_op_both_avg_frame["upper"][s], color=colours_lipids[s], linewidth=3.0, label=str(s))
p_upper[str(s + "_err")]=plt.fill_between(tmp_time, numpy.asarray(tmp_op_both_avg_frame["upper"][s])-numpy.asarray(tmp_op_both_std_frame["upper"][s]), numpy.asarray(tmp_op_both_avg_frame["upper"][s])+numpy.asarray(tmp_op_both_std_frame["upper"][s]), color=colours_lipids[s], alpha=0.2)
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.title("upper leaflet", fontsize="small")
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
	#plot data: lower leaflet
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
for s in lipids_handled["lower"]:
p_lower[s]=plt.plot(tmp_time, tmp_op_both_avg_frame["lower"][s], color=colours_lipids[s], linewidth=3.0, label=str(s))
p_lower[str(s + "_err")]=plt.fill_between(tmp_time, numpy.asarray(tmp_op_both_avg_frame["lower"][s])-numpy.asarray(tmp_op_both_std_frame["lower"][s]), numpy.asarray(tmp_op_both_avg_frame["lower"][s])+numpy.asarray(tmp_op_both_std_frame["lower"][s]), color=colours_lipids[s], alpha=0.2)
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.title("lower leaflet", fontsize="small")
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax2.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=7))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax2.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax2.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
return
def graph_op_nff_xvg_smoothed():
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/png/1_4_order_param_nff_smoothed.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/smoothed/1_4_order_param_nff_smoothed.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Evolution of lipid tails order parameter")
	#plot data: upper leaflet
#-----------------------
ax1 = fig.add_subplot(211)
p_upper={}
for s in lipids_handled["upper"]:
p_upper[s]=plt.plot(time_smooth, op_both_avg_frame_smooth["upper"][s], color=colours_lipids[s], linewidth=3.0, label=str(s))
p_upper[str(s + "_err")]=plt.fill_between(time_smooth, numpy.asarray(op_both_avg_frame_smooth["upper"][s])-numpy.asarray(op_both_std_frame_smooth["upper"][s]), numpy.asarray(op_both_avg_frame_smooth["upper"][s])+numpy.asarray(op_both_std_frame_smooth["upper"][s]), color=colours_lipids[s], alpha=0.2)
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.title("upper leaflet", fontsize="small")
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
	#plot data: lower leaflet
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
for s in lipids_handled["lower"]:
p_lower[s]=plt.plot(time_smooth, op_both_avg_frame_smooth["lower"][s], color=colours_lipids[s], linewidth=3.0, label=str(s))
p_lower[str(s + "_err")]=plt.fill_between(time_smooth, numpy.asarray(op_both_avg_frame_smooth["lower"][s])-numpy.asarray(op_both_std_frame_smooth["lower"][s]), numpy.asarray(op_both_avg_frame_smooth["lower"][s])+numpy.asarray(op_both_std_frame_smooth["lower"][s]), color=colours_lipids[s], alpha=0.2)
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.title("lower leaflet", fontsize="small")
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax2.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=7))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax2.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax2.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
return
#flipflopping lipids
def write_op_ff_xvg():
#upper to lower flipflops
#========================
if numpy.size(lipids_ff_u2l_index)>0:
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/xvg/4_3_order_param_ff_u2l.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/xvg/4_3_order_param_ff_u2l.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 4_3_order_param_ff_u2l.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of the tail order parameters of flipflopping lipids\"\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(lipids_ff_nb*3) + "\n")
for l_index in range(0,len(lipids_ff_u2l_index)):
l=lipids_ff_u2l_index[l_index]
output_xvg.write("@ s" + str(3*l_index) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A\"\n")
output_xvg.write("@ s" + str(3*l_index+1) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B\"\n")
output_xvg.write("@ s" + str(3*l_index+2) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both\"\n")
output_txt.write("4_3_order_param_ff_u2l.xvg," + str((3*l_index)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A,auto\n")
output_txt.write("4_3_order_param_ff_u2l.xvg," + str((3*l_index+1)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B,auto\n")
output_txt.write("4_3_order_param_ff_u2l.xvg," + str((3*l_index+2)+1) +"," + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both,auto\n")
output_txt.close()
for frame in sorted(time_stamp.iterkeys()):
results=str(time_stamp[frame])
for l in lipids_ff_u2l_index:
results+=" " + str(round(op_ff_tailA[l][frame],2)) + " " + str(round(op_ff_tailB[l][frame],2)) + " " + str(round(op_ff_both[l][frame],2))
output_xvg.write(results + "\n")
output_xvg.close()
#lower to upper flipflops
#========================
if numpy.size(lipids_ff_l2u_index)>0:
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/xvg/4_3_order_param_ff_l2u.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/xvg/4_3_order_param_ff_l2u.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 4_3_order_param_ff_l2u.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of the tail order parameters of flipflopping lipids\"\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(lipids_ff_nb*3) + "\n")
for l_index in range(0,len(lipids_ff_l2u_index)):
l=lipids_ff_l2u_index[l_index]
output_xvg.write("@ s" + str(3*l_index) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A\"\n")
output_xvg.write("@ s" + str(3*l_index+1) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B\"\n")
output_xvg.write("@ s" + str(3*l_index+2) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both\"\n")
output_txt.write("4_3_order_param_ff_l2u.xvg," + str((3*l_index)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A,auto\n")
output_txt.write("4_3_order_param_ff_l2u.xvg," + str((3*l_index+1)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B,auto\n")
output_txt.write("4_3_order_param_ff_l2u.xvg," + str((3*l_index+2)+1) +"," + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both,auto\n")
output_txt.close()
for frame in sorted(time_stamp.iterkeys()):
results=str(time_stamp[frame])
for l in lipids_ff_l2u_index:
results+=" " + str(round(op_ff_tailA[l][frame],2)) + " " + str(round(op_ff_tailB[l][frame],2)) + " " + str(round(op_ff_both[l][frame],2))
output_xvg.write(results + "\n")
output_xvg.close()
return
def write_op_ff_xvg_smoothed():
#upper to lower flipflops
#========================
if numpy.size(lipids_ff_u2l_index)>0:
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/xvg/4_5_order_param_ff_u2l_smoothed.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/xvg/4_5_order_param_ff_u2l_smoothed.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 4_5_order_param_ff_u2l_smoothed.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of the tail order parameters of flipflopping lipids\"\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(lipids_ff_nb*3) + "\n")
for l_index in range(0,len(lipids_ff_u2l_index)):
l=lipids_ff_u2l_index[l_index]
output_xvg.write("@ s" + str(3*l_index) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A\"\n")
output_xvg.write("@ s" + str(3*l_index+1) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B\"\n")
output_xvg.write("@ s" + str(3*l_index+2) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both\"\n")
output_txt.write("4_3_order_param_ff_u2l_smoothed.xvg," + str((3*l_index)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A,auto\n")
output_txt.write("4_3_order_param_ff_u2l_smoothed.xvg," + str((3*l_index+1)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B,auto\n")
output_txt.write("4_3_order_param_ff_u2l_smoothed.xvg," + str((3*l_index+2)+1) +"," + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both,auto\n")
output_txt.close()
for t_index in range(0, numpy.size(time_smooth)):
results=str(time_smooth[t_index])
for l in lipids_ff_u2l_index:
results+=" " + str(round(op_ff_tailA_smooth[l][t_index],2)) + " " + str(round(op_ff_tailB_smooth[l][t_index],2)) + " " + str(round(op_ff_both_smooth[l][t_index],2))
output_xvg.write(results + "\n")
output_xvg.close()
#lower to upper flipflops
#========================
if numpy.size(lipids_ff_l2u_index)>0:
filename_txt=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/xvg/4_5_order_param_ff_l2u_smoothed.txt'
filename_xvg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/xvg/4_5_order_param_ff_l2u_smoothed.xvg'
output_txt = open(filename_txt, 'w')
output_txt.write("@[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_txt.write("@Use this file as the argument of the -c option of the script 'xvg_animate' in order to make a time lapse movie of the data in 4_5_order_param_ff_l2u_smoothed.xvg.\n")
output_xvg = open(filename_xvg, 'w')
output_xvg.write("@ title \"Evolution of the tail order parameters of flipflopping lipids\"\n")
output_xvg.write("@ xaxis label \"time (ns)\"\n")
output_xvg.write("@ yaxis label \"order parameter S2\"\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length " + str(lipids_ff_nb*3) + "\n")
for l_index in range(0,len(lipids_ff_l2u_index)):
l=lipids_ff_l2u_index[l_index]
output_xvg.write("@ s" + str(3*l_index) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A\"\n")
output_xvg.write("@ s" + str(3*l_index+1) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B\"\n")
output_xvg.write("@ s" + str(3*l_index+2) + " legend \"" + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both\"\n")
output_txt.write("4_3_order_param_ff_l2u_smoothed.xvg," + str((3*l_index)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail A,auto\n")
output_txt.write("4_3_order_param_ff_l2u_smoothed.xvg," + str((3*l_index+1)+1) + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " tail B,auto\n")
output_txt.write("4_3_order_param_ff_l2u_smoothed.xvg," + str((3*l_index+2)+1) +"," + "," + str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " both,auto\n")
output_txt.close()
for t_index in range(0, numpy.size(time_smooth)):
results=str(time_smooth[t_index])
for l in lipids_ff_l2u_index:
results+=" " + str(round(op_ff_tailA_smooth[l][t_index],2)) + " " + str(round(op_ff_tailB_smooth[l][t_index],2)) + " " + str(round(op_ff_both_smooth[l][t_index],2))
output_xvg.write(results + "\n")
output_xvg.close()
return
def graph_op_ff_xvg():
#upper to lower flipflops
#========================
if numpy.size(lipids_ff_u2l_index)>0:
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/png/4_2_order_param_ff_u2l.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/4_2_order_param_ff_u2l.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Flipflopping lipids: upper to lower")
#create data
#-----------
tmp_time=[]
tmp_z_ff={}
tmp_z_upper=[]
tmp_z_lower=[]
tmp_op_ff_both={}
for l in lipids_ff_u2l_index:
tmp_z_ff[l]=[]
tmp_op_ff_both[l]=[]
for frame in sorted(time_stamp.iterkeys()):
tmp_time.append(time_stamp[frame])
tmp_z_upper.append(z_upper[frame])
tmp_z_lower.append(z_lower[frame])
for l in lipids_ff_u2l_index:
tmp_z_ff[l].append(z_ff[l][frame])
tmp_op_ff_both[l].append(op_ff_both[l][frame])
#plot data: order parameter
#-------------------------
ax1 = fig.add_subplot(211)
p_upper={}
for l in lipids_ff_u2l_index:
p_upper[l]=plt.plot(tmp_time, tmp_op_ff_both[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#plot data: z coordinate
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
p_lower["upper"]=plt.plot(tmp_time, tmp_z_upper, linestyle='dashed', color='k')
p_lower["lower"]=plt.plot(tmp_time, tmp_z_lower, linestyle='dashed', color='k')
for l in lipids_ff_u2l_index:
p_lower[l]=plt.plot(tmp_time, tmp_z_ff[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('z coordinate', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.set_ylim(-40, 40)
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=3))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
#lower to upper flipflops
#========================
if numpy.size(lipids_ff_l2u_index)>0:
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/png/4_2_order_param_ff_l2u.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/4_2_order_param_ff_l2u.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Flipflopping lipids: upper to lower")
#create data
#-----------
tmp_time=[]
tmp_z_ff={}
tmp_z_upper=[]
tmp_z_lower=[]
tmp_op_ff_both={}
for l in lipids_ff_l2u_index:
tmp_z_ff[l]=[]
tmp_op_ff_both[l]=[]
for frame in sorted(time_stamp.iterkeys()):
tmp_time.append(time_stamp[frame])
tmp_z_upper.append(z_upper[frame])
tmp_z_lower.append(z_lower[frame])
for l in lipids_ff_l2u_index:
tmp_z_ff[l].append(z_ff[l][frame])
tmp_op_ff_both[l].append(op_ff_both[l][frame])
#plot data: order parameter
#-------------------------
ax1 = fig.add_subplot(211)
p_upper={}
for l in lipids_ff_l2u_index:
p_upper[l]=plt.plot(tmp_time, tmp_op_ff_both[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#plot data: z coordinate
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
p_lower["upper"]=plt.plot(tmp_time, tmp_z_upper, linestyle='dashed', color='k')
p_lower["lower"]=plt.plot(tmp_time, tmp_z_lower, linestyle='dashed', color='k')
for l in lipids_ff_l2u_index:
p_lower[l]=plt.plot(tmp_time, tmp_z_ff[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('z coordinate', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.set_ylim(-40, 40)
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=3))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
return
def graph_op_ff_xvg_smoothed():
#upper to lower flipflops
#========================
if numpy.size(lipids_ff_u2l_index)>0:
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/png/4_4_order_param_ff_u2l_smoothed.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/4_4_order_param_ff_u2l_smoothed.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Flipflopping lipids: upper to lower")
#create data
#-----------
tmp_time=[]
tmp_z_ff={}
tmp_z_upper=[]
tmp_z_lower=[]
tmp_op_ff_both={}
for l in lipids_ff_u2l_index:
tmp_z_ff[l]=[]
tmp_op_ff_both[l]=[]
for t_index in range(0, numpy.size(time_smooth)):
tmp_time.append(time_smooth[t_index])
tmp_z_upper.append(z_upper_smooth[t_index])
tmp_z_lower.append(z_lower_smooth[t_index])
for l in lipids_ff_u2l_index:
tmp_z_ff[l].append(z_ff_smooth[l][t_index])
tmp_op_ff_both[l].append(op_ff_both_smooth[l][t_index])
#plot data: order parameter
#-------------------------
ax1 = fig.add_subplot(211)
p_upper={}
for l in lipids_ff_u2l_index:
p_upper[l]=plt.plot(tmp_time, tmp_op_ff_both[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#plot data: z coordinate
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
p_lower["upper"]=plt.plot(tmp_time, tmp_z_upper, linestyle='dashed', color='k')
p_lower["lower"]=plt.plot(tmp_time, tmp_z_lower, linestyle='dashed', color='k')
for l in lipids_ff_u2l_index:
p_lower[l]=plt.plot(tmp_time, tmp_z_ff[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('z coordinate', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.set_ylim(-40, 40)
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=3))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
#lower to upper flipflops
#========================
if numpy.size(lipids_ff_l2u_index)>0:
#create filenames
#----------------
filename_png=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/png/4_4_order_param_ff_l2u_smoothed.png'
filename_svg=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/smoothed/4_4_order_param_ff_l2u_smoothed.svg'
#create figure
#-------------
fig=plt.figure(figsize=(8, 6.2))
fig.suptitle("Flipflopping lipids: lower to lower")
#create data
#-----------
tmp_time=[]
tmp_z_ff={}
tmp_z_upper=[]
tmp_z_lower=[]
tmp_op_ff_both={}
for l in lipids_ff_l2u_index:
tmp_z_ff[l]=[]
tmp_op_ff_both[l]=[]
for t_index in range(0, numpy.size(time_smooth)):
tmp_time.append(time_smooth[t_index])
tmp_z_upper.append(z_upper_smooth[t_index])
tmp_z_lower.append(z_lower_smooth[t_index])
for l in lipids_ff_l2u_index:
tmp_z_ff[l].append(z_ff_smooth[l][t_index])
tmp_op_ff_both[l].append(op_ff_both_smooth[l][t_index])
#plot data: order parameter
#-------------------------
ax1 = fig.add_subplot(211)
p_upper={}
for l in lipids_ff_l2u_index:
p_upper[l]=plt.plot(tmp_time, tmp_op_ff_both[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax1.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('order parameter', fontsize="small")
#plot data: z coordinate
#-----------------------
ax2 = fig.add_subplot(212)
p_lower={}
p_lower["upper"]=plt.plot(tmp_time, tmp_z_upper, linestyle='dashed', color='k')
p_lower["lower"]=plt.plot(tmp_time, tmp_z_lower, linestyle='dashed', color='k')
for l in lipids_ff_l2u_index:
p_lower[l]=plt.plot(tmp_time, tmp_z_ff[l], label=str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]))
fontP.set_size("small")
ax2.legend(prop=fontP)
plt.xlabel('time (ns)', fontsize="small")
plt.ylabel('z coordinate', fontsize="small")
#save figure
#-----------
ax1.set_ylim(-0.5, 1)
ax1.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=7))
ax2.set_ylim(-40, 40)
ax2.xaxis.set_major_locator(MaxNLocator(nbins=5))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=3))
plt.setp(ax1.xaxis.get_majorticklabels(), fontsize="small" )
plt.setp(ax1.yaxis.get_majorticklabels(), fontsize="small" )
plt.subplots_adjust(top=0.9, bottom=0.07, hspace=0.37, left=0.09, right=0.96)
fig.savefig(filename_png)
fig.savefig(filename_svg)
plt.close()
return
#annotations
#===========
def write_frame_stat(f_nb, f_index, t):
#case: gro file or xtc summary
#=============================
if f_index=="all" and t=="all":
#nff lipids
#----------
#create file
filename_details=os.getcwd() + '/' + str(args.output_folder) + '/1_nff/1_1_order_param_nff.stat'
output_stat = open(filename_details, 'w')
output_stat.write("[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_stat.write("\n")
#general info
output_stat.write("1. membrane composition\n")
output_stat.write(membrane_comp["upper"] + "\n")
output_stat.write(membrane_comp["lower"] + "\n")
tmp_string=str(lipids_handled["both"][0])
for s in lipids_handled["both"][1:]:
tmp_string+=", " + str(s)
output_stat.write("2. lipid species processed: " + str(tmp_string) + "\n")
if args.xtcfilename!="no":
output_stat.write("3. nb frames processed: " + str(nb_frames_processed) + " (" + str(nb_frames_xtc) + " frames in xtc, step=" + str(args.frames_dt) + ")\n")
output_stat.write("\n")
output_stat.write("lipid orientation with bilayer normal\n")
output_stat.write(" P2=1 : parallel\n")
output_stat.write(" P2=0 : random\n")
output_stat.write(" P2=-0.5 : orthogonal\n")
#average
output_stat.write("\n")
output_stat.write("average order parameter: " + str(round((op_both_avg["upper"]["all"]*lipids_nff_sele_nb["upper"]["all"]+op_both_avg["lower"]["all"]*lipids_nff_sele_nb["lower"]["all"])/float(lipids_nff_sele_nb["upper"]["all"]+lipids_nff_sele_nb["lower"]["all"]),2)) + "\n")
#lipids in upper leaflet
output_stat.write("\n")
output_stat.write("upper leaflet\n")
output_stat.write("=============\n")
output_stat.write("avg nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["upper"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["upper"][s].numberOfResidues()) + " " + str(round(op_tailA_avg["upper"][s],2)) + " " + str(round(op_tailB_avg["upper"][s],2)) + " " + str(round(op_both_avg["upper"][s],2)) + "\n")
output_stat.write("-------------------------------------\n")
output_stat.write("all " + str(lipids_nff_sele_nb["upper"]["all"]) + " " + str(round(op_tailA_avg["upper"]["all"],2)) + " " + str(round(op_tailB_avg["upper"]["all"],2)) + " " + str(round(op_both_avg["upper"]["all"],2)) + "\n")
output_stat.write("\n")
output_stat.write("std nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["upper"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["upper"][s].numberOfResidues()) + " " + str(round(op_tailA_std["upper"][s],2)) + " " + str(round(op_tailB_std["upper"][s],2)) + " " + str(round(op_both_std["upper"][s],2)) + "\n")
output_stat.write("-------------------------------------\n")
output_stat.write("all " + str(lipids_nff_sele_nb["upper"]["all"]) + " " + str(round(op_tailA_std["upper"]["all"],2)) + " " + str(round(op_tailB_std["upper"]["all"],2)) + " " + str(round(op_both_std["upper"]["all"],2)) + "\n")
#lipids in lower leaflet
output_stat.write("\n")
output_stat.write("lower leaflet\n")
output_stat.write("=============\n")
output_stat.write("avg nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["lower"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["lower"][s].numberOfResidues()) + " " + str(round(op_tailA_avg["lower"][s],2)) + " " + str(round(op_tailB_avg["lower"][s],2)) + " " + str(round(op_both_avg["lower"][s],2)) + "\n")
output_stat.write("-------------------------------------\n")
output_stat.write("all " + str(lipids_nff_sele_nb["lower"]["all"]) + " " + str(round(op_tailA_avg["lower"]["all"],2)) + " " + str(round(op_tailB_avg["lower"]["all"],2)) + " " + str(round(op_both_avg["lower"]["all"],2)) + "\n")
output_stat.write("\n")
output_stat.write("std nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["lower"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["lower"][s].numberOfResidues()) + " " + str(round(op_tailA_std["lower"][s],2)) + " " + str(round(op_tailB_std["lower"][s],2)) + " " + str(round(op_both_std["lower"][s],2)) + "\n")
output_stat.write("-------------------------------------\n")
output_stat.write("all " + str(lipids_nff_sele_nb["lower"]["all"]) + " " + str(round(op_tailA_std["lower"]["all"],2)) + " " + str(round(op_tailB_std["lower"]["all"],2)) + " " + str(round(op_both_std["lower"]["all"],2)) + "\n")
output_stat.close()
#ff lipids
#---------
if args.selection_file_ff!="no":
filename_details=os.getcwd() + '/' + str(args.output_folder) + '/4_ff/4_1_order_param_ff.stat'
output_stat = open(filename_details, 'w')
output_stat.write("[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_stat.write("\n")
#general info
output_stat.write("1. membrane composition\n")
output_stat.write(membrane_comp["upper"] + "\n")
output_stat.write(membrane_comp["lower"] + "\n")
tmp_string=str(lipids_handled["both"][0])
for s in lipids_handled["both"][1:]:
tmp_string+=", " + str(s)
output_stat.write("2. lipid species processed: " + str(tmp_string) + "\n")
if args.xtcfilename!="no":
output_stat.write("3. nb frames processed: " + str(nb_frames_processed) + " (" + str(nb_frames_xtc) + " frames in xtc, step=" + str(args.frames_dt) + ")\n")
output_stat.write("\n")
output_stat.write("lipid orientation with bilayer normal\n")
output_stat.write(" P2=1 : parallel\n")
output_stat.write(" P2=0 : random\n")
output_stat.write(" P2=-0.5 : orthogonal\n")
#upper to lower
if numpy.size(lipids_ff_u2l_index)>0:
output_stat.write("\n")
output_stat.write("upper to lower\n")
output_stat.write("==============\n")
output_stat.write("specie resid tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for l in lipids_ff_u2l_index:
output_stat.write(str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " " + str(round(op_ff_tailA[l][1],2)) + " " + str(round(op_ff_tailB[l][1],2)) + " " + str(round(op_ff_both[l][1],2)) + "\n")
#lower to upper
if numpy.size(lipids_ff_l2u_index)>0:
output_stat.write("\n")
output_stat.write("lower to upper\n")
output_stat.write("==============\n")
output_stat.write("specie resid tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for l in lipids_ff_l2u_index:
output_stat.write(str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " " + str(round(op_ff_tailA[l][1],2)) + " " + str(round(op_ff_tailB[l][1],2)) + " " + str(round(op_ff_both[l][1],2)) + "\n")
output_stat.close()
#case: xtc snapshot
#==================
else:
#nff lipids
#----------
#create file
filename_details=os.getcwd() + '/' + str(args.output_folder) + '/2_snapshots/' + args.xtcfilename[:-4] + '_annotated_orderparam_' + str(int(t)).zfill(5) + 'ns_nff.stat'
output_stat = open(filename_details, 'w')
output_stat.write("[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_stat.write("\n")
#general info
output_stat.write("1. membrane composition\n")
output_stat.write(membrane_comp["upper"] + "\n")
output_stat.write(membrane_comp["lower"] + "\n")
tmp_string=str(lipids_handled["both"][0])
for s in lipids_handled["both"][1:]:
tmp_string+=", " + str(s)
output_stat.write("\n")
output_stat.write("2. lipid species processed: " + str(tmp_string) + "\n")
output_stat.write("\n")
output_stat.write("3. nb frames processed: " + str(nb_frames_processed) + " (" + str(nb_frames_xtc) + " frames in xtc, step=" + str(args.frames_dt) + ")\n")
output_stat.write("\n")
output_stat.write("4. time: " + str(t) + "ns (frame " + str(f_nb) + "/" + str(nb_frames_xtc) + ")\n")
output_stat.write("\n")
output_stat.write("lipid orientation with bilayer normal\n")
output_stat.write(" P2=1 : parallel\n")
output_stat.write(" P2=0 : random\n")
output_stat.write(" P2=-0.5 : orthogonal\n")
#lipids in upper leaflet
output_stat.write("\n")
output_stat.write("upper leaflet\n")
output_stat.write("=============\n")
output_stat.write("avg nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["upper"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["upper"][s].numberOfResidues()) + " " + str(round(op_tailA_avg_frame["upper"][s][f_nb],2)) + " " + str(round(op_tailB_avg_frame["upper"][s][f_nb],2)) + " " + str(round(op_both_avg_frame["upper"][s][f_nb],2)) + "\n")
output_stat.write("\n")
output_stat.write("std nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["upper"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["upper"][s].numberOfResidues()) + " " + str(round(op_tailA_std_frame["upper"][s][f_nb],2)) + " " + str(round(op_tailB_std_frame["upper"][s][f_nb],2)) + " " + str(round(op_both_std_frame["upper"][s][f_nb],2)) + "\n")
#lipids in lower leaflet
output_stat.write("\n")
output_stat.write("lower leaflet\n")
output_stat.write("=============\n")
output_stat.write("avg nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["lower"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["lower"][s].numberOfResidues()) + " " + str(round(op_tailA_avg_frame["lower"][s][f_nb],2)) + " " + str(round(op_tailB_avg_frame["lower"][s][f_nb],2)) + " " + str(round(op_both_avg_frame["lower"][s][f_nb],2)) + "\n")
output_stat.write("\n")
output_stat.write("std nb tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for s in lipids_handled["lower"]:
output_stat.write(str(s) + " " + str(lipids_nff_sele["lower"][s].numberOfResidues()) + " " + str(round(op_tailA_std_frame["lower"][s][f_nb],2)) + " " + str(round(op_tailB_std_frame["lower"][s][f_nb],2)) + " " + str(round(op_both_std_frame["lower"][s][f_nb],2)) + "\n")
output_stat.close()
#ff lipids
#----------
if args.selection_file_ff!="no":
filename_details=os.getcwd() + '/' + str(args.output_folder) + '/2_snapshots/' + args.xtcfilename[:-4] + '_annotated_orderparam_' + str(int(t)).zfill(5) + 'ns_ff.stat'
output_stat = open(filename_details, 'w')
output_stat.write("[lipid tail order parameters statistics - written by order_param v" + str(version_nb) + "]\n")
output_stat.write("\n")
#general info
output_stat.write("1. membrane composition\n")
output_stat.write(membrane_comp["upper"] + "\n")
output_stat.write(membrane_comp["lower"] + "\n")
tmp_string=str(lipids_handled["both"][0])
for s in lipids_handled["both"][1:]:
tmp_string+=", " + str(s)
output_stat.write("\n")
output_stat.write("2. lipid species processed: " + str(tmp_string) + "\n")
output_stat.write("\n")
output_stat.write("3. nb frames processed: " + str(nb_frames_processed) + " (" + str(nb_frames_xtc) + " frames in xtc, step=" + str(args.frames_dt) + ")\n")
output_stat.write("\n")
output_stat.write("4. time: " + str(t) + "ns (frame " + str(f_nb) + "/" + str(nb_frames_xtc) + ")\n")
output_stat.write("\n")
output_stat.write("lipid orientation with bilayer normal\n")
output_stat.write(" P2=1 : parallel\n")
output_stat.write(" P2=0 : random\n")
output_stat.write(" P2=-0.5 : orthogonal\n")
#upper to lower
if numpy.size(lipids_ff_u2l_index)>0:
output_stat.write("\n")
output_stat.write("upper to lower\n")
output_stat.write("==============\n")
output_stat.write("specie resid tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for l in lipids_ff_u2l_index:
output_stat.write(str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " " + str(round(op_ff_tailA[l][f_nb],2)) + " " + str(round(op_ff_tailB[l][f_nb],2)) + " " + str(round(op_ff_both[l][f_nb],2)) + "\n")
#lower to upper
if numpy.size(lipids_ff_l2u_index)>0:
output_stat.write("\n")
output_stat.write("lower to upper\n")
output_stat.write("==============\n")
output_stat.write("specie resid tail A tail B both\n")
output_stat.write("-------------------------------------\n")
for l in lipids_ff_l2u_index:
output_stat.write(str(lipids_ff_info[l][0]) + " " + str(lipids_ff_info[l][1]) + " " + str(round(op_ff_tailA[l][f_nb],2)) + " " + str(round(op_ff_tailB[l][f_nb],2)) + " " + str(round(op_ff_both[l][f_nb],2)) + "\n")
output_stat.close()
return
def write_frame_snapshot(f_nb, f_index,t):
#store order parameter info in beta factor field: nff lipids
for l in ["lower","upper"]:
for s in lipids_handled[l]:
for r_index in range(0,lipids_nff_sele_nb[l][s]):
lipids_selection_nff[l][s][r_index].set_bfactor(lipids_nff_op[l][s][r_index][f_index])
#store order parameter info in beta factor field: ff lipids
if args.selection_file_ff!="no":
for l in range(0,lipids_ff_nb):
lipids_selection_ff[l].set_bfactor(op_ff_both[l][f_nb])
#case: gro file
if args.xtcfilename=="no":
all_atoms.write(os.getcwd() + '/' + str(args.output_folder) + '/2_snapshots/' + args.grofilename[:-4] + '_annotated_orderparam', format="PDB")
#case: xtc file
else:
tmp_name=os.getcwd() + "/" + str(args.output_folder) + '/2_snapshots/' + args.xtcfilename[:-4] + '_annotated_orderparam_' + str(int(t)).zfill(5) + 'ns.pdb'
W=Writer(tmp_name, nb_atoms)
W.write(all_atoms)
return
def write_frame_annotation(f_nb, f_index,t):
#create file
if args.xtcfilename=="no":
filename_details=os.getcwd() + "/" + str(args.output_folder) + '/2_snapshots/' + args.grofilename[:-4] + '_annotated_orderparam.txt'
else:
filename_details=os.getcwd() + "/" + str(args.output_folder) + '/2_snapshots/' + args.xtcfilename[:-4] + '_annotated_orderparam_' + str(int(t)).zfill(5) + 'ns.txt'
output_stat = open(filename_details, 'w')
#output selection strings: nff lipids
tmp_sele_string=""
for l in ["lower","upper"]:
for s in lipids_handled[l]:
for r_index in range(0,lipids_nff_sele_nb[l][s]):
tmp_sele_string+="." + lipids_selection_nff_VMD_string[l][s][r_index]
#output selection strings: ff lipids
if args.selection_file_ff!="no":
for l in range(0,lipids_ff_nb):
tmp_sele_string+="." + lipids_selection_ff_VMD_string[l]
output_stat.write(tmp_sele_string[1:] + "\n")
#output order param for each lipid for current frame: nff lipids
tmp_ops="1"
for l in ["lower","upper"]:
for s in lipids_handled[l]:
for r_index in lipids_nff_op[l][s]:
tmp_ops+=";" + str(round(lipids_nff_op[l][s][r_index][f_index],2))
#output order param for each lipid for current frame: ff lipids
if args.selection_file_ff!="no":
for l in range(0,lipids_ff_nb):
tmp_ops+=";" + str(round(op_ff_both[l][f_nb],2))
output_stat.write(tmp_ops + "\n")
output_stat.close()
return
def write_xtc_snapshots():
#NB: - this will always output the first and final frame snapshots
# - it will also output intermediate frames according to the -w option
loc_nb_frames_processed=0
for ts in U.trajectory:
#case: frames before specified time boundaries
#---------------------------------------------
if ts.time/float(1000)<args.t_start:
progress='\r -skipping frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
#case: frames within specified time boundaries
#---------------------------------------------
elif ts.time/float(1000)>args.t_start and ts.time/float(1000)<args.t_end:
progress='\r -writing snapshots... frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
if ((ts.frame-1) % args.frames_dt)==0:
if ((loc_nb_frames_processed) % args.frames_write_dt)==0 or loc_nb_frames_processed==nb_frames_processed-1:
write_frame_stat(ts.frame, loc_nb_frames_processed, ts.time/float(1000))
write_frame_snapshot(ts.frame, loc_nb_frames_processed, ts.time/float(1000))
write_frame_annotation(ts.frame, loc_nb_frames_processed, ts.time/float(1000))
loc_nb_frames_processed+=1
#case: frames after specified time boundaries
#--------------------------------------------
elif ts.time/float(1000)>args.t_end:
break
print ''
return
def write_xtc_annotation():
#create file
filename_details=os.getcwd() + '/' + str(args.output_folder) + '/3_VMD/' + args.xtcfilename[:-4] + '_annotated_orderparam_dt' + str(args.frames_dt) + '.txt'
output_stat = open(filename_details, 'w')
#output selection strings
#------------------------
#nff lipids
tmp_sele_string=""
for l in ["lower","upper"]:
for s in lipids_handled[l]:
for r_index in range(0,lipids_nff_sele_nb[l][s]):
tmp_sele_string+="." + lipids_selection_nff_VMD_string[l][s][r_index]
#ff lipids
if args.selection_file_ff!="no":
for l in range(0,lipids_ff_nb):
tmp_sele_string+="." + lipids_selection_ff_VMD_string[l]
output_stat.write(tmp_sele_string[1:] + "\n")
#output order param for each lipid
#---------------------------------
for frame in sorted(time_stamp.iterkeys()):
tmp_ops=str(frame)
frame_index=sorted(time_stamp.keys()).index(frame)
#nff lipids
for l in ["lower","upper"]:
for s in lipids_handled[l]:
for r_index in lipids_nff_op[l][s]:
tmp_ops+=";" + str(round(lipids_nff_op[l][s][r_index][frame_index],2))
#ff lipids
if args.selection_file_ff!="no":
for l in range(0,lipids_ff_nb):
tmp_ops+=";" + str(round(op_ff_both[l][frame],2))
output_stat.write(tmp_ops + "\n")
output_stat.close()
return
################################################################################################################################################
# ALGORITHM : Browse trajectory and process relevant frames
################################################################################################################################################
print "\nCalculating order parameters..."
#case: structure only
#====================
if args.xtcfilename=="no":
time_stamp[1]=0
calculate_order_parameters(1)
#case: browse xtc frames
#=======================
else:
for ts in U.trajectory:
#case: frames before specified time boundaries
#---------------------------------------------
if ts.time/float(1000)<args.t_start:
progress='\r -skipping frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
#case: frames within specified time boundaries
#---------------------------------------------
elif ts.time/float(1000)>args.t_start and ts.time/float(1000)<args.t_end:
progress='\r -processing frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
if ((ts.frame-1) % args.frames_dt)==0:
nb_frames_processed+=1
time_stamp[ts.frame]=ts.time/float(1000)
get_z_coords(ts.frame)
calculate_order_parameters(ts.frame)
#case: frames after specified time boundaries
#---------------------------------------------
elif ts.time/float(1000)>args.t_end:
break
print ''
################################################################################################################################################
# CALCULATE STATISTICS
################################################################################################################################################
print "\nCalculating statistics..."
calculate_stats()
if args.nb_smoothing>1:
smooth_data()
################################################################################################################################################
# PRODUCE OUTPUTS
################################################################################################################################################
print "\nWriting outputs..."
#case: gro file
#==============
if args.xtcfilename=="no":
print " -writing statistics..."
write_frame_stat(1,"all","all")
print " -writing annotated pdb..."
write_frame_snapshot(1,0,0)
write_frame_annotation(1,0,0)
#case: xtc file
#==============
else:
#writing statistics
print " -writing statistics..."
write_frame_stat(1,"all","all")
#output annotated snapshots
write_xtc_snapshots()
#write annotation files for VMD
print " -writing VMD annotation files..."
write_xtc_annotation()
#write xvg and graphs
print " -writing xvg and graphs..."
write_op_nff_xvg()
graph_op_nff_xvg()
if args.nb_smoothing>1:
write_op_nff_xvg_smoothed()
graph_op_nff_xvg_smoothed()
if args.selection_file_ff!="no":
write_op_ff_xvg()
graph_op_ff_xvg()
if args.nb_smoothing>1:
write_op_ff_xvg_smoothed()
graph_op_ff_xvg_smoothed()
#exit
#====
print "\nFinished successfully! Check output in ./" + args.output_folder + "/"
print ""
sys.exit(0)
| gpl-2.0 |
juanlao7/CIFAR100-CNN | weight2.py | 1 | 4833 | from keras.datasets import cifar100
from keras.utils import np_utils
from keras import backend
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
from keras import regularizers
import matplotlib.pyplot as plt
import os.path
import pickle
N_CLASSES = 100
SAMPLE_WIDTH = 32
SAMPLE_HEIGHT = 32
# Parameters
BATCH_SIZE = 100
N_EPOCHS = 10000 # We stop training when the validation loss converges; the training can take all the epochs it needs
VALIDATION_SPLIT = 0.2
VALIDATION_PATIENCE = 15
ACTIVATION = 'elu'
DROPOUT = 0.25
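# Draws one validation (solid) and one training (dashed) curve per weight
# decay factor; the two empty black plots inside exist only so the legend can
# label the line styles ('Training' dashed, 'Validation' solid) while the
# colours identify the individual decay factors.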
def plotOptions(results, title, ylabel, keys):
plt.gca().set_color_cycle(None)
plt.plot([], '--', color='black')
plt.plot([], color='black')
for i in results:
plt.plot(results[i]['h'][keys[0]])
plt.legend(['Training', 'Validation'] + results.keys(), loc='upper right')
plt.gca().set_color_cycle(None)
for i in results:
plt.plot(results[i]['h'][keys[1]], '--')
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel('Epoch')
plt.show()
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
# Normalizing the input.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255
x_test = x_test / 255
# One-hot encoding the labels.
y_train = np_utils.to_categorical(y_train, N_CLASSES)
y_test = np_utils.to_categorical(y_test, N_CLASSES)
# Reshaping the samples depending on which format the backend uses.
if backend.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
x_test = x_test.reshape(x_test.shape[0], 3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
input_shape = (3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
else:
x_train = x_train.reshape(x_train.shape[0], SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
x_test = x_test.reshape(x_test.shape[0], SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
input_shape = (SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
#optimizer = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
optimizer = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
weightDecayFunctions = {
'0': None,
'0.01': regularizers.l2(0.01),
'0.001': regularizers.l2(0.001),
'0.0001': regularizers.l2(0.0001),
'0.00001': regularizers.l2(0.00001)
}
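# A quick sketch of what these factors mean for the loss: regularizers.l2(f)
# adds an f * sum(w ** 2) penalty over the regularized kernel weights, so a
# single weight w = 0.5 trained with f = 0.01 contributes
# 0.01 * 0.5 ** 2 = 0.0025 to the total loss, while the '0' entry (None)
# disables weight decay entirely.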
results = {}
for i in weightDecayFunctions:
print '### Weight decay factor ' + i + ' ###'
resultFileName = 'model_' + i + '.result'
if os.path.isfile(resultFileName):
handler = open(resultFileName, 'rb')
results[i] = pickle.load(handler)
handler.close()
continue
# Defining the model.
model = Sequential()
model.add(Conv2D(27, (3, 3), activation=ACTIVATION, input_shape=input_shape, kernel_regularizer=weightDecayFunctions[i]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Conv2D(81, (3, 3), activation=ACTIVATION, kernel_regularizer=weightDecayFunctions[i]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Conv2D(135, (3, 3), activation=ACTIVATION, kernel_regularizer=weightDecayFunctions[i]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
model.add(Dense(128, activation=ACTIVATION, kernel_regularizer=weightDecayFunctions[i]))
model.add(Dropout(DROPOUT))
model.add(Dense(128, activation=ACTIVATION, kernel_regularizer=weightDecayFunctions[i]))
model.add(Dense(N_CLASSES, activation='softmax'))
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Training the model.
stopper = EarlyStopping(monitor='val_loss', patience=VALIDATION_PATIENCE)
h = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=N_EPOCHS, callbacks=[stopper], validation_split=VALIDATION_SPLIT)
# Evaluating the model.
score = model.evaluate(x_test, y_test, verbose=0)
results[i] = {
'h': h.history,
'test_loss': score[0],
'test_acc': score[1]
}
handler = open(resultFileName, 'wb')
pickle.dump(results[i], handler)
handler.close()
print '### FINISH! ###'
for i in results:
h = results[i]['h']
print i + ':'
result = [str(round(i, 4)) for i in [h['loss'][-1], h['acc'][-1], h['val_loss'][-1], h['val_acc'][-1], results[i]['test_loss'], results[i]['test_acc']]]
print ','.join(result)
# Plotting
plotOptions(results, 'Model loss', 'Loss', ['val_loss', 'loss'])
plotOptions(results, 'Model accuracy', 'Accuracy', ['val_acc', 'acc'])
| mit |
eadgarchen/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
axbaretto/beam | sdks/python/apache_beam/dataframe/doctests_test.py | 6 | 6778 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import os
import tempfile
import unittest
from apache_beam.dataframe import doctests
SAMPLE_DOCTEST = '''
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
'''
CHECK_USES_DEFERRED_DATAFRAMES = '''
>>> type(pd).__name__
'FakePandasObject'
>>> type(pd.DataFrame([]))
<class 'apache_beam.dataframe.frames.DeferredDataFrame'>
>>> type(pd.DataFrame.from_dict({'a': [1, 2], 'b': [3, 4]}))
<class 'apache_beam.dataframe.frames.DeferredDataFrame'>
>>> pd.Index(range(10))
RangeIndex(start=0, stop=10, step=1)
'''
WONT_IMPLEMENT_RAISING_TESTS = '''
>>> import apache_beam
>>> raise apache_beam.dataframe.frame_base.WontImplementError('anything')
ignored exception
>>> pd.Series(range(10)).__array__()
ignored result
'''
ERROR_RAISING_NAME_ERROR_TESTS = '''
>>> import apache_beam
>>> raise %s('anything')
ignored exception
>>> raise NameError
ignored exception
>>> undefined_name
ignored exception
>>> 2 + 2
4
>>> raise NameError
failed exception
'''
WONT_IMPLEMENT_RAISING_NAME_ERROR_TESTS = ERROR_RAISING_NAME_ERROR_TESTS % (
'apache_beam.dataframe.frame_base.WontImplementError', )
NOT_IMPLEMENTED_RAISING_TESTS = '''
>>> import apache_beam
>>> raise NotImplementedError('anything')
ignored exception
'''
NOT_IMPLEMENTED_RAISING_NAME_ERROR_TESTS = ERROR_RAISING_NAME_ERROR_TESTS % (
'NotImplementedError', )
FAILED_ASSIGNMENT = '''
>>> def foo(): raise NotImplementedError()
>>> res = 'old_value'
>>> res = foo()
>>> print(res)
ignored NameError
'''
RST_IPYTHON = '''
Here is an example
.. ipython::
2 + 2
some multi-line examples
.. ipython::
def foo(x):
return x * x
foo(4)
foo(
4
)
In [100]: def foo(x):
....: return x * x * x
....:
foo(5)
history is preserved
foo(3)
foo(4)
and finally an example with pandas
.. ipython::
pd.Series([1, 2, 3]).max()
This one should be skipped:
.. ipython::
@verbatim
not run or tested
and someting that'll fail (due to fake vs. real pandas)
.. ipython::
type(pd)
'''
class DoctestTest(unittest.TestCase):
def test_good(self):
result = doctests.teststring(SAMPLE_DOCTEST, report=False)
self.assertEqual(result.attempted, 3)
self.assertEqual(result.failed, 0)
def test_failure(self):
result = doctests.teststring(
SAMPLE_DOCTEST.replace('25.0', '25.00001'), report=False)
self.assertEqual(result.attempted, 3)
self.assertEqual(result.failed, 1)
def test_uses_beam_dataframes(self):
result = doctests.teststring(CHECK_USES_DEFERRED_DATAFRAMES, report=False)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
def test_file(self):
with tempfile.TemporaryDirectory() as dir:
filename = os.path.join(dir, 'tests.py')
with open(filename, 'w') as fout:
fout.write(SAMPLE_DOCTEST)
result = doctests.testfile(filename, module_relative=False, report=False)
self.assertEqual(result.attempted, 3)
self.assertEqual(result.failed, 0)
def test_file_uses_beam_dataframes(self):
with tempfile.TemporaryDirectory() as dir:
filename = os.path.join(dir, 'tests.py')
with open(filename, 'w') as fout:
fout.write(CHECK_USES_DEFERRED_DATAFRAMES)
result = doctests.testfile(filename, module_relative=False, report=False)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
def test_wont_implement(self):
result = doctests.teststring(
WONT_IMPLEMENT_RAISING_TESTS,
optionflags=doctest.ELLIPSIS,
wont_implement_ok=True)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
result = doctests.teststring(
WONT_IMPLEMENT_RAISING_TESTS,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
wont_implement_ok=True)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
def test_wont_implement_followed_by_name_error(self):
result = doctests.teststring(
WONT_IMPLEMENT_RAISING_NAME_ERROR_TESTS,
optionflags=doctest.ELLIPSIS,
wont_implement_ok=True)
self.assertEqual(result.attempted, 6)
self.assertEqual(result.failed, 1) # Only the very last one.
def test_not_implemented(self):
result = doctests.teststring(
NOT_IMPLEMENTED_RAISING_TESTS,
optionflags=doctest.ELLIPSIS,
not_implemented_ok=True)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
result = doctests.teststring(
NOT_IMPLEMENTED_RAISING_TESTS,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
not_implemented_ok=True)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
def test_not_implemented_followed_by_name_error(self):
result = doctests.teststring(
NOT_IMPLEMENTED_RAISING_NAME_ERROR_TESTS,
optionflags=doctest.ELLIPSIS,
not_implemented_ok=True)
self.assertEqual(result.attempted, 6)
self.assertEqual(result.failed, 1) # Only the very last one.
def test_failed_assignment(self):
result = doctests.teststring(
FAILED_ASSIGNMENT,
optionflags=doctest.ELLIPSIS,
not_implemented_ok=True)
self.assertNotEqual(result.attempted, 0)
self.assertEqual(result.failed, 0)
def test_rst_ipython(self):
try:
import IPython
except ImportError:
raise unittest.SkipTest('IPython not available')
result = doctests.test_rst_ipython(RST_IPYTHON, 'test_rst_ipython')
self.assertEqual(result.attempted, 8)
self.assertEqual(result.failed, 1) # Only the very last one.
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
PythonProgramming/Pattern-Recognition-for-Forex-Trading | machFX3.py | 1 | 1337 | # Keep in mind the bid/ask spread.
# all purchases would be made @ the ask.
# all sales made @ the bid.
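# Rough illustration of that cost (hypothetical quotes, not values taken from
# GBPUSD1d.txt): if the pair is quoted bid=1.55010 / ask=1.55020, buying at
# the ask and immediately selling back at the bid loses the spread,
# 1.55020 - 1.55010 = 0.00010, i.e. 1 pip per unit traded.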
'''
So the first thing we need to do is go ahead and plot
this data out to see what we're working with, and
see what our goals are.
'''
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
import numpy as np
from numpy import loadtxt
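# Plots the bid and ask series against time on the main axis and fills the
# bid/ask spread (ask - bid) on a twinned y-axis.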
def graphRawFX():
date,bid,ask = np.loadtxt('GBPUSD1d.txt', unpack=True,
delimiter=',',
converters={0:mdates.strpdate2num('%Y%m%d%H%M%S')})
fig=plt.figure(figsize=(10,7))
ax1 = plt.subplot2grid((40,40), (0,0), rowspan=40, colspan=40)
ax1.plot(date,bid)
ax1.plot(date,ask)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
#####
plt.grid(True)
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(45)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
#######
ax1_2 = ax1.twinx()
#ax1_2.plot(date, (ask-bid))
ax1_2.fill_between(date, 0, (ask-bid), facecolor='g',alpha=.3)
#ax1_2.set_ylim(0, 3*ask.max())
#######
plt.subplots_adjust(bottom=.23)
#plt.grid(True)
plt.show()
graphRawFX()
| mit |
UDST/urbanaccess | urbanaccess/gtfs/network.py | 1 | 56617 | from __future__ import division
import os
import pandas as pd
import time
from datetime import datetime, timedelta
import logging as lg
from urbanaccess.utils import log, df_to_hdf5, hdf5_to_df
from urbanaccess.gtfs.utils_validation import _check_time_range_format
from urbanaccess.network import ua_network
from urbanaccess import config
from urbanaccess.gtfs.gtfsfeeds_dataframe import gtfsfeeds_dfs, \
urbanaccess_gtfs_df
pd.options.mode.chained_assignment = None
def create_transit_net(
gtfsfeeds_dfs,
day,
timerange,
calendar_dates_lookup=None,
overwrite_existing_stop_times_int=False,
use_existing_stop_times_int=False,
save_processed_gtfs=False,
save_dir=config.settings.data_folder,
save_filename=None,
timerange_pad=None,
time_aware=False):
"""
Create a travel time weight network graph in units of
minutes from GTFS data
Parameters
----------
gtfsfeeds_dfs : object
urbanaccess_gtfs_df object with DataFrames of stops, routes, trips,
stop_times, calendar, calendar_dates (optional) and
stop_times_int (optional)
day : {'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday'}
day of the week to extract transit schedule from that
corresponds to the day in the GTFS calendar
timerange : list
time range to extract transit schedule from in a list with time
1 and time 2 as strings. It is suggested the time range
specified is large enough to allow for travel
from one end of the transit network to the other but small enough
to represent a relevant travel time period such as a 3 hour window
for the AM Peak period. Must follow format
of a 24 hour clock for example: 08:00:00 or 17:00:00
calendar_dates_lookup : dict, optional
dictionary of the lookup column (key) as a string and corresponding
string (value) as string or list of strings to use to subset trips
using the calendar_dates DataFrame. Search will be exact. If none,
then the calendar_dates DataFrame will not be used to select trips
that are not in the calendar DataFrame. Note search will select all
records that meet each key value pair criteria.
Example: {'schedule_type' : 'WD'} or {'schedule_type' : ['WD', 'SU']}
overwrite_existing_stop_times_int : bool, optional
if true, and if there is an existing stop_times_int
DataFrame stored in the gtfsfeeds_dfs object it will be
overwritten
use_existing_stop_times_int : bool, optional
if true, and if there is an existing stop_times_int
DataFrame for the same time period stored in the
gtfsfeeds_dfs object it will be used instead of re-calculated
save_processed_gtfs : bool, optional
if true, all processed GTFS DataFrames will
be stored to disk in a HDF5 file
save_dir : str, optional
directory to save the HDF5 file
save_filename : str, optional
name to save the HDF5 file as
    timerange_pad : str, optional
        string indicating the number of hours, minutes, and seconds to pad
        after the end of the time interval specified in 'timerange'. Must
        follow the format of a 24 hour clock, for example: '02:00:00' for a
        two hour pad or '02:30:00' for a 2 hour and 30 minute pad.
time_aware: bool, optional
boolean to indicate whether the transit network should include
time information. If True, 'arrival_time' and 'departure_time' columns
from the stop_times table will be included in the transit edge table
where 'departure_time' is the departure time at node_id_from stop and
'arrival_time' is the arrival time at node_id_to stop
Returns
-------
ua_network : object
ua_network.transit_edges : pandas.DataFrame
ua_network.transit_nodes : pandas.DataFrame
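    Examples
    --------
    A minimal usage sketch; the loader call and feed path below are
    illustrative assumptions, not part of this function:
    >>> import urbanaccess as ua
    >>> loaded_feeds = ua.gtfs.load.gtfsfeed_to_df(
    ...     gtfsfeed_path='data/gtfsfeed_text')  # hypothetical path
    >>> net = ua.gtfs.network.create_transit_net(
    ...     gtfsfeeds_dfs=loaded_feeds,
    ...     day='monday',
    ...     timerange=['07:00:00', '10:00:00'])
    >>> net.transit_edges.head()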
"""
start_time = time.time()
_check_time_range_format(timerange)
if not isinstance(gtfsfeeds_dfs, urbanaccess_gtfs_df):
raise ValueError('gtfsfeeds_dfs must be an urbanaccess_gtfs_df '
'object.')
error_msg = ('One of the following gtfsfeeds_dfs objects: {} were '
'found to be empty.')
if gtfsfeeds_dfs.trips.empty or gtfsfeeds_dfs.stop_times.empty or \
gtfsfeeds_dfs.stops.empty:
error_msg_case_1 = 'trips, stops, or stop_times'
raise ValueError(error_msg.format(error_msg_case_1))
if gtfsfeeds_dfs.calendar.empty and gtfsfeeds_dfs.calendar_dates.empty:
error_msg_case_2 = 'calendar or calendar_dates'
raise ValueError(error_msg.format(error_msg_case_2))
if not isinstance(overwrite_existing_stop_times_int, bool):
raise ValueError('overwrite_existing_stop_times_int must be bool.')
if not isinstance(use_existing_stop_times_int, bool):
raise ValueError('use_existing_stop_times_int must be bool.')
if not isinstance(save_processed_gtfs, bool):
raise ValueError('save_processed_gtfs must be bool.')
if timerange_pad and not isinstance(timerange_pad, str):
raise ValueError('timerange_pad must be string.')
if not isinstance(time_aware, bool):
raise ValueError('time_aware must be bool.')
if overwrite_existing_stop_times_int and use_existing_stop_times_int:
raise ValueError('overwrite_existing_stop_times_int and '
'use_existing_stop_times_int cannot both be True.')
columns = ['route_id',
'direction_id',
'trip_id',
'service_id',
'unique_agency_id',
'unique_feed_id']
if 'direction_id' not in gtfsfeeds_dfs.trips.columns:
columns.remove('direction_id')
# TODO: support use case where only calendar_dates is in use: make 'day'
# optional as None but require either day or calendar_dates_lookup
# to exist but both are not required
calendar_selected_trips_df = _trip_schedule_selector(
input_trips_df=gtfsfeeds_dfs.trips[columns],
input_calendar_df=gtfsfeeds_dfs.calendar,
input_calendar_dates_df=gtfsfeeds_dfs.calendar_dates,
day=day,
calendar_dates_lookup=calendar_dates_lookup)
# proceed to calc stop_times_int if stop_times_int is already empty, or
# overwrite existing is True, or use existing is False
if gtfsfeeds_dfs.stop_times_int.empty or \
overwrite_existing_stop_times_int or use_existing_stop_times_int \
is False:
if overwrite_existing_stop_times_int:
log(' Overwriting existing stop_times_int DataFrame...')
gtfsfeeds_dfs.stop_times_int = _interpolate_stop_times(
stop_times_df=gtfsfeeds_dfs.stop_times,
calendar_selected_trips_df=calendar_selected_trips_df)
gtfsfeeds_dfs.stop_times_int = _time_difference(
stop_times_df=gtfsfeeds_dfs.stop_times_int)
if save_processed_gtfs:
save_processed_gtfs_data(gtfsfeeds_dfs=gtfsfeeds_dfs,
dir=save_dir, filename=save_filename)
if use_existing_stop_times_int:
log(' Using existing stop_times_int DataFrame...')
selected_interpolated_stop_times_df = _time_selector(
df=gtfsfeeds_dfs.stop_times_int,
starttime=timerange[0],
endtime=timerange[1],
timerange_pad=timerange_pad)
final_edge_table = _format_transit_net_edge(
stop_times_df=selected_interpolated_stop_times_df,
time_aware=time_aware)
transit_edges = _convert_imp_time_units(
df=final_edge_table, time_col='weight', convert_to='minutes')
final_selected_stops = _stops_in_edge_table_selector(
input_stops_df=gtfsfeeds_dfs.stops,
input_stop_times_df=selected_interpolated_stop_times_df)
transit_nodes = _format_transit_net_nodes(df=final_selected_stops)
transit_edges = _route_type_to_edge(
transit_edge_df=transit_edges, stop_time_df=gtfsfeeds_dfs.stop_times)
transit_edges = _route_id_to_edge(
transit_edge_df=transit_edges, trips_df=gtfsfeeds_dfs.trips)
# assign node and edge net type
transit_nodes['net_type'] = 'transit'
transit_edges['net_type'] = 'transit'
# set global ua_network edges and nodes
ua_network.transit_edges = transit_edges
ua_network.transit_nodes = transit_nodes
log('Successfully created transit network. Took {:,.2f} seconds.'.format(
time.time() - start_time))
return ua_network
def _trip_schedule_selector(input_trips_df, input_calendar_df,
input_calendar_dates_df, day,
calendar_dates_lookup=None):
"""
Select trips that correspond to a specific schedule in either calendar.txt
and or calendar_dates.txt by finding service_ids that correspond to the
specified search parameters and the trips related to those service_ids
Parameters
----------
input_trips_df : pandas.DataFrame
trips DataFrame
input_calendar_df : pandas.DataFrame
calendar DataFrame
input_calendar_dates_df : pandas.DataFrame
calendar_dates DataFrame
day : {'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday'}
day of the week to extract transit schedule that corresponds to the
day in the GTFS calendar
calendar_dates_lookup : dict, optional
dictionary of the lookup column (key) as a string and corresponding
string (value) as string or list of strings to use to subset trips
using the calendar_dates DataFrame. Search will be exact. If none,
then the calendar_dates DataFrame will not be used to select trips
that are not in the calendar DataFrame. Note search will select all
records that meet each key value pair criteria.
Example: {'schedule_type' : 'WD'} or {'schedule_type' : ['WD', 'SU']}
Returns
-------
calendar_selected_trips_df : pandas.DataFrame
"""
start_time = time.time()
valid_days = ['monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday']
if day not in valid_days:
valid_days_str = str(valid_days).replace('[', '').replace(']', '')
raise ValueError('Incorrect day specified. Must be one of lowercase '
'strings: {}.'.format(valid_days_str))
# check format of calendar_dates_lookup
if calendar_dates_lookup is not None:
if not isinstance(calendar_dates_lookup, dict):
raise ValueError(
'calendar_dates_lookup parameter must be a dictionary.')
for key in calendar_dates_lookup.keys():
if not isinstance(key, str):
raise ValueError('calendar_dates_lookup key: {} '
'must be a string.'.format(key))
if isinstance(calendar_dates_lookup[key], str):
value = [calendar_dates_lookup[key]]
else:
if not isinstance(calendar_dates_lookup[key], list):
raise ValueError(
'calendar_dates_lookup value: {} must be a string or '
'a list of strings.'.format(
calendar_dates_lookup[key]))
else:
value = calendar_dates_lookup[key]
for string in value:
if not isinstance(string, str):
raise ValueError('calendar_dates_lookup value: {} '
'must contain strings.'.format(value))
# check if calendar dfs and related params are empty or not to determine
# what will be used in processing
has_calendar = input_calendar_df.empty is False
has_calendar_param = day is not None
has_calendar_dates = input_calendar_dates_df.empty is False
has_calendar_dates_param = calendar_dates_lookup is not None
if not has_calendar:
log('calendar table has no records and will not be used to '
'select trips.')
if has_calendar_param:
log("Warning: calendar is empty. "
"Unable to use the 'day' parameter.", level=lg.WARNING)
if has_calendar_dates:
if not has_calendar_dates_param:
log("calendar_dates table has records however the "
"'calendar_dates_lookup' parameter is None, no trips will be "
"selected using calendar_dates.")
else:
log('calendar_dates table has no records and will not be used to '
'select trips.')
if has_calendar_dates_param:
raise ValueError("calendar_dates is empty. Unable to use the "
"'calendar_dates_lookup' parameter. Set to None.")
# create unique service IDs for dfs in list if they are not empty
df_list = [input_trips_df]
if has_calendar:
df_list.extend([input_calendar_df])
if has_calendar_dates:
df_list.extend([input_calendar_dates_df])
for index, df in enumerate(df_list):
df['unique_service_id'] = (df['service_id'].str.cat(
df['unique_agency_id'].astype('str'), sep='_'))
df_list[index] = df
service_ids_df = pd.DataFrame()
# collect service IDs that match search parameters in calendar.txt
if has_calendar and has_calendar_param:
# select service IDs where day specified has a 1 = service
# runs on that day
log('Using calendar to extract service_ids to select trips...')
service_ids_df = input_calendar_df[(input_calendar_df[day] == 1)]
service_ids_df = service_ids_df[['unique_service_id']]
num_cal_service_ids_extracted = len(service_ids_df)
log('{:,} service_ids were extracted from calendar.'.format(
num_cal_service_ids_extracted))
# generate information needed to tell user the status of their trips in
# terms of service_ids in calendar table
trips_in_calendar = input_trips_df.loc[
input_trips_df['unique_service_id'].isin(
service_ids_df['unique_service_id'])]
trips_notin_calendar = input_trips_df.loc[
~input_trips_df['unique_service_id'].isin(
service_ids_df['unique_service_id'])]
cnt_input_trips_df = len(input_trips_df)
cnt_trips_in_calendar = len(trips_in_calendar)
pct_trips_in_calendar = round(cnt_trips_in_calendar / len(
input_trips_df) * 100, 2)
feeds_wtrips_in_cal = trips_in_calendar['unique_feed_id'].unique()
print_feed_ids = [' '.join(feed_id.split('_')[:-1]) for feed_id in
feeds_wtrips_in_cal]
feeds_wotrips_in_cal = trips_notin_calendar['unique_feed_id'].unique()
if print_feed_ids:
log('{:,} trip(s) {:.2f} percent of {:,} total trip records were '
'found in calendar for GTFS feed(s): {}.'.format(
cnt_trips_in_calendar, pct_trips_in_calendar,
cnt_input_trips_df, print_feed_ids))
feed_id_not_in_cal = [x for x in feeds_wotrips_in_cal if
x not in feeds_wtrips_in_cal]
for feed_id in feed_id_not_in_cal:
trip_feed_name = ' '.join(feed_id.split('_')[:-1])
log('0 trip(s) 0 percent of {:,} total trip records were '
'found in calendar for GTFS feed: {}.'.format(
cnt_input_trips_df, trip_feed_name))
# warn user that if they have a calendar_dates table and they
# expected more trips to be selected from the calendar table that
# they should consider using the calendar_dates table to supplement
# the selection of trips
if has_calendar_dates and len(trips_notin_calendar) > 0 and \
has_calendar_dates_param is False:
warning_msg = (
'NOTE: If you expected more trips to have been extracted and '
'your GTFS feed(s) have a calendar_dates file, consider '
'utilizing the calendar_dates_lookup parameter in order to '
'add additional trips based on information inside of '
'calendar_dates. This should only be done if you know the '
'corresponding GTFS feed is using calendar_dates instead of '
'calendar to specify service_ids. When in doubt do not use '
'the calendar_dates_lookup parameter.')
log(warning_msg, level=lg.WARNING)
if len(feeds_wtrips_in_cal) != len(feeds_wotrips_in_cal) and \
calendar_dates_lookup is None:
for feed_id in feeds_wotrips_in_cal:
trip_feed_name = ' '.join(feed_id.split('_')[:-1])
log('{:,} trip(s) {:.2f} percent of {:,} total trip records '
'were not found in calendar for GTFS feed: {}.'.format(
cnt_trips_in_calendar, pct_trips_in_calendar,
cnt_input_trips_df, trip_feed_name))
if feed_id not in feeds_wtrips_in_cal:
log('Warning: GTFS feed: {} no trips were selected using '
'calendar. It is suggested you use the '
'calendar_dates_lookup parameter to utilize this '
'feed\'s calendar_dates file.'.format(trip_feed_name),
level=lg.WARNING)
else:
num_cal_service_ids_extracted = 0
cnt_input_trips_df = 0
# collect service IDs that match search parameters in calendar_dates.txt
if has_calendar_dates and has_calendar_dates_param:
# look for service_ids inside of calendar_dates if calendar does not
# supply enough service_ids to select trips by
if has_calendar:
if len(trips_notin_calendar) > 0:
log('Using calendar_dates to supplement service_ids extracted '
'from calendar to select trips...')
subset_result_df = pd.DataFrame()
for col_name_key, string_value in calendar_dates_lookup.items():
if col_name_key not in input_calendar_dates_df.columns:
raise ValueError('Column: {} not found in calendar_dates '
'DataFrame.'.format(col_name_key))
if col_name_key not in input_calendar_dates_df.select_dtypes(
include=[object]).columns:
raise ValueError('Column: {} must be object type.'.format(
col_name_key))
if not isinstance(string_value, list):
string_value = [string_value]
for text in string_value:
# TODO: modify this in order to allow subset based on GTFS
# feed name or a or/and condition
subset_result = input_calendar_dates_df[
input_calendar_dates_df[col_name_key].str.match(
text, case=False, na=False)]
cnt_subset_result = len(subset_result)
if cnt_subset_result != 0:
feed_id_list = subset_result['unique_feed_id'].unique()
for index, id in enumerate(feed_id_list):
feed_id_list[index] = ' '.join(id.split('_')[:-1])
log('Found {:,} record(s) that matched query: column: {} '
'and string: {} for GTFS feed(s): {}.'.format(
cnt_subset_result, col_name_key, text, feed_id_list))
subset_result_df = subset_result_df.append(subset_result)
subset_result_df.drop_duplicates(inplace=True)
subset_result_df = subset_result_df[['unique_service_id']]
num_caldates_service_ids_extracted = len(subset_result_df)
tot_service_ids_extracted = \
num_caldates_service_ids_extracted + num_cal_service_ids_extracted
log('An additional {:,} service_id(s) were extracted from '
'calendar_dates. Total service_id(s) extracted: {:,}.'.format(
num_caldates_service_ids_extracted, tot_service_ids_extracted))
service_ids_df = service_ids_df.append(subset_result_df)
service_ids_df.drop_duplicates(inplace=True)
if service_ids_df.empty:
raise ValueError('No service_id(s) were found with '
'the specified calendar and or calendar_dates '
'search parameters.')
# select and create df of trips that match the service IDs for the day of
# the week specified merge calendar df that has service IDs for
# specified day with trips df
calendar_selected_trips_df = input_trips_df.loc[
input_trips_df['unique_service_id'].isin(
service_ids_df['unique_service_id'])]
sort_columns = ['route_id', 'trip_id', 'direction_id']
if 'direction_id' not in calendar_selected_trips_df.columns:
sort_columns.remove('direction_id')
calendar_selected_trips_df.sort_values(by=sort_columns, inplace=True)
calendar_selected_trips_df.reset_index(drop=True, inplace=True)
calendar_selected_trips_df.drop('unique_service_id', axis=1, inplace=True)
calendar_selected_trips_count = len(calendar_selected_trips_df)
if calendar_dates_lookup is None:
log('{:,} of {:,} total trips were extracted representing calendar '
'day: {}. Took {:,.2f} seconds.'.format(
calendar_selected_trips_count, cnt_input_trips_df, day,
time.time() - start_time))
else:
log('{:,} of {:,} total trips were extracted representing calendar '
'day: {} and calendar_dates search parameters: {}. '
'Took {:,.2f} seconds.'.format(
calendar_selected_trips_count, cnt_input_trips_df, day,
calendar_dates_lookup, time.time() - start_time))
return calendar_selected_trips_df
def _interpolate_stop_times(stop_times_df, calendar_selected_trips_df):
"""
Interpolate missing stop times using a linear
interpolator between known stop times
Parameters
----------
stop_times_df : pandas.DataFrame
stop times DataFrame
calendar_selected_trips_df : pandas.DataFrame
DataFrame of trips that run on specific day
Returns
-------
final_stop_times_df : pandas.DataFrame
"""
start_time = time.time()
# create unique trip IDs
df_list = [calendar_selected_trips_df, stop_times_df]
for index, df in enumerate(df_list):
df['unique_trip_id'] = (df['trip_id'].str.cat(
df['unique_agency_id'].astype('str'), sep='_'))
df_list[index] = df
# sort stop times inplace based on first to last stop in
# sequence -- required as the linear interpolator runs
# from first value to last value
if stop_times_df['stop_sequence'].isnull().sum() > 1:
log('WARNING: There are {:,} stop_sequence records missing in the '
'stop_times DataFrame. Please check these missing values. '
'In order for interpolation to proceed correctly, all records '
'must have a stop_sequence value.'.format(
stop_times_df['stop_sequence'].isnull().sum()),
level=lg.WARNING)
stop_times_df.sort_values(by=['unique_trip_id', 'stop_sequence'],
inplace=True)
# make list of unique trip IDs from the calendar_selected_trips_df
uniquetriplist = calendar_selected_trips_df[
'unique_trip_id'].unique().tolist()
# select trip IDs that match the trips in the
# calendar_selected_trips_df -- resulting df will be stop times
# only for trips that run on the service day or dates of interest
stop_times_df = stop_times_df[
stop_times_df['unique_trip_id'].isin(uniquetriplist)]
# if there were no records that match then do not proceed and throw error
if len(stop_times_df) == 0:
        raise ValueError('No matching trip_ids were found. Suggest checking '
'for differences between trip_id values in '
'stop_times and trips GTFS files.')
# count missing stop times
missing_stop_times_count = stop_times_df[
'departure_time_sec'].isnull().sum()
# if there are stop times missing that need interpolation notify user
if missing_stop_times_count > 0:
log('Note: Processing may take a long time depending '
'on the number of records. '
'Total unique trips to assess: {:,}.'.format(
len(stop_times_df['unique_trip_id'].unique())),
level=lg.WARNING)
log('Starting departure stop time interpolation...')
log('Departure time records missing from trips following the '
'specified schedule: {:,} ({:.2f} percent of {:,} total '
'records.)'.format(
missing_stop_times_count,
(missing_stop_times_count / len(stop_times_df)) * 100,
len(stop_times_df['departure_time_sec'])))
log('Interpolating...')
else:
log('There are no departure time records missing from trips '
'following the specified schedule. There are no records to '
'interpolate.')
    # TODO: for the rare and unlikely case when there is 1 null record and
    # it's not the first or last stop in the stop sequence, that value
    # should be interpolated and its trip ID should be added to those to be
    # interpolated - this additional case would have to be benchmarked
    # for speed to ensure it doesn't slow down the existing process
# Find trips with more than one missing time
# Note: all trip IDs have at least 1 null departure time because the
# last stop in a trip is always null
null_times = stop_times_df[stop_times_df.departure_time_sec.isnull()]
trips_with_null = null_times.unique_trip_id.value_counts()
trips_with_more_than_one_null = trips_with_null[
trips_with_null > 1].index.values
# Subset stop times DataFrame to only those with >1 null time
df_for_interpolation = stop_times_df.loc[
stop_times_df.unique_trip_id.isin(trips_with_more_than_one_null)]
if len(df_for_interpolation) > 0:
# check for duplicate stop_sequence and unique_trip_id combination,
# if dups are found this will throw an error during the pivot()
# operation so catch and return to user instead
dup_df = df_for_interpolation[df_for_interpolation.duplicated(
subset=['stop_sequence', 'unique_trip_id'], keep='first')]
if len(dup_df) != 0:
dup_values = list(dup_df['unique_trip_id'].unique())
raise ValueError('Found duplicate values when values from '
'stop_sequence and unique_trip_id are combined. '
'Check values in these columns for '
'trip_id(s): {}.'.format(dup_values))
# Pivot to DataFrame where each unique trip has its own column
# Index is stop_sequence
pivot = df_for_interpolation.pivot(
index='stop_sequence', columns='unique_trip_id',
values='departure_time_sec')
# Interpolate on the whole DataFrame at once
interpolator = pivot.interpolate(
method='linear', axis=0, limit_direction='forward')
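        # e.g. (toy illustration) a trip column with departure seconds
        # [28800, NaN, 29100] becomes [28800.0, 28950.0, 29100.0]: the missing
        # middle stop is placed halfway between its known neighbors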
# Melt back into stacked format
interpolator['stop_sequence_merge'] = interpolator.index
melted = pd.melt(interpolator, id_vars='stop_sequence_merge')
melted.rename(columns={'value': 'departure_time_sec_interpolate'},
inplace=True)
# Get the last valid stop for each unique trip,
# to filter out trailing NaNs
last_valid_stop_series = pivot.apply(
lambda col: col.last_valid_index(), axis=0)
last_valid_stop_df = last_valid_stop_series.to_frame('last_valid_stop')
df_for_interpolation = (
df_for_interpolation.merge(
last_valid_stop_df, left_on='unique_trip_id',
right_index=True))
trailing = (df_for_interpolation.stop_sequence >
df_for_interpolation.last_valid_stop)
# Calculate a stop_sequence without trailing NaNs, to merge the correct
# interpolated times back in
df_for_interpolation['stop_sequence_merge'] = (
df_for_interpolation[~trailing]['stop_sequence'])
# Need to check if existing index is in column names and drop if
# so (else a ValueError where Pandas can't insert
# b/c col already exists will occur)
drop_bool = False
if _check_if_index_name_in_cols(df_for_interpolation):
# move the current index to its own col named 'index'
log('stop_times index name: {} is also a column name. '
'Index will be dropped for interpolation and re-created '
'afterwards to continue.'.format(
df_for_interpolation.index.name))
col_name_to_copy = df_for_interpolation.index.name
col_to_copy = df_for_interpolation[col_name_to_copy].copy()
df_for_interpolation['index'] = col_to_copy
drop_bool = True
df_for_interpolation.reset_index(inplace=True, drop=drop_bool)
# Merge back into original index
interpolated_df = pd.merge(
df_for_interpolation, melted, how='left',
on=['stop_sequence_merge', 'unique_trip_id'])
# set index back to what it was if it was removed above before merge
if drop_bool is False:
interpolated_df.set_index('index', inplace=True)
interpolated_times = (
interpolated_df[['departure_time_sec_interpolate']])
final_stop_times_df = pd.merge(
stop_times_df, interpolated_times, how='left',
left_index=True, right_index=True, sort=False, copy=False)
else:
final_stop_times_df = stop_times_df
final_stop_times_df['departure_time_sec_interpolate'] = (
final_stop_times_df['departure_time_sec'])
# fill in nulls in interpolated departure time column using trips that
# did not need interpolation in order to create
# one column with both original and interpolated times
final_stop_times_df['departure_time_sec_interpolate'].fillna(
final_stop_times_df['departure_time_sec'], inplace=True)
num_not_interpolated = final_stop_times_df[
'departure_time_sec_interpolate'].isnull().sum()
if num_not_interpolated > 0:
log('WARNING: Number of stop_time records unable to interpolate: {:,}.'
' These records likely had stops in either the start or '
            'end sequence that did not have time information available to '
'interpolate between. These records have been removed.'.format(
num_not_interpolated),
level=lg.WARNING)
# convert the interpolated times (float) to integer so all times are
# the same number format
# first run int converter on non-null records (nulls here are the last
# stop times in a trip because there is no departure)
final_stop_times_df = final_stop_times_df[
final_stop_times_df['departure_time_sec_interpolate'].notnull()]
# convert float to int
final_stop_times_df['departure_time_sec_interpolate'] = \
final_stop_times_df['departure_time_sec_interpolate'].astype(int)
# add unique stop ID
final_stop_times_df['unique_stop_id'] = (
final_stop_times_df['stop_id'].str.cat(
final_stop_times_df['unique_agency_id'].astype('str'), sep='_'))
if missing_stop_times_count > 0:
log('Departure stop time interpolation complete. '
'Took {:,.2f} seconds.'.format(time.time() - start_time))
return final_stop_times_df
def _time_difference(stop_times_df):
"""
Calculate the difference in departure_time between stops in stop times
table to produce travel time
Parameters
----------
stop_times_df : pandas.DataFrame
interpolated stop times DataFrame
Returns
-------
stop_times_df : pandas.DataFrame
"""
start_time = time.time()
# calculate difference between consecutive records grouping by trip ID
stop_times_df['timediff'] = stop_times_df.groupby('unique_trip_id')[
'departure_time_sec_interpolate'].diff()
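    # e.g. interpolated departure times [28800, 28950, 29100] within a single
    # trip yield timediff values [NaN, 150, 150] seconds between stops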
log('Difference between stop times has been successfully calculated. '
'Took {:,.2f} seconds.'.format(time.time() - start_time))
return stop_times_df
def _time_selector(df, starttime, endtime, timerange_pad=None):
"""
Select stop times that fall within a specified time range
Parameters
----------
df : pandas.DataFrame
interpolated stop times DataFrame
starttime : str
24 hour clock formatted time 1
endtime : str
        24 hour clock formatted time 2
    timerange_pad : str, optional
        string indicating the number of hours, minutes, and seconds to pad
        after the end of the time interval specified in 'timerange'. Must
        follow the format of a 24 hour clock, for example: '02:00:00' for a
        two hour pad or '02:30:00' for a 2 hour and 30 minute pad.
Returns
-------
selected_stop_timesdf : pandas.DataFrame
"""
start_time = time.time()
# takes input start and end time range from 24 hour clock and converts
# it to seconds past midnight
# in order to select times that may be after midnight
# convert string time components to integer and then calculate seconds
# past midnight
# convert starttime 24 hour to seconds past midnight
# TODO: optimize for speed
start_h = int(str(starttime[0:2]))
start_m = int(str(starttime[3:5]))
start_s = int(str(starttime[6:8]))
starttime_sec = (start_h * 60 * 60) + (start_m * 60) + start_s
# convert endtime 24 hour to seconds past midnight
end_h = int(str(endtime[0:2]))
end_m = int(str(endtime[3:5]))
end_s = int(str(endtime[6:8]))
endtime_sec = (end_h * 60 * 60) + (end_m * 60) + end_s
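    # e.g. '08:30:15' -> (8 * 3600) + (30 * 60) + 15 = 30,615 seconds past
    # midnight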
# define timepad in seconds to include stops active after specified endtime
if timerange_pad:
# convert timerange_pad 24 hour to seconds
pad_h = int(str(timerange_pad[0:2]))
pad_m = int(str(timerange_pad[3:5]))
pad_s = int(str(timerange_pad[6:8]))
pad_sec = (pad_h * 60 * 60) + (pad_m * 60) + pad_s
# add endtime and timerange_pad to get new endtime and convert to
# str for informative print
dt1 = datetime.strptime(endtime, '%H:%M:%S')
dt2 = datetime.strptime(timerange_pad, '%H:%M:%S')
dt2_delta = timedelta(hours=dt2.hour, minutes=dt2.minute,
seconds=dt2.second)
dt3 = dt1 + dt2_delta
str_t3 = datetime.strftime(dt3, '%H:%M:%S')
log(' Additional stop times active between the specified end time: '
'{} with timerange_pad of: {} (padded end time: {}) '
'will be selected...'.format(endtime, timerange_pad, str_t3))
pad = int(0 if timerange_pad is None else pad_sec)
# create df of stops times that are within the requested range
selected_stop_timesdf = df[(
(starttime_sec <= df["departure_time_sec_interpolate"]) & (
df["departure_time_sec_interpolate"] <= endtime_sec + pad))]
subset_df_count = len(selected_stop_timesdf)
df_count = len(df)
if timerange_pad:
        log('Stop times from {} to {} (with padded end time: {}) '
'successfully selected {:,} records out of {:,} total records '
'({:.2f} percent of total). '
'Took {:,.2f} seconds.'.format(
starttime, endtime, str_t3, subset_df_count, df_count,
(subset_df_count / df_count) * 100,
time.time() - start_time))
else:
log('Stop times from {} to {} successfully selected {:,} records '
'out of {:,} total records ({:.2f} percent of total). '
'Took {:,.2f} seconds.'.format(
starttime, endtime, subset_df_count, df_count,
(subset_df_count / df_count) * 100,
time.time() - start_time))
return selected_stop_timesdf
def _format_transit_net_edge(stop_times_df, time_aware=False):
"""
Format transit network data table to match the format required for edges
in Pandana graph networks edges
Parameters
----------
stop_times_df : pandas.DataFrame
interpolated stop times with travel time between stops for the subset
time and day
time_aware: bool, optional
boolean to indicate whether the transit network should include
time information. If True, 'arrival_time' and 'departure_time' columns
from the stop_times table will be included in the transit edge table
where 'departure_time' is the departure time at node_id_from stop and
'arrival_time' is the arrival time at node_id_to stop
Returns
-------
merged_edge_df : pandas.DataFrame
"""
start_time = time.time()
log('Starting transformation process for {:,} '
'total trips...'.format(len(stop_times_df['unique_trip_id'].unique())))
# subset to only columns needed for processing
cols_of_interest = ['unique_trip_id', 'stop_id', 'unique_stop_id',
'timediff', 'stop_sequence', 'unique_agency_id',
'trip_id', 'arrival_time', 'departure_time']
stop_times_df = stop_times_df[cols_of_interest]
# set columns for new df for data needed by Pandana for edges
merged_edge = []
stop_times_df.sort_values(by=['unique_trip_id', 'stop_sequence'],
inplace=True)
if time_aware:
log(' time_aware is True, also adding arrival and departure '
'stop times to edges...')
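    # e.g. (toy illustration) a trip serving stops A -> B -> C produces two
    # directed edges, (A, B) and (B, C), each weighted by the timediff
    # recorded at the destination stop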
for trip, tmp_trip_df in stop_times_df.groupby(['unique_trip_id']):
# if 'time_aware', also create arrival and departure time cols
if time_aware:
edge_df = pd.DataFrame({
"node_id_from": tmp_trip_df['unique_stop_id'].iloc[:-1].values,
"node_id_to": tmp_trip_df['unique_stop_id'].iloc[1:].values,
"weight": tmp_trip_df['timediff'].iloc[1:].values,
"unique_agency_id":
tmp_trip_df['unique_agency_id'].iloc[1:].values,
# set unique trip ID without edge order to join other data
# later
"unique_trip_id": trip,
# departure_time at node_id_from stop
"departure_time":
tmp_trip_df['departure_time'].iloc[:-1].values,
# arrival_time at node_id_to stop
"arrival_time":
tmp_trip_df['arrival_time'].iloc[1:].values
})
else:
edge_df = pd.DataFrame({
"node_id_from": tmp_trip_df['unique_stop_id'].iloc[:-1].values,
"node_id_to": tmp_trip_df['unique_stop_id'].iloc[1:].values,
"weight": tmp_trip_df['timediff'].iloc[1:].values,
"unique_agency_id":
tmp_trip_df['unique_agency_id'].iloc[1:].values,
# set unique trip ID without edge order to join other data
# later
"unique_trip_id": trip
})
# Set current trip ID to edge ID column adding edge order at
# end of string
edge_df['sequence'] = (edge_df.index + 1).astype(int)
# append completed formatted edge table to master edge table
merged_edge.append(edge_df)
merged_edge_df = pd.concat(merged_edge, ignore_index=True)
merged_edge_df['sequence'] = merged_edge_df['sequence'].astype(
int, copy=False)
# create a unique sequential edge ID
# TODO: consider changing col name to 'edge_id' for clarity
merged_edge_df['id'] = (
merged_edge_df['unique_trip_id'].str.cat(
merged_edge_df['sequence'].astype('str'), sep='_'))
log('Stop time table transformation to Pandana format edge table '
'completed. Took {:,.2f} seconds.'.format(time.time() - start_time))
return merged_edge_df
def _convert_imp_time_units(df, time_col='weight', convert_to='minutes'):
"""
Convert the travel time impedance units
Parameters
----------
df : pandas.DataFrame
edge DataFrame with weight column
time_col : str
name of column that holds the travel impedance
convert_to : {'seconds', 'minutes'}
unit to convert travel time to. should always be set to 'minutes'
Returns
-------
df : pandas.DataFrame
"""
valid_convert_to = ['seconds', 'minutes']
if convert_to not in valid_convert_to or not isinstance(convert_to, str):
raise ValueError('{} is not a valid value or is not a string.'.format(
convert_to))
if convert_to == 'seconds':
df[time_col] = df[time_col].astype('float')
df[time_col] = df[time_col] * 60
log('Time conversion completed: minutes converted to seconds.')
if convert_to == 'minutes':
df[time_col] = df[time_col].astype('float')
df[time_col] = df[time_col] / 60.0
log('Time conversion completed: seconds converted to minutes.')
return df
def _stops_in_edge_table_selector(input_stops_df, input_stop_times_df):
"""
Select stops that are active during the day and time period specified
Parameters
----------
input_stops_df : pandas.DataFrame
stops DataFrame
input_stop_times_df : pandas.DataFrame
stop_times DataFrame
Returns
-------
selected_stops_df : pandas.DataFrame
"""
start_time = time.time()
# add unique stop ID
input_stops_df['unique_stop_id'] = (
input_stops_df['stop_id'].str.cat(
input_stops_df['unique_agency_id'].astype('str'), sep='_'))
# Select stop IDs that match stop IDs in the subset stop time data that
# match day and time selection
selected_stops_df = input_stops_df.loc[
input_stops_df['unique_stop_id'].isin(
input_stop_times_df['unique_stop_id'])]
log('{:,} of {:,} records selected from stops. '
'Took {:,.2f} seconds.'.format(
len(selected_stops_df), len(input_stops_df),
time.time() - start_time))
return selected_stops_df
def _format_transit_net_nodes(df):
"""
Create transit node table from stops DataFrame and perform final formatting
Parameters
----------
df : pandas.DataFrame
        selected stops DataFrame used to build the transit node table
Returns
-------
final_node_df : pandas.DataFrame
"""
start_time = time.time()
# add unique stop ID
if 'unique_stop_id' not in df.columns:
df['unique_stop_id'] = (
df['stop_id'].str.cat(
df['unique_agency_id'].astype('str'), sep='_'))
final_node_df = pd.DataFrame()
final_node_df['node_id'] = df['unique_stop_id']
final_node_df['x'] = df['stop_lon']
final_node_df['y'] = df['stop_lat']
# keep useful info from stops table
col_list = ['unique_agency_id', 'route_type', 'stop_id', 'stop_name']
# if these optional cols exist then keep those that do
optional_gtfs_cols = ['parent_station', 'stop_code', 'wheelchair_boarding',
'zone_id', 'location_type']
for item in optional_gtfs_cols:
if item in df.columns:
col_list.append(item)
final_node_df = pd.concat([final_node_df, df[col_list]], axis=1)
# set node index to be unique stop ID
final_node_df = final_node_df.set_index('node_id')
log('Stop time table transformation to Pandana format node table '
'completed. Took {:,.2f} seconds.'.format(time.time() - start_time))
return final_node_df
def _route_type_to_edge(transit_edge_df, stop_time_df):
"""
Append route type information to transit edge table
Parameters
----------
transit_edge_df : pandas.DataFrame
transit edge DataFrame
stop_time_df : pandas.DataFrame
stop time DataFrame
Returns
-------
transit_edge_df_w_routetype : pandas.DataFrame
"""
start_time = time.time()
# create unique trip IDs
stop_time_df['unique_trip_id'] = (
stop_time_df['trip_id'].str.cat(
stop_time_df['unique_agency_id'].astype('str'), sep='_'))
    # join route_type to the edge table
merged_df = pd.merge(
transit_edge_df, stop_time_df[['unique_trip_id', 'route_type']],
how='left', on='unique_trip_id', sort=False, copy=False)
merged_df.drop_duplicates(
subset='unique_trip_id', keep='first', inplace=True)
# need to get unique records here to have a one to one join -
# this serves as the look up table
# join the look up table created above to the table of interest
transit_edge_df_w_routetype = pd.merge(
transit_edge_df, merged_df[['route_type', 'unique_trip_id']],
how='left', on='unique_trip_id', sort=False, copy=False)
log('Route type successfully joined to transit edges. '
'Took {:,.2f} seconds.'.format(time.time() - start_time))
return transit_edge_df_w_routetype
def _route_id_to_edge(transit_edge_df, trips_df):
"""
Append route IDs to transit edge table
Parameters
----------
transit_edge_df : pandas.DataFrame
transit edge DataFrame
trips_df : pandas.DataFrame
trips DataFrame
Returns
-------
transit_edge_df_with_routes : pandas.DataFrame
"""
start_time = time.time()
if 'unique_route_id' not in transit_edge_df.columns:
# create unique trip and route IDs
trips_df['unique_trip_id'] = (
trips_df['trip_id'].str.cat(
trips_df['unique_agency_id'].astype('str'), sep='_'))
trips_df['unique_route_id'] = (
trips_df['route_id'].str.cat(
trips_df['unique_agency_id'].astype('str'), sep='_'))
transit_edge_df_with_routes = pd.merge(
transit_edge_df, trips_df[['unique_trip_id', 'unique_route_id']],
how='left', on='unique_trip_id', sort=False, copy=False)
log('Route ID successfully joined to transit edges. '
'Took {:,.2f} seconds.'.format(time.time() - start_time))
return transit_edge_df_with_routes
def edge_impedance_by_route_type(
transit_edge_df,
travel_time_col_name='weight',
street_level_rail=None,
underground_rail=None,
intercity_rail=None,
bus=None,
ferry=None,
cable_car=None,
gondola=None,
funicular=None,
trolleybus=None,
monorail=None):
"""
Penalize transit edge travel time based on transit mode type
Parameters
----------
transit_edge_df : pandas.DataFrame
transit edge DataFrame
travel_time_col_name : str, optional
name of travel time column to apply multiplier factor,
default column name is 'weight'
    street_level_rail : float, optional
        factor between -1 and 1 to multiply against travel time
    underground_rail : float, optional
        factor between -1 and 1 to multiply against travel time
    intercity_rail : float, optional
        factor between -1 and 1 to multiply against travel time
    bus : float, optional
        factor between -1 and 1 to multiply against travel time
    ferry : float, optional
        factor between -1 and 1 to multiply against travel time
    cable_car : float, optional
        factor between -1 and 1 to multiply against travel time
    gondola : float, optional
        factor between -1 and 1 to multiply against travel time
    funicular : float, optional
        factor between -1 and 1 to multiply against travel time
    trolleybus : float, optional
        factor between -1 and 1 to multiply against travel time
    monorail : float, optional
        factor between -1 and 1 to multiply against travel time
Returns
-------
transit_edge_df : pandas.DataFrame
Returns transit_edge_df with travel_time_col_name column weighted by
specified coefficients by route type
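    Examples
    --------
    A hedged sketch: since the new weight is weight + (weight * factor), a
    factor of -0.5 halves rail travel times while 0.5 increases bus travel
    times by 50 percent:
    >>> transit_edge_df = edge_impedance_by_route_type(
    ...     transit_edge_df, underground_rail=-0.5, bus=0.5)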
"""
req_cols = [travel_time_col_name, 'route_type']
if not isinstance(travel_time_col_name, str):
raise ValueError('travel_time_col_name must be a string.')
for col in req_cols:
if col in transit_edge_df.columns:
if not pd.api.types.is_numeric_dtype(transit_edge_df[col]):
raise ValueError('{} must be a number.'.format(col))
else:
raise ValueError('Column: {} was not found in transit_edge_df '
'DataFrame and is required.'.format(col))
# build route type lookup dict
route_type_dict = config._ROUTES_MODE_TYPE_LOOKUP.copy()
var_mode_id_lookup = {0: street_level_rail,
1: underground_rail,
2: intercity_rail,
3: bus,
4: ferry,
5: cable_car,
6: gondola,
7: funicular,
11: trolleybus,
12: monorail}
    # ensure consistency between the keys in the config obj and the keys
    # used in this function in case changes are made in the config obj
    if set(sorted(route_type_dict.keys())) != set(
            sorted(var_mode_id_lookup.keys())):
        raise ValueError('ROUTES_MODE_TYPE_LOOKUP keys do not match keys in '
                         'var_mode_id_lookup. Keys must match.')
for key, value in route_type_dict.items():
route_type_dict[key] = {'name': value,
'multiplier': var_mode_id_lookup[key]}
# create the dict to pass to value_counts()
route_type_desc = route_type_dict.copy()
for key, val in route_type_dict.items():
route_type_desc[key] = val['name']
# check count of records for each route type
log('Route type distribution as percentage of transit mode:')
summary_stat = transit_edge_df['route_type'].map(
route_type_desc.get).value_counts(normalize=True, dropna=False) * 100
log(summary_stat)
travel_time_col = transit_edge_df[travel_time_col_name]
for route_type, route_vals in route_type_dict.items():
if route_vals['multiplier'] is not None:
if not isinstance(route_vals['multiplier'], float):
raise ValueError('One or more multiplier variables are not '
'float.')
# warn if multiplier is not within optimal range
if not -1 <= route_vals['multiplier'] <= 1:
log('WARNING: Multiplier value of: {} should be a '
'value between -1 and 1.'.format(route_vals['multiplier']),
level=lg.WARNING)
route_type_cnt = len(
transit_edge_df[transit_edge_df['route_type'] == route_type])
# warn if route type is not found in DataFrame
if route_type_cnt == 0 and route_vals['multiplier'] is not None:
log('WARNING: Route type: {} with specified multiplier value '
'of: {} was not found in the specified edge '
'DataFrame.'.format(
route_vals['name'], route_vals['multiplier']),
level=lg.WARNING)
if route_type_cnt > 0:
transit_edge_df[travel_time_col_name][
transit_edge_df['route_type'] == route_type] = \
travel_time_col + (
travel_time_col * route_vals['multiplier'])
log('Adjusted {} transit edge impedance based on mode '
'type penalty coefficient: {}.'.format(
route_vals['name'], route_vals['multiplier']))
log('Transit edge impedance mode type penalty calculation complete.')
return transit_edge_df
def save_processed_gtfs_data(
gtfsfeeds_dfs, filename, dir=config.settings.data_folder):
"""
Write DataFrames in an urbanaccess_gtfs_df object to a HDF5 file
Parameters
----------
gtfsfeeds_dfs : object
urbanaccess_gtfs_df object
filename : string
name of the HDF5 file to save with .h5 extension
dir : string, optional
directory to save HDF5 file
Returns
-------
None
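    Examples
    --------
    A minimal sketch; the file name below is a hypothetical example:
    >>> save_processed_gtfs_data(gtfsfeeds_dfs, filename='processed_gtfs.h5')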
"""
log('Writing HDF5 store...')
if not isinstance(gtfsfeeds_dfs, urbanaccess_gtfs_df):
raise ValueError('gtfsfeeds_dfs must be an urbanaccess_gtfs_df '
'object.')
req_df_dict = {'stops': gtfsfeeds_dfs.stops,
'routes': gtfsfeeds_dfs.routes,
'trips': gtfsfeeds_dfs.trips,
'stop_times': gtfsfeeds_dfs.stop_times,
'stop_times_int': gtfsfeeds_dfs.stop_times_int}
# calendar or calendar_dates are required but not both
optional_df_dict = {'headways': gtfsfeeds_dfs.headways,
'calendar': gtfsfeeds_dfs.calendar,
'calendar_dates': gtfsfeeds_dfs.calendar_dates}
for name, gtfs_df in req_df_dict.items():
if gtfs_df.empty:
raise ValueError('gtfsfeeds_dfs is missing required '
'DataFrame: {}.'.format(name))
if gtfsfeeds_dfs.calendar.empty and gtfsfeeds_dfs.calendar_dates.empty:
raise ValueError('gtfsfeeds_dfs is missing either the calendar or '
'calendar_dates DataFrame.')
tables_saved = []
for name, gtfs_df in req_df_dict.items():
df_to_hdf5(data=gtfs_df, key=name,
overwrite_key=False, dir=dir, filename=filename,
overwrite_hdf5=False)
tables_saved.extend([name])
for name, gtfs_df in optional_df_dict.items():
if gtfs_df.empty is False:
df_to_hdf5(data=gtfs_df, key=name,
overwrite_key=False, dir=dir, filename=filename,
overwrite_hdf5=False)
tables_saved.extend([name])
log('Saved HDF5 store: {} with tables: {}.'.format(
os.path.join(dir, filename), tables_saved))
def load_processed_gtfs_data(filename, dir=config.settings.data_folder):
"""
Read data from a HDF5 file to an urbanaccess_gtfs_df object
Parameters
----------
filename : string
name of the HDF5 file to read with .h5 extension
dir : string, optional
directory to read HDF5 file
Returns
-------
gtfsfeeds_dfs : object
urbanaccess_gtfs_df object
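    Examples
    --------
    A minimal sketch; the file name below is a hypothetical example:
    >>> gtfsfeeds_dfs = load_processed_gtfs_data(filename='processed_gtfs.h5')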
"""
log('Loading HDF5 store...')
req_df_dict = {'stops': gtfsfeeds_dfs.stops,
'routes': gtfsfeeds_dfs.routes,
'trips': gtfsfeeds_dfs.trips,
'stop_times': gtfsfeeds_dfs.stop_times,
'stop_times_int': gtfsfeeds_dfs.stop_times_int}
# calendar or calendar_dates are required but not both
optional_df_dict = {'headways': gtfsfeeds_dfs.headways,
'calendar': gtfsfeeds_dfs.calendar,
'calendar_dates': gtfsfeeds_dfs.calendar_dates}
tables_read = []
for name, gtfs_df in req_df_dict.items():
vars(gtfsfeeds_dfs)[name] = hdf5_to_df(
dir=dir, filename=filename, key=name)
tables_read.extend([name])
# open HDF5 to read keys
hdf5_load_path = os.path.join(dir, filename)
with pd.HDFStore(hdf5_load_path) as store:
hdf5_keys = store.keys()
hdf5_keys = [item.replace('/', '') for item in hdf5_keys]
for name, gtfs_df in optional_df_dict.items():
# if optional key exists, read it
if name in hdf5_keys:
vars(gtfsfeeds_dfs)[name] = hdf5_to_df(
dir=dir, filename=filename, key=name)
tables_read.extend([name])
log('Read HDF5 store: {} tables: {}.'.format(
hdf5_load_path, tables_read))
return gtfsfeeds_dfs
def _check_if_index_name_in_cols(df):
"""
Check if specified Dataframe has an index name that is also a column name
Parameters
----------
df : pandas.DataFrame
Dataframe to check index and columns
Returns
-------
iname : boolean
True if index name is also a column name, else False
"""
cols = df.columns.values
iname = df.index.name
return (iname in cols)
| agpl-3.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/text_classification_character_rnn.py | 29 | 4506 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
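  # byte_vectors has shape [batch_size, MAX_DOCUMENT_LENGTH, 256]; unstacking
  # along axis 1 yields MAX_DOCUMENT_LENGTH tensors of shape [batch_size, 256],
  # one per character position, which are fed to the RNN in sequence.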
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
notconfusing/WIGI | df-comparator.py | 1 | 1111 | import pandas
import os
def changes_between(fa, fb):
dfa = pandas.DataFrame.from_csv(fa)
dfb = pandas.DataFrame.from_csv(fb)
removed_columns = dfa.columns.difference(dfb.columns)
added_columns = dfb.columns.difference(dfa.columns)
change_df = dfb - dfa
return change_df
def find_dirs():
snapdir = '/home/maximilianklein/snapshot_data'
dates = os.listdir(snapdir)
sdates = sorted(dates)
latest = sdates[-1]
prev = sdates[-2]
latest_dir = os.path.join(os.path.join(snapdir,latest),'property_indexes')
prev_dir = os.path.join(os.path.join(snapdir,prev),'property_indexes')
latest_files = os.listdir(latest_dir)
prev_files = os.listdir(prev_dir)
changedir = os.path.join(latest_dir,'changes-since-{}'.format(prev))
if not os.path.exists(changedir):
os.makedirs(changedir)
for ind_file in latest_files:
if ind_file in prev_files:
p_f = os.path.join(prev_dir, ind_file)
l_f = os.path.join(latest_dir, ind_file)
change_df = changes_between(p_f, l_f)
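            # Write the per-file change table into changedir (assumed intent,
            # since changedir is created above but never written to otherwise)
            change_df.to_csv(os.path.join(changedir, ind_file))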
find_dirs()
| mit |
toastedcornflakes/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 14 | 12469 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
assert_ae = assert_array_equal
if X.dtype.kind == 'f' or X_true.dtype.kind == 'f':
assert_ae = assert_array_almost_equal
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_ae(imputer.statistics_, statistics,
err_msg=err_msg.format(0, False))
assert_ae(X_trans, X_true, err_msg=err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_ae(X_trans, X_true.transpose(),
err_msg=err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_ae(imputer.statistics_, statistics,
err_msg=err_msg.format(0, True))
assert_ae(X_trans, X_true, err_msg=err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_ae(X_trans, X_true.transpose(),
err_msg=err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent value as promised in the doc, but the lowest most frequent one.
    # When this test fails after an update of scipy, Imputer will need to be
    # updated to be consistent with the new (correct) behaviour.
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_almost_equal(
imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
err_msg="Fail to transform the data after pickling "
"(strategy = %s)" % (strategy)
)
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_almost_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_almost_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_almost_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
zhoulingjun/zipline | tests/serialization_cases.py | 20 | 3818 | import datetime
import pytz
import nose.tools as nt
import pandas.util.testing as tm
import pandas as pd
from zipline.finance.blotter import Blotter, Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.performance.period import PerformancePeriod
from zipline.finance.performance.position import Position
from zipline.finance.performance.tracker import PerformanceTracker
from zipline.finance.performance.position_tracker import PositionTracker
from zipline.finance.risk.cumulative import RiskMetricsCumulative
from zipline.finance.risk.period import RiskMetricsPeriod
from zipline.finance.risk.report import RiskReport
from zipline.finance.slippage import (
FixedSlippage,
Transaction,
VolumeShareSlippage
)
from zipline.protocol import Account
from zipline.protocol import Portfolio
from zipline.protocol import Position as ProtocolPosition
from zipline.finance.trading import SimulationParameters
from zipline.utils import factory
def stringify_cases(cases, func=None):
# get better test case names
results = []
if func is None:
def func(case):
return case[0].__name__
for case in cases:
new_case = list(case)
key = func(case)
new_case.insert(0, key)
results.append(new_case)
return results
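# Hedged sketch (not part of the original module): shows the shape of
# stringify_cases' output; the single Blotter case below is an assumed example.
def _stringify_cases_example():  # hypothetical helper, for illustration only
    cases = [(Blotter, (), {}, 'repr')]
    # Each case gains a leading string key derived from the class name:
    # [['Blotter', Blotter, (), {}, 'repr']]
    return stringify_cases(cases)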
sim_params_daily = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='daily')
sim_params_minute = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='minute')
returns = factory.create_returns_from_list(
[1.0], sim_params_daily)
def object_serialization_cases(skip_daily=False):
# Wrapped in a function to recreate DI objects.
cases = [
(Blotter, (), {}, 'repr'),
(Order, (datetime.datetime(2013, 6, 19), 8554, 100), {}, 'dict'),
(PerShare, (), {}, 'dict'),
(PerTrade, (), {}, 'dict'),
(PerDollar, (), {}, 'dict'),
(PerformancePeriod,
(10000,), {'position_tracker': PositionTracker()}, 'to_dict'),
(Position, (8554,), {}, 'dict'),
(PositionTracker, (), {}, 'dict'),
(PerformanceTracker, (sim_params_minute,), {}, 'to_dict'),
(RiskMetricsCumulative, (sim_params_minute,), {}, 'to_dict'),
(RiskMetricsPeriod,
(returns.index[0], returns.index[0], returns), {}, 'to_dict'),
(RiskReport, (returns, sim_params_minute), {}, 'to_dict'),
(FixedSlippage, (), {}, 'dict'),
(Transaction,
(8554, 10, datetime.datetime(2013, 6, 19), 100, "0000"), {},
'dict'),
(VolumeShareSlippage, (), {}, 'dict'),
(Account, (), {}, 'dict'),
(Portfolio, (), {}, 'dict'),
(ProtocolPosition, (8554,), {}, 'dict')
]
if not skip_daily:
cases.extend([
(PerformanceTracker, (sim_params_daily,), {}, 'to_dict'),
(RiskMetricsCumulative, (sim_params_daily,), {}, 'to_dict'),
(RiskReport, (returns, sim_params_daily), {}, 'to_dict'),
])
return stringify_cases(cases)
def assert_dict_equal(d1, d2):
# check keys
nt.assert_is_instance(d1, dict)
nt.assert_is_instance(d2, dict)
nt.assert_set_equal(set(d1.keys()), set(d2.keys()))
for k in d1:
v1 = d1[k]
v2 = d2[k]
asserter = nt.assert_equal
if isinstance(v1, pd.DataFrame):
asserter = tm.assert_frame_equal
if isinstance(v1, pd.Series):
asserter = tm.assert_series_equal
try:
asserter(v1, v2)
except AssertionError:
raise AssertionError('{k} is not equal'.format(k=k))
| apache-2.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
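# Hedged sketch (not part of the original module): the arrays below are
# assumed example inputs showing how zip_descr combines dtype descriptions.
def _zip_descr_example():  # hypothetical helper, for illustration only
    a = np.array([(1, 2.0)], dtype=[('A', int), ('B', float)])
    b = np.array([(True,)], dtype=[('C', bool)])
    # -> roughly [('A', '<i8'), ('B', '<f8'), ('C', '|b1')]
    # (exact byte-order/itemsize codes depend on the platform)
    return zip_descr((a, b), flatten=True)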
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse the fields of nested structures when iterating.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
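# Hedged sketch (not part of the original module): the arrays below are
# assumed example inputs; izip_records pads the shorter sequence with
# fill_value and yields flattened tuples row by row.
def _izip_records_example():  # hypothetical helper, for illustration only
    a = np.array([(1, 10.0), (2, 20.0)], dtype=[('x', int), ('y', float)])
    b = np.array([(True,)], dtype=[('z', bool)])
    # -> [(1, 10.0, True), (2, 20.0, None)]
    return list(izip_records((a, b), fill_value=None, flatten=True))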
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : bool, optional
        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
        a plain ndarray or masked array with flexible dtype. The default
        is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
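# Hedged sketch (not part of the original module): the array and the new
# field values below are assumed examples of appending a single field.
def _append_fields_example():  # hypothetical helper, for illustration only
    base = np.array([(1, 10.0), (2, 20.0)], dtype=[('a', int), ('b', float)])
    # -> array([(1, 10.0, 100), (2, 20.0, 200)],
    #          dtype=[('a', '<i8'), ('b', '<f8'), ('c', '<i8')])
    # (field type codes are platform dependent)
    return append_fields(base, 'c', data=[100, 200], usemask=False)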
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
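# Hedged sketch (not part of the original module): the masked array below is
# an assumed example, built the same way as in the docstring above; only the
# unmasked rows sharing a key value are reported.
def _find_duplicates_example():  # hypothetical helper, for illustration only
    a = np.ma.array([1, 2, 1, 3], mask=[0, 0, 0, 1]).view([('a', int)])
    # -> (the two unmasked records with a == 1, and their indices [0, 2])
    return find_duplicates(a, key='a', ignoremask=True, return_index=True)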
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
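# Hedged sketch (not part of the original module): the two arrays below are
# assumed examples; an inner join keeps only the records whose 'key' value
# appears in both inputs.
def _join_by_example():  # hypothetical helper, for illustration only
    r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[('key', int), ('x', float)])
    r2 = np.array([(2, 200), (3, 300)], dtype=[('key', int), ('y', int)])
    # -> a single joined record (2, 20.0, 200), returned as a MaskedArray
    #    because usemask defaults to True
    return join_by('key', r1, r2, jointype='inner')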
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
jaantollander/Pointwise-Convergence | src_legacy/io/save/metadata.py | 4 | 1616 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from time import time
import pandas as pd
import psutil
class Runtime(list):
# TODO: https://docs.python.org/3/library/time.html#time.process_time
# TODO: https://docs.python.org/3/library/time.html#time.perf_counter
def __init__(self):
self.starting_time = time()
self.previous = 0
super(Runtime, self).__init__()
def dataframe(self):
return pd.DataFrame(data=self, columns=['cumulative', 'incremental'])
def record(self):
cumulative = time() - self.starting_time
incremental = cumulative - self.previous
self.previous = cumulative
self.append([cumulative, incremental])
print('Runtime:{:0.2f} s,\t{:0.3f} s'.format(cumulative,
incremental))
class MemoryUsage(list):
def __init__(self):
self.process = psutil.Process(os.getpid())
self.previous = 0
super(MemoryUsage, self).__init__()
def dataframe(self):
return pd.DataFrame(data=self, columns=['memory_usage', 'differential'])
def record(self):
memory_usage = self.process.get_memory_info()[0] / (2 ** 20)
differential = memory_usage - self.previous
self.previous = memory_usage
self.append([memory_usage, differential])
print('Memory: {:0.2f} MB,\t{:0.8f} MB'.format(memory_usage,
differential)) | mit |
fzheng/codejam | lib/python2.7/site-packages/IPython/testing/iptestcontroller.py | 14 | 18314 | # -*- coding: utf-8 -*-
"""IPython Test Process Controller
This module runs one or more subprocesses which will actually run the IPython
test suite.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import argparse
import json
import multiprocessing.pool
import os
import stat
import re
import requests
import shutil
import signal
import sys
import subprocess
import time
from .iptest import (
have, test_group_names as py_test_group_names, test_sections, StreamCapturer,
test_for,
)
from IPython.utils.path import compress_user
from IPython.utils.py3compat import bytes_to_str
from IPython.utils.sysinfo import get_sys_info
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils.text import strip_ansi
try:
# Python >= 3.3
from subprocess import TimeoutExpired
def popen_wait(p, timeout):
return p.wait(timeout)
except ImportError:
class TimeoutExpired(Exception):
pass
def popen_wait(p, timeout):
"""backport of Popen.wait from Python 3"""
for i in range(int(10 * timeout)):
if p.poll() is not None:
return
time.sleep(0.1)
if p.poll() is None:
raise TimeoutExpired
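# Hedged usage sketch (not part of the original module): the command below is
# an assumed example; popen_wait behaves like Popen.wait with a timeout on
# both Python 2 and Python 3.
def _popen_wait_example():  # hypothetical helper, for illustration only
    p = subprocess.Popen([sys.executable, '-c', 'pass'])
    try:
        popen_wait(p, 5)  # wait up to ~5 seconds
    except TimeoutExpired:
        p.kill()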
NOTEBOOK_SHUTDOWN_TIMEOUT = 10
class TestController(object):
"""Run tests in a subprocess
"""
#: str, IPython test suite to be executed.
section = None
#: list, command line arguments to be executed
cmd = None
#: dict, extra environment variables to set for the subprocess
env = None
#: list, TemporaryDirectory instances to clear up when the process finishes
dirs = None
#: subprocess.Popen instance
process = None
#: str, process stdout+stderr
stdout = None
def __init__(self):
self.cmd = []
self.env = {}
self.dirs = []
def setup(self):
"""Create temporary directories etc.
This is only called when we know the test group will be run. Things
created here may be cleaned up by self.cleanup().
"""
pass
def launch(self, buffer_output=False, capture_output=False):
# print('*** ENV:', self.env) # dbg
# print('*** CMD:', self.cmd) # dbg
env = os.environ.copy()
env.update(self.env)
if buffer_output:
capture_output = True
self.stdout_capturer = c = StreamCapturer(echo=not buffer_output)
c.start()
stdout = c.writefd if capture_output else None
stderr = subprocess.STDOUT if capture_output else None
self.process = subprocess.Popen(self.cmd, stdout=stdout,
stderr=stderr, env=env)
def wait(self):
self.process.wait()
self.stdout_capturer.halt()
self.stdout = self.stdout_capturer.get_buffer()
return self.process.returncode
def print_extra_info(self):
"""Print extra information about this test run.
If we're running in parallel and showing the concise view, this is only
called if the test group fails. Otherwise, it's called before the test
group is started.
The base implementation does nothing, but it can be overridden by
subclasses.
"""
return
def cleanup_process(self):
"""Cleanup on exit by killing any leftover processes."""
subp = self.process
if subp is None or (subp.poll() is not None):
return # Process doesn't exist, or is already dead.
try:
print('Cleaning up stale PID: %d' % subp.pid)
subp.kill()
except: # (OSError, WindowsError) ?
# This is just a best effort, if we fail or the process was
# really gone, ignore it.
pass
else:
for i in range(10):
if subp.poll() is None:
time.sleep(0.1)
else:
break
if subp.poll() is None:
# The process did not die...
print('... failed. Manual cleanup may be required.')
def cleanup(self):
"Kill process if it's still alive, and clean up temporary directories"
self.cleanup_process()
for td in self.dirs:
td.cleanup()
__del__ = cleanup
class PyTestController(TestController):
"""Run Python tests using IPython.testing.iptest"""
#: str, Python command to execute in subprocess
pycmd = None
def __init__(self, section, options):
"""Create new test runner."""
TestController.__init__(self)
self.section = section
# pycmd is put into cmd[2] in PyTestController.launch()
self.cmd = [sys.executable, '-c', None, section]
self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
self.options = options
def setup(self):
ipydir = TemporaryDirectory()
self.dirs.append(ipydir)
self.env['IPYTHONDIR'] = ipydir.name
self.workingdir = workingdir = TemporaryDirectory()
self.dirs.append(workingdir)
self.env['IPTEST_WORKING_DIR'] = workingdir.name
# This means we won't get odd effects from our own matplotlib config
self.env['MPLCONFIGDIR'] = workingdir.name
# For security reasons (http://bugs.python.org/issue16202), use
# a temporary directory to which other users have no access.
self.env['TMPDIR'] = workingdir.name
# Add a non-accessible directory to PATH (see gh-7053)
noaccess = os.path.join(self.workingdir.name, "_no_access_")
self.noaccess = noaccess
os.mkdir(noaccess, 0)
PATH = os.environ.get('PATH', '')
if PATH:
PATH = noaccess + os.pathsep + PATH
else:
PATH = noaccess
self.env['PATH'] = PATH
# From options:
if self.options.xunit:
self.add_xunit()
if self.options.coverage:
self.add_coverage()
self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
self.cmd.extend(self.options.extra_args)
def cleanup(self):
"""
Make the non-accessible directory created in setup() accessible
again, otherwise deleting the workingdir will fail.
"""
os.chmod(self.noaccess, stat.S_IRWXU)
TestController.cleanup(self)
@property
def will_run(self):
try:
return test_sections[self.section].will_run
except KeyError:
return True
def add_xunit(self):
xunit_file = os.path.abspath(self.section + '.xunit.xml')
self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
def add_coverage(self):
try:
sources = test_sections[self.section].includes
except KeyError:
sources = ['IPython']
coverage_rc = ("[run]\n"
"data_file = {data_file}\n"
"source =\n"
" {source}\n"
).format(data_file=os.path.abspath('.coverage.'+self.section),
source="\n ".join(sources))
config_file = os.path.join(self.workingdir.name, '.coveragerc')
with open(config_file, 'w') as f:
f.write(coverage_rc)
self.env['COVERAGE_PROCESS_START'] = config_file
self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
def launch(self, buffer_output=False):
self.cmd[2] = self.pycmd
super(PyTestController, self).launch(buffer_output=buffer_output)
def prepare_controllers(options):
"""Returns two lists of TestController instances, those to run, and those
not to run."""
testgroups = options.testgroups
if not testgroups:
testgroups = py_test_group_names
controllers = [PyTestController(name, options) for name in testgroups]
to_run = [c for c in controllers if c.will_run]
not_run = [c for c in controllers if not c.will_run]
return to_run, not_run
def do_run(controller, buffer_output=True):
"""Setup and run a test controller.
If buffer_output is True, no output is displayed, to avoid it appearing
interleaved. In this case, the caller is responsible for displaying test
output on failure.
Returns
-------
controller : TestController
The same controller as passed in, as a convenience for using map() type
APIs.
exitcode : int
The exit code of the test subprocess. Non-zero indicates failure.
"""
try:
try:
controller.setup()
if not buffer_output:
controller.print_extra_info()
controller.launch(buffer_output=buffer_output)
except Exception:
import traceback
traceback.print_exc()
return controller, 1 # signal failure
exitcode = controller.wait()
return controller, exitcode
except KeyboardInterrupt:
return controller, -signal.SIGINT
finally:
controller.cleanup()
def report():
"""Return a string with a summary report of test-related variables."""
inf = get_sys_info()
out = []
def _add(name, value):
out.append((name, value))
_add('IPython version', inf['ipython_version'])
_add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
_add('IPython package', compress_user(inf['ipython_path']))
_add('Python version', inf['sys_version'].replace('\n',''))
_add('sys.executable', compress_user(inf['sys_executable']))
_add('Platform', inf['platform'])
width = max(len(n) for (n,v) in out)
out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
avail = []
not_avail = []
for k, is_avail in have.items():
if is_avail:
avail.append(k)
else:
not_avail.append(k)
if avail:
out.append('\nTools and libraries available at test time:\n')
avail.sort()
out.append(' ' + ' '.join(avail)+'\n')
if not_avail:
out.append('\nTools and libraries NOT available at test time:\n')
not_avail.sort()
out.append(' ' + ' '.join(not_avail)+'\n')
return ''.join(out)
def run_iptestall(options):
"""Run the entire IPython test suite by calling nose and trial.
This function constructs :class:`IPTester` instances for all IPython
modules and package and then runs each of them. This causes the modules
and packages of IPython to be tested each in their own subprocess using
nose.
Parameters
----------
All parameters are passed as attributes of the options object.
testgroups : list of str
Run only these sections of the test suite. If empty, run all the available
sections.
fast : int or None
Run the test suite in parallel, using n simultaneous processes. If None
is passed, one process is used per CPU core. Default 1 (i.e. sequential)
inc_slow : bool
Include slow tests. By default, these tests aren't run.
url : unicode
Address:port to use when running the JS tests.
xunit : bool
Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
coverage : bool or str
Measure code coverage from tests. True will store the raw coverage data,
or pass 'html' or 'xml' to get reports.
extra_args : list
Extra arguments to pass to the test subprocesses, e.g. '-v'
"""
to_run, not_run = prepare_controllers(options)
def justify(ltext, rtext, width=70, fill='-'):
ltext += ' '
rtext = (' ' + rtext).rjust(width - len(ltext), fill)
return ltext + rtext
# Run all test runners, tracking execution time
failed = []
t_start = time.time()
print()
if options.fast == 1:
# This actually means sequential, i.e. with 1 job
for controller in to_run:
print('Test group:', controller.section)
sys.stdout.flush() # Show in correct order when output is piped
controller, res = do_run(controller, buffer_output=False)
if res:
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
print()
else:
# Run tests concurrently
try:
pool = multiprocessing.pool.ThreadPool(options.fast)
for (controller, res) in pool.imap_unordered(do_run, to_run):
res_string = 'OK' if res == 0 else 'FAILED'
print(justify('Test group: ' + controller.section, res_string))
if res:
controller.print_extra_info()
print(bytes_to_str(controller.stdout))
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
except KeyboardInterrupt:
return
for controller in not_run:
print(justify('Test group: ' + controller.section, 'NOT RUN'))
t_end = time.time()
t_tests = t_end - t_start
nrunners = len(to_run)
nfail = len(failed)
# summarize results
print('_'*70)
print('Test suite completed for system with the following information:')
print(report())
took = "Took %.3fs." % t_tests
print('Status: ', end='')
if not failed:
print('OK (%d test groups).' % nrunners, took)
else:
# If anything went wrong, point out what command to rerun manually to
# see the actual errors and individual summary
failed_sections = [c.section for c in failed]
print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
nrunners, ', '.join(failed_sections)), took)
print()
print('You may wish to rerun these, with:')
print(' iptest', *failed_sections)
print()
if options.coverage:
from coverage import coverage, CoverageException
cov = coverage(data_file='.coverage')
cov.combine()
cov.save()
# Coverage HTML report
if options.coverage == 'html':
html_dir = 'ipy_htmlcov'
shutil.rmtree(html_dir, ignore_errors=True)
print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
sys.stdout.flush()
# Custom HTML reporter to clean up module names.
from coverage.html import HtmlReporter
class CustomHtmlReporter(HtmlReporter):
def find_code_units(self, morfs):
super(CustomHtmlReporter, self).find_code_units(morfs)
for cu in self.code_units:
nameparts = cu.name.split(os.sep)
if 'IPython' not in nameparts:
continue
ix = nameparts.index('IPython')
cu.name = '.'.join(nameparts[ix:])
# Reimplement the html_report method with our custom reporter
cov.get_data()
cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
html_title='IPython test coverage',
)
reporter = CustomHtmlReporter(cov, cov.config)
reporter.report(None)
print('done.')
# Coverage XML report
elif options.coverage == 'xml':
try:
cov.xml_report(outfile='ipy_coverage.xml')
except CoverageException as e:
print('Generating coverage report failed. Are you running javascript tests only?')
import traceback
traceback.print_exc()
if failed:
# Ensure that our exit code indicates failure
sys.exit(1)
argparser = argparse.ArgumentParser(description='Run IPython test suite')
argparser.add_argument('testgroups', nargs='*',
help='Run specified groups of tests. If omitted, run '
'all tests.')
argparser.add_argument('--all', action='store_true',
help='Include slow tests not run by default.')
argparser.add_argument('--url', help="URL to use for the JS tests.")
argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
help='Run test sections in parallel. This starts as many '
'processes as you have cores, or you can specify a number.')
argparser.add_argument('--xunit', action='store_true',
help='Produce Xunit XML results')
argparser.add_argument('--coverage', nargs='?', const=True, default=False,
help="Measure test coverage. Specify 'html' or "
"'xml' to get reports.")
argparser.add_argument('--subproc-streams', default='capture',
help="What to do with stdout/stderr from subprocesses. "
"'capture' (default), 'show' and 'discard' are the options.")
def default_options():
"""Get an argparse Namespace object with the default arguments, to pass to
:func:`run_iptestall`.
"""
options = argparser.parse_args([])
options.extra_args = []
return options
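# Hedged usage sketch (not part of the original module): the group name
# 'core' is an assumed example of driving the test suite programmatically.
def _run_single_group_example():  # hypothetical helper, for illustration only
    options = default_options()
    options.testgroups = ['core']   # restrict to one section
    options.fast = 1                # run sequentially
    run_iptestall(options)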
def main():
# iptest doesn't work correctly if the working directory is the
# root of the IPython source tree. Tell the user to avoid
# frustration.
if os.path.exists(os.path.join(os.getcwd(),
'IPython', 'testing', '__main__.py')):
print("Don't run iptest from the IPython source directory",
file=sys.stderr)
sys.exit(1)
# Arguments after -- should be passed through to nose. Argparse treats
# everything after -- as regular positional arguments, so we separate them
# first.
try:
ix = sys.argv.index('--')
except ValueError:
to_parse = sys.argv[1:]
extra_args = []
else:
to_parse = sys.argv[1:ix]
extra_args = sys.argv[ix+1:]
options = argparser.parse_args(to_parse)
options.extra_args = extra_args
run_iptestall(options)
if __name__ == '__main__':
main()
| mit |
qifeigit/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
cdr-stats/cdr-stats | cdr_stats/cdr/views.py | 1 | 38314 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.translation import gettext as _
from django.conf import settings
from django_lets_go.common_functions import int_convert_to_minute,\
percentage, getvar, unset_session_var, ceil_strdate
from django_lets_go.common_functions import get_pagination_vars
from user_profile.models import AccountCode
from cdr.functions_def import get_country_name, get_hangupcause_name,\
get_switch_ip_addr, calculate_act_acd
from cdr.forms import CdrSearchForm, CountryReportForm, CdrOverviewForm, \
CompareCallSearchForm, \
SwitchForm, WorldForm, EmailReportForm
# from cdr.forms import ConcurrentCallForm
from cdr.filters import get_filter_operator_int, get_filter_operator_str
from cdr.decorators import check_user_detail
from cdr.constants import CDR_COLUMN_NAME, Export_choice, COMPARE_WITH
from cdr.models import CDR
from aggregator.aggregate_cdr import custom_sql_aggr_top_country, \
custom_sql_aggr_top_hangup_last24hours, \
custom_sql_matv_voip_cdr_aggr_last24hours, \
custom_sql_aggr_top_country_last24hours
from aggregator.pandas_cdr import get_report_cdr_per_switch, \
get_report_compare_cdr, get_report_cdr_per_country
from common.helpers import trunc_date_start, trunc_date_end
from cdr.helpers import get_cdr_mail_report
from datetime import datetime
from dateutil.relativedelta import relativedelta
import tablib
import time
import logging
def show_menu(request):
"""Check if we suppose to show menu"""
try:
return request.GET.get('menu')
except:
return 'on'
@permission_required('user_profile.search', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def cdr_view(request):
"""List of CDRs
**Attributes**:
* ``template`` - cdr/list.html
* ``form`` - CdrSearchForm
**Logic Description**:
        * fetch the call records as well as the daily call analytics
          from PostgreSQL according to the search parameters
"""
logging.debug('CDR View Start')
result = 1 # default min
switch_id = 0 # default all
hangup_cause_id = 0 # default all
destination, destination_type, accountcode = '', '', ''
direction, duration, duration_type = '', '', ''
caller_id_number, caller_id_number_type, country_id = '', '', ''
action = 'tabs-1'
menu = 'on'
records_per_page = settings.PAGE_SIZE
form = CdrSearchForm(request.POST or None)
if form.is_valid():
logging.debug('CDR Search View')
# set session var value
field_list = ['destination', 'result', 'destination_type', 'accountcode',
'caller_id_number', 'caller_id_number_type', 'duration',
'duration_type', 'hangup_cause_id', 'switch_id', 'direction',
'country_id', 'export_query_var']
unset_session_var(request, field_list)
from_date = getvar(request, 'from_date', setsession=False)
to_date = getvar(request, 'to_date', setsession=False)
result = getvar(request, 'result', setsession=True)
destination = getvar(request, 'destination', setsession=True)
destination_type = getvar(request, 'destination_type', setsession=True)
accountcode = getvar(request, 'accountcode', setsession=True)
caller_id_number = getvar(request, 'caller_id_number', setsession=True)
caller_id_number_type = getvar(request, 'caller_id_number_type', setsession=True)
duration = getvar(request, 'duration', setsession=True)
duration_type = getvar(request, 'duration_type', setsession=True)
direction = getvar(request, 'direction', setsession=True)
if direction and direction != 'all' and direction != '0':
request.session['session_direction'] = str(direction)
switch_id = getvar(request, 'switch_id', setsession=True)
hangup_cause_id = getvar(request, 'hangup_cause_id', setsession=True)
records_per_page = getvar(request, 'records_per_page', setsession=True)
country_id = form.cleaned_data.get('country_id')
# convert list value in int
country_id = [int(row) for row in country_id]
if len(country_id) >= 1:
request.session['session_country_id'] = country_id
start_date = ceil_strdate(str(from_date), 'start', True)
end_date = ceil_strdate(str(to_date), 'end', True)
converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
request.session['session_start_date'] = converted_start_date
request.session['session_end_date'] = converted_end_date
menu = show_menu(request)
using_session = False
# Display a specific page or sort
if request.GET.get('page') or request.GET.get('sort_by'):
using_session = True
from_date = start_date = request.session.get('session_start_date')
to_date = end_date = request.session.get('session_end_date')
start_date = ceil_strdate(start_date, 'start', True)
end_date = ceil_strdate(end_date, 'end', True)
destination = request.session.get('session_destination')
destination_type = request.session.get('session_destination_type')
accountcode = request.session.get('session_accountcode')
caller_id_number = request.session.get('session_caller_id_number')
caller_id_number_type = request.session.get('session_caller_id_number_type')
duration = request.session.get('session_duration')
duration_type = request.session.get('session_duration_type')
direction = request.session.get('session_direction')
switch_id = request.session.get('session_switch_id')
hangup_cause_id = request.session.get('session_hangup_cause_id')
result = request.session.get('session_result')
records_per_page = request.session.get('session_records_per_page')
country_id = request.session['session_country_id']
    # Set defaults because we are displaying the page for the first time
if request.method == 'GET' and not using_session:
tday = datetime.today()
from_date = datetime(tday.year, tday.month, 1, 0, 0, 0, 0)
last_day = ((datetime(tday.year, tday.month, 1, 23, 59, 59, 999999) +
relativedelta(months=1)) -
relativedelta(days=1)).strftime('%d')
# to_date = tday.strftime('%Y-%m-' + last_day + ' 23:59')
to_date = datetime(tday.year, tday.month, int(last_day), 23, 59, 59, 999999)
start_date = ceil_strdate(str(from_date), 'start', True)
end_date = ceil_strdate(str(to_date), 'end', True)
converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
request.session['session_start_date'] = converted_start_date
request.session['session_end_date'] = converted_end_date
request.session['session_result'] = 1
field_list = [
'destination', 'destination_type', 'accountcode',
'caller_id_number', 'caller_id_number_type', 'duration',
'duration_type', 'hangup_cause_id',
'switch_id', 'direction', 'country_id']
unset_session_var(request, field_list)
request.session['session_records_per_page'] = records_per_page
request.session['session_country_id'] = ''
    # Define the number of records per page
records_per_page = int(records_per_page)
sort_col_field_list = ['id', 'caller_id_number', 'destination_number', 'starting_date']
page_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='id')
    # Build the filter kwargs for CDR.objects
kwargs = {}
if hangup_cause_id and hangup_cause_id != '0':
kwargs['hangup_cause_id'] = int(hangup_cause_id)
if switch_id and switch_id != '0':
kwargs['switch_id'] = int(switch_id)
if direction and direction != 'all' and direction != "0":
kwargs['direction'] = direction
if len(country_id) >= 1 and country_id[0] != 0:
kwargs['country_id__in'] = country_id
if start_date:
kwargs['starting_date__gte'] = start_date
if end_date:
kwargs['starting_date__lte'] = end_date
if destination:
operator_query = get_filter_operator_str('destination_number', destination_type)
kwargs[operator_query] = destination
if duration:
operator_query = get_filter_operator_int('duration', duration_type)
kwargs[operator_query] = duration
if caller_id_number:
operator_query = get_filter_operator_str('caller_id_number', caller_id_number_type)
kwargs[operator_query] = caller_id_number
    # users are restricted to their own CDRs
if not request.user.is_superuser:
kwargs['user_id'] = request.user.id
if request.user.is_superuser and accountcode:
try:
acc = AccountCode.objects.get(accountcode=accountcode)
kwargs['user_id'] = acc.user.id
            # when filtering on a specific accountcode, only display that accountcode
kwargs['accountcode'] = accountcode
except AccountCode.DoesNotExist:
# cannot find a user for this accountcode
pass
cdrs = CDR.objects.filter(**kwargs).order_by(page_vars['sort_order'])
page_cdr_list = cdrs[page_vars['start_page']:page_vars['end_page']]
cdr_count = cdrs.count()
logging.debug('Create cdr result')
# store query_var in session without date
export_kwargs = kwargs.copy()
if 'starting_date__gte' in export_kwargs:
export_kwargs['starting_date__gte'] = export_kwargs['starting_date__gte'].strftime('%Y-%m-%dT%H:%M:%S')
if 'starting_date__lte' in export_kwargs:
export_kwargs['starting_date__lte'] = export_kwargs['starting_date__lte'].strftime('%Y-%m-%dT%H:%M:%S')
request.session['session_export_kwargs'] = export_kwargs
form = CdrSearchForm(
initial={
'from_date': from_date,
'to_date': to_date,
'destination': destination,
'destination_type': destination_type,
'accountcode': accountcode,
'caller_id_number': caller_id_number,
'caller_id_number_type': caller_id_number_type,
'duration': duration,
'duration_type': duration_type,
'result': result,
'direction': direction,
'hangup_cause_id': hangup_cause_id,
'switch_id': switch_id,
'country_id': country_id,
'records_per_page': records_per_page
}
)
template_data = {
'page_cdr_list': page_cdr_list,
'cdrs': cdrs,
'form': form,
'cdr_count': cdr_count,
'cdr_daily_data': {},
'col_name_with_order': page_vars['col_name_with_order'],
'menu': menu,
'start_date': start_date,
'end_date': end_date,
'action': action,
'result': result,
'CDR_COLUMN_NAME': CDR_COLUMN_NAME,
'records_per_page': records_per_page,
'up_icon': '<i class="glyphicon glyphicon-chevron-up"></i>',
'down_icon': '<i class="glyphicon glyphicon-chevron-down"></i>'
}
logging.debug('CDR View End')
return render_to_response('cdr/list.html', template_data, context_instance=RequestContext(request))
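# A minimal sketch of the filtering pattern used in cdr_view() above: ORM lookups are
# accumulated in a plain dict and expanded with CDR.objects.filter(**kwargs). The
# helper and its values are purely illustrative and are not called anywhere.
def _example_cdr_filter_kwargs(start_date, end_date, switch_id=None, destination=None):
    kwargs = {'starting_date__gte': start_date, 'starting_date__lte': end_date}
    if switch_id:
        kwargs['switch_id'] = int(switch_id)
    if destination:
        # get_filter_operator_str() would produce the exact lookup, e.g. '__startswith'
        kwargs['destination_number__contains'] = destination
    # the view then evaluates: CDR.objects.filter(**kwargs).order_by(sort_order)
    return kwargs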
@login_required
def cdr_export_to_csv(request):
"""
**Logic Description**:
        Retrieve call records from PostgreSQL according to the search
        parameters, then export the result to a CSV/XLS/JSON file
"""
format_type = request.GET['format']
# get the response object, this can be used as a stream
response = HttpResponse(content_type='text/%s' % format_type)
# force download
response['Content-Disposition'] = 'attachment;filename=export.%s' % format_type
# get query_var from request.session
start_date = request.session.get('session_start_date')
end_date = request.session.get('session_end_date')
start_date = ceil_strdate(start_date, 'start', True)
end_date = ceil_strdate(end_date, 'end', True)
if request.session.get('session_export_kwargs'):
kwargs = request.session.get('session_export_kwargs')
if start_date:
kwargs['starting_date__gte'] = start_date
if end_date:
kwargs['starting_date__lte'] = end_date
cdrs = CDR.objects.filter(**kwargs)
headers = ('Call-date', 'CLID', 'Destination', 'Duration', 'Bill sec', 'Hangup cause',
'AccountCode', 'Direction')
list_val = []
for cdr in cdrs:
starting_date = str(cdr.starting_date)
list_val.append((
starting_date,
cdr.caller_id_number + '-' + cdr.caller_id_name,
cdr.destination_number,
cdr.duration,
cdr.billsec,
get_hangupcause_name(cdr.hangup_cause_id),
cdr.accountcode,
cdr.direction,
))
data = tablib.Dataset(*list_val, headers=headers)
if format_type == Export_choice.XLS:
response.write(data.xls)
elif format_type == Export_choice.CSV:
response.write(data.csv)
elif format_type == Export_choice.JSON:
response.write(data.json)
return response
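# A minimal sketch of the tablib export used above, with made-up rows; the .csv and
# .json attributes are assumed to be provided by the installed tablib version (newer
# releases also expose dataset.export('csv')). Not called anywhere.
def _example_tablib_export(format_type='csv'):
    headers = ('Call-date', 'Destination', 'Duration')
    rows = [('2015-01-01 10:00:00', '34650784355', 42)]
    data = tablib.Dataset(*rows, headers=headers)
    return data.csv if format_type == 'csv' else data.json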
@permission_required('user_profile.cdr_detail', login_url='/')
@login_required
def cdr_detail(request, cdr_id):
"""Detail of Call
**Attributes**:
* ``template`` - cdr/cdr_detail.html
**Logic Description**:
Get details from CDR through the jsonb data field
"""
menu = show_menu(request)
try:
cdr = CDR.objects.get(id=cdr_id)
except CDR.DoesNotExist:
raise Http404
data = {
'cdr': cdr,
'cdr_data': cdr.data,
'menu': menu
}
return render_to_response('cdr/cdr_detail.html',
data, context_instance=RequestContext(request))
@permission_required('user_profile.dashboard', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def cdr_dashboard(request):
"""CDR dashboard on the last 24 hours
**Attributes**:
* ``template`` - cdr/dashboard.html
* ``form`` - SwitchForm
**Logic Description**:
        Display aggregated call information for the last 24 hours; several reports are
        built and displayed, such as the hourly call report and the hangup cause/country analytics.
"""
logging.debug('CDR dashboard view start')
form = SwitchForm(request.POST or None)
if form.is_valid():
logging.debug('CDR dashboard view with search option')
switch_id = int(getvar(request, 'switch_id'))
else:
switch_id = 0
# Get list of calls/duration for each of the last 24 hours
(calls_hour_aggr, total_calls, total_duration, total_billsec, total_buy_cost, total_sell_cost) = custom_sql_matv_voip_cdr_aggr_last24hours(request.user, switch_id)
# Build chart data for last 24h calls
(xdata, ydata, ydata2, ydata3, ydata4, ydata5) = ([], [], [], [], [], [])
for i in calls_hour_aggr:
start_time = (time.mktime(calls_hour_aggr[i]['calltime'].timetuple()) * 1000)
xdata.append(start_time)
ydata.append(int(calls_hour_aggr[i]['nbcalls']))
ydata2.append(int(calls_hour_aggr[i]['duration']/60))
ydata3.append(int(calls_hour_aggr[i]['billsec']/60))
ydata4.append(int(calls_hour_aggr[i]['buy_cost']))
ydata5.append(int(calls_hour_aggr[i]['sell_cost']))
tooltip_date = "%d %b %y %H:%M %p"
extra_serie1 = {"tooltip": {"y_start": "", "y_end": " calls"}, "date_format": tooltip_date}
extra_serie2 = {"tooltip": {"y_start": "", "y_end": " min"}, "date_format": tooltip_date}
extra_serie3 = {"tooltip": {"y_start": "", "y_end": " min"}, "date_format": tooltip_date}
extra_serie4 = {"tooltip": {"y_start": "", "y_end": ""}, "date_format": tooltip_date}
extra_serie5 = {"tooltip": {"y_start": "", "y_end": ""}, "date_format": tooltip_date}
kwargs1 = {}
kwargs1['bar'] = True
final_chartdata = {
'x': xdata,
'name1': 'Calls', 'y1': ydata, 'extra1': extra_serie1, 'kwargs1': kwargs1,
'name2': 'Duration', 'y2': ydata2, 'extra2': extra_serie2,
'name3': 'Billsec', 'y3': ydata3, 'extra3': extra_serie3,
'name4': 'Buy cost', 'y4': ydata4, 'extra4': extra_serie4,
'name5': 'Sell cost', 'y5': ydata5, 'extra5': extra_serie5,
}
final_charttype = "linePlusBarChart"
# Get top 5 of country calls for last 24 hours
country_data = custom_sql_aggr_top_country_last24hours(request.user, switch_id, limit=5)
# Build pie chart data for last 24h calls per country
(xdata, ydata) = ([], [])
for country in country_data:
xdata.append(get_country_name(country["country_id"]))
ydata.append(percentage(country["nbcalls"], total_calls))
color_list = ['#FFC36C', '#FFFF9D', '#BEEB9F', '#79BD8F', '#FFB391']
extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
country_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
country_analytic_charttype = "pieChart"
country_extra = {
'x_is_date': False,
'x_axis_format': '',
'tag_script_js': True,
'jquery_on_ready': True,
}
# Get top 10 of hangup cause calls for last 24 hours
hangup_cause_data = custom_sql_aggr_top_hangup_last24hours(request.user, switch_id)
# hangup analytic pie chart data
(xdata, ydata) = ([], [])
for hangup_cause in hangup_cause_data:
xdata.append(str(get_hangupcause_name(hangup_cause["hangup_cause_id"])))
ydata.append(str(percentage(hangup_cause["nbcalls"], total_calls)))
color_list = ['#2A343F', '#7E8282', '#EA9664', '#30998F', '#449935']
extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
hangup_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
hangup_analytic_charttype = "pieChart"
hangup_extra = country_extra
logging.debug("Result calls_hour_aggr %d" % len(calls_hour_aggr))
logging.debug("Result hangup_cause_data %d" % len(hangup_cause_data))
logging.debug("Result country_data %d" % len(country_data))
# Calculate the Average Time of Call
metric_aggr = calculate_act_acd(total_calls, total_duration)
final_extra = {
'x_is_date': True,
'x_axis_format': '%H:%M',
# 'x_axis_format': '%d %b %Y',
'tag_script_js': True,
'jquery_on_ready': True,
'focus_enable': True,
}
logging.debug('CDR dashboard view end')
variables = {
'total_calls': total_calls,
'total_duration': int_convert_to_minute(total_duration),
'total_buy_cost': total_buy_cost,
'total_sell_cost': total_sell_cost,
'metric_aggr': metric_aggr,
'country_data': country_data,
'hangup_analytic': hangup_cause_data,
'form': form,
'final_chartdata': final_chartdata,
'final_charttype': final_charttype,
'final_chartcontainer': 'final_container',
'final_extra': final_extra,
'hangup_analytic_charttype': hangup_analytic_charttype,
'hangup_analytic_chartdata': hangup_analytic_chartdata,
'hangup_chartcontainer': 'hangup_piechart_container',
'hangup_extra': hangup_extra,
'country_analytic_charttype': country_analytic_charttype,
'country_analytic_chartdata': country_analytic_chartdata,
'country_chartcontainer': 'country_piechart_container',
'country_extra': country_extra,
}
return render_to_response('cdr/dashboard.html', variables, context_instance=RequestContext(request))
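# A small sketch of how the django-nvd3 pieChart payload above is assembled: slice
# labels go into 'x', each slice's percentage of the total into 'y1' and the series
# options into 'extra1'. The figures below are invented for illustration only.
def _example_pie_chartdata():
    slices = [('Spain', 120), ('France', 60), ('Other', 20)]
    total = sum(nbcalls for _, nbcalls in slices)
    xdata = [name for name, _ in slices]
    ydata = [percentage(nbcalls, total) for _, nbcalls in slices]
    return {'x': xdata, 'y1': ydata,
            'extra1': {"tooltip": {"y_start": "", "y_end": " %"}}}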
@permission_required('user_profile.mail_report', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def mail_report(request):
"""Mail Report Template
**Attributes**:
* ``template`` - cdr/mail_report.html
        * ``form`` - EmailReportForm
**Logic Description**:
        fetch the top 10 calls from PostgreSQL plus the hangup and country analytics
        to generate the mail report
"""
logging.debug('CDR mail report view start')
msg = ''
if not hasattr(request.user, 'userprofile'):
form = None
else:
form = EmailReportForm(request.user, request.POST or None, instance=request.user.userprofile)
if form.is_valid():
form.save()
msg = _('email ids are saved successfully.')
mail_data = get_cdr_mail_report(request.user)
data = {
'yesterday_date': mail_data['yesterday_date'],
'rows': mail_data['rows'],
'form': form,
'total_duration': mail_data['total_duration'],
'total_calls': mail_data['total_calls'],
'total_buy_cost': mail_data['total_buy_cost'],
'total_sell_cost': mail_data['total_sell_cost'],
'metric_aggr': mail_data['metric_aggr'],
'country_data': mail_data['country_data'],
'hangup_cause_data': mail_data['hangup_cause_data'],
'msg': msg,
}
return render_to_response('cdr/mail_report.html', data, context_instance=RequestContext(request))
@permission_required('user_profile.daily_comparison', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_daily_comparison(request):
"""
Hourly CDR graph that compare with previous dates
**Attributes**:
* ``template`` - cdr/daily_comparison.html
* ``form`` - CompareCallSearchForm
**Logic Description**:
        get the aggregated call records from the CDR table using the materialized view
        and compare them against the records of the previous dates
"""
# Default
metric = 'nbcalls'
switch_id = 0
hourly_charttype = "multiBarChart"
hourly_chartdata = {'x': []}
compare_days = 2
compare_type = COMPARE_WITH.previous_days
today_date = datetime.today()
form = CompareCallSearchForm(request.POST or None,
initial={'from_date': today_date.strftime('%Y-%m-%d'),
'compare_days': compare_days,
'compare_type': compare_type,
'switch_id': 0})
today_date = datetime(today_date.year, today_date.month, today_date.day)
current_date = today_date
if form.is_valid():
from_date = getvar(request, 'from_date')
current_date = ceil_strdate(str(from_date), 'start')
# current_date = trunc_date_start(from_date)
switch_id = getvar(request, 'switch_id')
compare_days = int(getvar(request, 'compare_days'))
metric = getvar(request, 'metric')
kwargs = {}
if switch_id and switch_id != '0':
kwargs['switch_id'] = int(switch_id)
xdata = [i for i in range(0, 24)]
hourly_chartdata = {'x': xdata}
y_count = 1
for nday in range(1, compare_days + 1):
start_date = current_date + relativedelta(days=-int(nday-1))
start_date = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0, 0)
end_date = current_date + relativedelta(days=-int(nday-1))
end_date = datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59, 999999)
# Get hourly Data
hourly_data = get_report_compare_cdr(request.user, 'hour', start_date, end_date, switch_id)
extra_serie = {
"tooltip": {"y_start": "", "y_end": " " + metric}
}
# We only need to set x axis once, so let's do it for nbcalls
# hourly_chartdata['x'] = hourly_data["nbcalls"]["x_timestamp"]
for switch in hourly_data[metric]["columns"]:
serie = get_switch_ip_addr(switch) + "_day_" + str(nday)
hourly_chartdata['name' + str(y_count)] = serie
hourly_chartdata['y' + str(y_count)] = hourly_data[metric]["values"][str(switch)]
hourly_chartdata['extra' + str(y_count)] = extra_serie
y_count += 1
variables = {
'form': form,
'from_date': current_date,
'metric': metric,
'compare_days': compare_days,
'hourly_charttype': hourly_charttype,
'hourly_chartdata': hourly_chartdata,
'hourly_chartcontainer': 'hourly_chartcontainer',
'hourly_extra': {
'x_is_date': False,
'x_axis_format': '',
'tag_script_js': True,
'jquery_on_ready': True,
},
}
return render_to_response('cdr/daily_comparison.html', variables, context_instance=RequestContext(request))
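# An illustrative sketch (invented values) of the chartdata layout built by the loop
# above for compare_days=2, one switch and metric='nbcalls': each compared day becomes
# one numbered series of 24 hourly values.
def _example_daily_comparison_chartdata():
    return {
        'x': list(range(0, 24)),
        'name1': '127.0.0.1_day_1', 'y1': [0] * 24,
        'extra1': {"tooltip": {"y_start": "", "y_end": " nbcalls"}},
        'name2': '127.0.0.1_day_2', 'y2': [0] * 24,
        'extra2': {"tooltip": {"y_start": "", "y_end": " nbcalls"}},
    }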
@permission_required('user_profile.overview', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_overview(request):
"""CDR graph by hourly/daily/monthly basis
**Attributes**:
* ``template`` - cdr/overview.html
* ``form`` - CdrOverviewForm
**Logic Description**:
        Get the call records from the PostgreSQL table and build
        all the monthly, daily and hourly analytics
"""
# initialize variables
hourly_charttype = "lineWithFocusChart"
daily_charttype = "lineWithFocusChart"
hourly_chartdata = {'x': []}
daily_chartdata = {'x': []}
metric = 'nbcalls' # Default metric
action = 'tabs-1'
tday = datetime.today()
switch_id = 0
# assign initial value in form fields
form = CdrOverviewForm(request.POST or None,
initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
'to_date': tday.strftime('%Y-%m-%d 23:55'),
'switch_id': switch_id})
start_date = trunc_date_start(tday)
end_date = trunc_date_end(tday)
if form.is_valid():
from_date = getvar(request, 'from_date')
to_date = getvar(request, 'to_date')
start_date = trunc_date_start(from_date)
end_date = trunc_date_end(to_date)
switch_id = getvar(request, 'switch_id')
metric = getvar(request, 'metric')
    # get the number of hours between the two dates
delta = end_date - start_date
hour_diff = abs(divmod(delta.days * 86400 + delta.seconds, 60)[0]) / 60
if hour_diff <= 72:
display_chart = 'hourly'
else:
display_chart = 'daily'
# check metric is valid
if metric not in ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']:
metric = 'nbcalls'
extra_serie = {
"tooltip": {"y_start": "", "y_end": " " + metric},
"date_format": "%d %b %y %H:%M%p"
}
if display_chart == 'hourly':
hourly_data = get_report_cdr_per_switch(request.user, 'hour', start_date, end_date, switch_id)
for switch in hourly_data[metric]["columns"]:
hourly_chartdata['x'] = hourly_data[metric]["x_timestamp"]
hourly_chartdata['name' + str(switch)] = get_switch_ip_addr(switch)
hourly_chartdata['y' + str(switch)] = hourly_data[metric]["values"][str(switch)]
hourly_chartdata['extra' + str(switch)] = extra_serie
total_calls = hourly_data["nbcalls"]["total"]
total_duration = hourly_data["duration"]["total"]
total_billsec = hourly_data["billsec"]["total"]
total_buy_cost = hourly_data["buy_cost"]["total"]
total_sell_cost = hourly_data["sell_cost"]["total"]
elif display_chart == 'daily':
daily_data = get_report_cdr_per_switch(request.user, 'day', start_date, end_date, switch_id)
for switch in daily_data[metric]["columns"]:
daily_chartdata['x'] = daily_data[metric]["x_timestamp"]
daily_chartdata['name' + str(switch)] = get_switch_ip_addr(switch)
daily_chartdata['y' + str(switch)] = daily_data[metric]["values"][str(switch)]
daily_chartdata['extra' + str(switch)] = extra_serie
total_calls = daily_data["nbcalls"]["total"]
total_duration = daily_data["duration"]["total"]
total_billsec = daily_data["billsec"]["total"]
total_buy_cost = daily_data["buy_cost"]["total"]
total_sell_cost = daily_data["sell_cost"]["total"]
# Calculate the Average Time of Call
metric_aggr = calculate_act_acd(total_calls, total_duration)
# Get top 10 of country calls
country_data = custom_sql_aggr_top_country(request.user, switch_id, 10, start_date, end_date)
variables = {
'action': action,
'form': form,
'display_chart': display_chart,
'start_date': start_date,
'end_date': end_date,
'metric': metric,
'hourly_chartdata': hourly_chartdata,
'hourly_charttype': hourly_charttype,
'hourly_chartcontainer': 'hourly_container',
'hourly_extra': {
'x_is_date': True,
'x_axis_format': '%d %b %y %H%p',
'tag_script_js': True,
'jquery_on_ready': True,
},
'daily_chartdata': daily_chartdata,
'daily_charttype': daily_charttype,
'daily_chartcontainer': 'daily_container',
'daily_extra': {
'x_is_date': True,
'x_axis_format': '%d %b %Y',
'tag_script_js': True,
'jquery_on_ready': True,
},
'total_calls': total_calls,
'total_duration': total_duration,
'total_billsec': total_billsec,
'total_buy_cost': total_buy_cost,
'total_sell_cost': total_sell_cost,
'metric_aggr': metric_aggr,
'country_data': country_data,
}
return render_to_response('cdr/overview.html', variables, context_instance=RequestContext(request))
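# A minimal sketch of the granularity rule used above: spans of up to 72 hours are
# charted hourly, anything longer is charted daily. The helper name is hypothetical
# and the function is not called anywhere.
def _example_display_chart(start_date, end_date):
    delta = end_date - start_date
    hour_diff = abs(divmod(delta.days * 86400 + delta.seconds, 60)[0]) / 60
    return 'hourly' if hour_diff <= 72 else 'daily'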
@permission_required('user_profile.by_country', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_country_report(request):
"""CDR country report
**Attributes**:
* ``template`` - cdr/country_report.html
* ``form`` - CountryReportForm
**Logic Description**:
Retrieve call records from Postgresql for all countries
and create reporting information for those countries
"""
metric = 'nbcalls'
tday = datetime.today()
switch_id = 0
hourly_charttype = "lineWithFocusChart"
hourly_chartdata = {'x': []}
country_id_list = []
total_metric = 0
# assign initial value in form fields
form = CountryReportForm(request.POST or None,
initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
'to_date': tday.strftime('%Y-%m-%d 23:55'),
'switch_id': switch_id})
start_date = trunc_date_start(tday)
end_date = trunc_date_end(tday)
if form.is_valid():
from_date = getvar(request, 'from_date')
to_date = getvar(request, 'to_date')
start_date = trunc_date_start(from_date)
end_date = trunc_date_end(to_date)
switch_id = getvar(request, 'switch_id')
metric = getvar(request, 'metric')
country_id = form.cleaned_data['country_id']
# convert list value in int
country_id_list = [int(row) for row in country_id]
# handle 0 (All) selection
if 0 in country_id_list:
country_id_list = []
# check metric is valid
if metric not in ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']:
metric = 'nbcalls'
hourly_data = get_report_cdr_per_country(request.user, 'hour', start_date, end_date, switch_id, country_id_list)
extra_serie = {
"tooltip": {"y_start": "", "y_end": " " + metric},
"date_format": "%d %b %y %H:%M%p"
}
for country in hourly_data[metric]["columns"]:
hourly_chartdata['x'] = hourly_data[metric]["x_timestamp"]
country_name = get_country_name(int(country)).encode('utf-8')
hourly_chartdata['name' + str(country)] = country_name.decode('ascii', 'ignore').replace("'", " ")
hourly_chartdata['y' + str(country)] = hourly_data[metric]["values"][str(country)]
hourly_chartdata['extra' + str(country)] = extra_serie
total_calls = hourly_data["nbcalls"]["total"]
total_duration = hourly_data["duration"]["total"]
total_billsec = hourly_data["billsec"]["total"]
total_buy_cost = hourly_data["buy_cost"]["total"]
total_sell_cost = hourly_data["sell_cost"]["total"]
# Calculate the Average Time of Call
metric_aggr = calculate_act_acd(total_calls, total_duration)
# Get top 10 of country calls
top_country = 10
country_data = custom_sql_aggr_top_country(request.user, switch_id, top_country, start_date, end_date)
# Build pie chart data for last 24h calls per country
(xdata, ydata) = ([], [])
for country in country_data:
xdata.append(get_country_name(country["country_id"]))
ydata.append(percentage(country["nbcalls"], total_calls))
color_list = ['#FFC36C', '#FFFF9D', '#BEEB9F', '#79BD8F', '#FFB391',
'#58A6A6', '#86BF30', '#F2D022', '#D9AA1E', '#D98236']
extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
country_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
country_analytic_charttype = "pieChart"
country_extra = {
'x_is_date': False,
'x_axis_format': '',
'tag_script_js': True,
'jquery_on_ready': True,
}
data = {
'action': 'tabs-1',
'total_metric': total_metric,
'start_date': start_date,
'end_date': end_date,
'metric': metric,
'form': form,
'NUM_COUNTRY': settings.NUM_COUNTRY,
'hourly_charttype': hourly_charttype,
'hourly_chartdata': hourly_chartdata,
'hourly_chartcontainer': 'hourly_container',
'hourly_extra': {
'x_is_date': True,
'x_axis_format': '%d %b %Y',
'tag_script_js': True,
'jquery_on_ready': False,
},
'total_calls': total_calls,
'total_duration': total_duration,
'total_billsec': total_billsec,
'total_buy_cost': total_buy_cost,
'total_sell_cost': total_sell_cost,
'metric_aggr': metric_aggr,
'country_data': country_data,
'country_analytic_charttype': country_analytic_charttype,
'country_analytic_chartdata': country_analytic_chartdata,
'country_chartcontainer': 'country_piechart_container',
'country_extra': country_extra,
'top_country': top_country,
}
return render_to_response('cdr/country_report.html', data, context_instance=RequestContext(request))
def generate_crate(max_num):
"""
    helper function to generate a well-distributed list of grade thresholds
"""
if not max_num:
max_num = 100
increment = max_num / 7
base = max_num / 10
grades = []
for i in range(0, 7):
x = i * increment
grades.append(int(base * round(float(x) / base)))
# Add the max too
grades.append(max_num)
return grades
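# Illustrative usage of generate_crate(): under Python 3 division, generate_crate(1000)
# should return eight roughly evenly spaced thresholds ending at the maximum, e.g.
# [0, 100, 300, 400, 600, 700, 900, 1000], which the world map template can use as
# colour grades.
def _example_call_crates():
    return generate_crate(1000)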
@permission_required('user_profile.world_map', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def world_map_view(request):
"""CDR world report
**Attributes**:
* ``template`` - cdr/world_map.html
* ``form`` - WorldForm
"""
logging.debug('CDR world report view start')
action = 'tabs-1'
switch_id = 0
tday = datetime.today()
# Assign initial value in form fields
form = WorldForm(request.POST or None,
initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
'to_date': tday.strftime('%Y-%m-%d 23:55'),
'switch_id': switch_id})
start_date = trunc_date_start(tday)
end_date = trunc_date_end(tday)
if form.is_valid():
from_date = getvar(request, 'from_date')
to_date = getvar(request, 'to_date')
start_date = trunc_date_start(from_date)
end_date = trunc_date_end(to_date)
switch_id = getvar(request, 'switch_id')
    # Get the top countries by call volume (up to 300)
top_country = 300
country_data = custom_sql_aggr_top_country(request.user, switch_id, top_country, start_date, end_date)
world_analytic_array = []
max_nbcalls = 0
for country in country_data:
if not country["country_id"]:
continue
        # append a per-country record to world_analytic_array with the fields:
        # country id | iso3 | country name | call count | duration | billsec | buy cost | sell cost
country_data = {}
country_data["country_id"] = int(country["country_id"])
country_data["country_iso3"] = get_country_name(int(country["country_id"]), type='iso3').upper()
country_data["country_name"] = get_country_name(int(country["country_id"]))
country_data["nbcalls"] = int(country["nbcalls"])
if country_data["nbcalls"] > max_nbcalls:
max_nbcalls = country_data["nbcalls"]
country_data["duration"] = int(country["duration"])
country_data["billsec"] = int(country["billsec"])
country_data["buy_cost"] = float(country["buy_cost"])
country_data["sell_cost"] = float(country["sell_cost"])
world_analytic_array.append(country_data)
max_nbcalls = int(round(max_nbcalls, -3))
call_crates = generate_crate(max_nbcalls)
variables = {
'form': form,
'start_date': start_date,
'end_date': end_date,
'world_analytic_array': world_analytic_array,
'action': action,
'call_crates': call_crates,
}
return render_to_response('cdr/world_map.html',
variables, context_instance=RequestContext(request))
| mpl-2.0 |
NifTK/NiftyNet | niftynet/engine/windows_aggregator_grid.py | 1 | 8065 | # -*- coding: utf-8 -*-
"""
windows aggregator decodes sampling grid coordinates and image ids from
batch data, forms image-level outputs and writes them to the hard drive.
"""
from __future__ import absolute_import, division, print_function
import os
from collections import OrderedDict
import numpy as np
import pandas as pd
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-branches
import niftynet.io.misc_io as misc_io
from niftynet.engine.windows_aggregator_base import ImageWindowsAggregator
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
class GridSamplesAggregator(ImageWindowsAggregator):
"""
    This class keeps a record of the currently cached image,
    initialised as all zeros; its values are replaced by the
    image window data decoded from each batch.
"""
def __init__(self,
image_reader,
name='image',
output_path=os.path.join('.', 'output'),
window_border=(),
interp_order=0,
postfix='niftynet_out',
fill_constant=0.0):
ImageWindowsAggregator.__init__(
self, image_reader=image_reader, output_path=output_path)
self.name = name
self.image_out = None
self.csv_out = None
self.window_border = window_border
self.output_interp_order = interp_order
self.postfix = postfix
self.fill_constant = fill_constant
def decode_batch(self, window, location):
"""
        Save the multiple outputs listed in the window dictionary.
        Fields whose dictionary key contains the keyword 'window' are
        aggregated and saved as images; every other field is accumulated
        and saved as a csv file in which each row holds the output values
        of one window followed by that window's location array.
:param window: dictionary of output
:param location: location of the input
:return:
"""
n_samples = location.shape[0]
location_cropped = {}
for key in window:
            if 'window' in key:  # all outputs to be saved as images must
                # contain the keyword "window"
window[key], location_cropped[key] = self.crop_batch(
window[key], location, self.window_border)
for batch_id in range(n_samples):
image_id = location[batch_id, 0]
if image_id != self.image_id:
# image name changed:
# save current result and create an empty result file
self._save_current_image()
self._save_current_csv()
if self._is_stopping_signal(location[batch_id]):
return False
self.image_out, self.csv_out = {}, {}
for key in window:
if 'window' in key:
# to be saved as image
self.image_out[key] = self._initialise_empty_image(
image_id=image_id,
n_channels=window[key].shape[-1],
dtype=window[key].dtype)
else:
# to be saved as csv file
n_elements = np.int64(
np.asarray(window[key]).size / n_samples)
table_header = [
'{}_{}'.format(key, idx)
for idx in range(n_elements)
] if n_elements > 1 else ['{}'.format(key)]
table_header += [
'coord_{}'.format(idx)
for idx in range(location.shape[-1])
]
self.csv_out[key] = self._initialise_empty_csv(
key_names=table_header)
for key in window:
if 'window' in key:
x_start, y_start, z_start, x_end, y_end, z_end = \
location_cropped[key][batch_id, 1:]
self.image_out[key][
x_start:x_end, y_start:y_end, z_start:z_end, ...] = \
window[key][batch_id, ...]
else:
window[key] = np.asarray(window[key]).reshape(
[n_samples, -1])
window_save = window[key][batch_id:batch_id + 1, :]
window_loc = location[batch_id:batch_id + 1, :]
csv_row = np.concatenate([window_save, window_loc], 1)
csv_row = csv_row.ravel()
key_names = self.csv_out[key].columns
self.csv_out[key] = self.csv_out[key].append(
OrderedDict(zip(key_names, csv_row)),
ignore_index=True)
return True
def _initialise_empty_image(self, image_id, n_channels, dtype=np.float):
"""
Initialise an empty image in which to populate the output
:param image_id: image_id to be used in the reader
        :param n_channels: number of channels of the saved output (for
multimodal output)
:param dtype: datatype used for the saving
:return: the initialised empty image
"""
self.image_id = image_id
spatial_shape = self.input_image[self.name].shape[:3]
output_image_shape = spatial_shape + (n_channels, )
empty_image = np.zeros(output_image_shape, dtype=dtype)
for layer in self.reader.preprocessors:
if isinstance(layer, PadLayer):
empty_image, _ = layer(empty_image)
if self.fill_constant != 0.0:
empty_image[:] = self.fill_constant
return empty_image
def _initialise_empty_csv(self, key_names):
"""
        Initialise an empty dataframe in which to accumulate the csv output
        :param key_names: column names of the saved fields
        :return: empty dataframe with the given columns, to be saved as csv
"""
return pd.DataFrame(columns=key_names)
def _save_current_image(self):
"""
For all the outputs to be saved as images, go through the dictionary
and save the resulting output after reversing the initial preprocessing
:return:
"""
if self.input_image is None:
return
for layer in reversed(self.reader.preprocessors):
if isinstance(layer, PadLayer):
for i in self.image_out:
self.image_out[i], _ = layer.inverse_op(self.image_out[i])
if isinstance(layer, DiscreteLabelNormalisationLayer):
for i in self.image_out:
self.image_out[i], _ = layer.inverse_op(self.image_out[i])
subject_name = self.reader.get_subject_id(self.image_id)
for i in self.image_out:
filename = "{}_{}_{}.nii.gz".format(i, subject_name, self.postfix)
source_image_obj = self.input_image[self.name]
misc_io.save_data_array(self.output_path, filename,
self.image_out[i], source_image_obj,
self.output_interp_order)
self.log_inferred(subject_name, filename)
return
def _save_current_csv(self):
"""
        For all outputs to be saved as csv, loop through the dictionary
        of outputs and write one csv file per entry
:return:
"""
if self.input_image is None:
return
subject_name = self.reader.get_subject_id(self.image_id)
for i in self.csv_out:
filename = "{}_{}_{}.csv".format(i, subject_name, self.postfix)
misc_io.save_csv_array(self.output_path, filename, self.csv_out[i])
self.log_inferred(subject_name, filename)
return
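# A standalone numpy sketch (not used by the class above) of the core idea in
# GridSamplesAggregator.decode_batch(): every window carries its image id plus the
# bounding coordinates it was sampled from, and the aggregator writes the window back
# into a full-size zero volume at exactly those coordinates. All sizes are made up.
def _example_place_window_back():
    full_volume = np.zeros((64, 64, 64, 1), dtype=np.float32)
    window = np.ones((16, 16, 16, 1), dtype=np.float32)
    # a location row is (image_id, x_start, y_start, z_start, x_end, y_end, z_end)
    _, x_start, y_start, z_start, x_end, y_end, z_end = (0, 8, 8, 8, 24, 24, 24)
    full_volume[x_start:x_end, y_start:y_end, z_start:z_end, ...] = window
    return full_volume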
| apache-2.0 |
Akshay0724/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# Generate the training data
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the predictions and the 90% prediction interval
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
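# A small add-on sketch (not part of the original example) showing how the empirical
# coverage of the interval could be checked on the training points by refitting the
# two quantile models; with alpha=0.95 and 1-alpha=0.05 roughly 90% of the
# observations should fall inside the band.
def _empirical_coverage():
    params = dict(n_estimators=250, max_depth=3, learning_rate=.1,
                  min_samples_leaf=9, min_samples_split=9)
    lower = GradientBoostingRegressor(loss='quantile', alpha=1.0 - alpha,
                                      **params).fit(X, y).predict(X)
    upper = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                      **params).fit(X, y).predict(X)
    return np.mean((y >= lower) & (y <= upper))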
| bsd-3-clause |
mrshu/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 2 | 7071 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.utils.testing import assert_less
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=True)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
#X_pred2 = kpca.inverse_transform(X_pred_transformed)
#assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
"""Test the linear separability of the first 2D KPCA transform"""
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
shikhardb/scikit-learn | sklearn/preprocessing/tests/test_data.py | 8 | 31730 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
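# An illustrative helper (not a test) for the cancellation issue the warnings above
# guard against: for a constant array, the "E[x^2] - E[x]^2" form of the variance
# should be exactly zero, but in floating point it can come out as a tiny non-zero
# value, which then destabilises the subsequent scaling.
def _naive_variance(values):
    values = np.asarray(values, dtype=np.float64)
    return np.mean(values ** 2) - np.mean(values) ** 2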
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
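
# Illustrative sketch (hypothetical helper): with with_mean=False the scaler only
# divides each column by its standard deviation, so stored zeros stay zero and the
# CSR/CSC sparsity pattern is preserved -- the reason sparse input is accepted here.
def _scale_no_centering_reference(X_dense):
    X = np.asarray(X_dense, dtype=float)
    std = X.std(axis=0)
    std = np.where(std == 0.0, 1.0, std)  # leave constant (e.g. all-zero) columns alone
    return X / std
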
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "assumes floating point values as input, got uint8"
clean_warning_registry()
assert_warns_message(UserWarning, w, scale, X)
assert_warns_message(UserWarning, w, StandardScaler().fit, X)
assert_warns_message(UserWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
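
# Illustrative sketch (hypothetical helper): the Normalizer divides each row by its
# l1 or l2 norm and leaves all-zero rows untouched, matching the assertions above.
def _normalize_rows_reference(X_dense, norm='l2'):
    X = np.asarray(X_dense, dtype=float)
    if norm == 'l1':
        norms = np.abs(X).sum(axis=1)
    else:
        norms = np.sqrt((X ** 2).sum(axis=1))
    norms = np.where(norms == 0.0, 1.0, norms)  # keep zero rows as-is
    return X / norms[:, np.newaxis]
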
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
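
# Illustrative sketch (hypothetical helper): Binarizer maps values strictly greater
# than the threshold to 1 and everything else to 0, which yields the 0/1 counts
# checked above for threshold=2.0 and the default threshold=0.0.
def _binarize_reference(X_dense, threshold=0.0):
    return (np.asarray(X_dense, dtype=float) > threshold).astype(float)
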
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
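
# Illustrative sketch (hypothetical helper): centring a training kernel K = X X^T in
# feature space amounts to K_c = K - 1_n K - K 1_n + 1_n K 1_n, where 1_n is the
# n x n matrix with every entry equal to 1/n -- the quantity KernelCenterer fits above.
def _center_train_kernel_reference(K_fit):
    n_samples = K_fit.shape[0]
    one_n = np.ones((n_samples, n_samples)) / n_samples
    return (K_fit - np.dot(one_n, K_fit) - np.dot(K_fit, one_n)
            + np.dot(np.dot(one_n, K_fit), one_n))
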
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
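
# Illustrative sketch (hypothetical helper): add_dummy_feature prepends a constant
# column (1.0 by default), e.g. so a linear model's intercept can be folded into the
# coefficient vector; the sparse variants above preserve the input's sparse format.
def _add_dummy_feature_reference(X_dense, value=1.0):
    X = np.asarray(X_dense, dtype=float)
    return np.hstack([np.ones((X.shape[0], 1)) * value, X])
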
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
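
# Illustrative sketch (hypothetical helper, dense output only): with automatically
# discovered n_values the encoder reserves max(column) + 1 slots per input column;
# feature_indices_ stores the cumulative slot offsets and active_features_ keeps the
# slots actually observed during fit, which explains the [0, 4, 7, 9] offsets above.
def _one_hot_reference(X_int):
    X = np.asarray(X_int, dtype=int)
    n_values = X.max(axis=0) + 1
    offsets = np.concatenate([[0], np.cumsum(n_values)])
    out = np.zeros((X.shape[0], offsets[-1]))
    for j in range(X.shape[1]):
        out[np.arange(X.shape[0]), offsets[j] + X[:, j]] = 1.0
    return out  # columns that were never active would still need to be dropped
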
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
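
# Illustrative sketch (hypothetical helper; the string "all" case used above is
# omitted for brevity): _transform_selected applies the transform to the selected
# columns only and stacks the untouched remainder after them.
def _transform_selected_reference(X_dense, transform, selected):
    X = np.asarray(X_dense)
    mask = np.zeros(X.shape[1], dtype=bool)
    mask[selected] = True  # accepts integer indices or a boolean mask
    if mask.all():
        return transform(X)
    if not mask.any():
        return X
    return np.hstack([transform(X[:, mask]), X[:, ~mask]])
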
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
ueshin/apache-spark | python/pyspark/pandas/tests/plot/test_series_plot.py | 15 | 4133 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.plot import PandasOnSparkPlotAccessor, BoxPlotBase
from pyspark.testing.pandasutils import have_plotly, plotly_requirement_message
class SeriesPlotTest(unittest.TestCase):
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@unittest.skipIf(not have_plotly, plotly_requirement_message)
def test_plot_backends(self):
plot_backend = "plotly"
with ps.option_context("plotting.backend", plot_backend):
self.assertEqual(ps.options.plotting.backend, plot_backend)
module = PandasOnSparkPlotAccessor._get_plot_backend(plot_backend)
self.assertEqual(module.__name__, "pyspark.pandas.plot.plotly")
def test_plot_backends_incorrect(self):
fake_plot_backend = "none_plotting_module"
with ps.option_context("plotting.backend", fake_plot_backend):
self.assertEqual(ps.options.plotting.backend, fake_plot_backend)
with self.assertRaises(ValueError):
PandasOnSparkPlotAccessor._get_plot_backend(fake_plot_backend)
def test_box_summary(self):
def check_box_summary(psdf, pdf):
k = 1.5
stats, fences = BoxPlotBase.compute_stats(psdf["a"], "a", whis=k, precision=0.01)
outliers = BoxPlotBase.outliers(psdf["a"], "a", *fences)
whiskers = BoxPlotBase.calc_whiskers("a", outliers)
fliers = BoxPlotBase.get_fliers("a", outliers, whiskers[0])
expected_mean = pdf["a"].mean()
expected_median = pdf["a"].median()
expected_q1 = np.percentile(pdf["a"], 25)
expected_q3 = np.percentile(pdf["a"], 75)
iqr = expected_q3 - expected_q1
expected_fences = (expected_q1 - k * iqr, expected_q3 + k * iqr)
pdf["outlier"] = ~pdf["a"].between(fences[0], fences[1])
expected_whiskers = (
pdf.query("not outlier")["a"].min(),
pdf.query("not outlier")["a"].max(),
)
expected_fliers = pdf.query("outlier")["a"].values
self.assertEqual(expected_mean, stats["mean"])
self.assertEqual(expected_median, stats["med"])
self.assertEqual(expected_q1, stats["q1"] + 0.5)
self.assertEqual(expected_q3, stats["q3"] - 0.5)
self.assertEqual(expected_fences[0], fences[0] + 2.0)
self.assertEqual(expected_fences[1], fences[1] - 2.0)
self.assertEqual(expected_whiskers[0], whiskers[0])
self.assertEqual(expected_whiskers[1], whiskers[1])
self.assertEqual(expected_fliers, fliers)
check_box_summary(self.psdf1, self.pdf1)
check_box_summary(-self.psdf1, -self.pdf1)
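
# Illustrative sketch (hypothetical helper, not part of the plotting code under test):
# the box plot summary follows Tukey's rule -- with IQR = q3 - q1 the fences are
# q1 - whis * IQR and q3 + whis * IQR, outliers fall outside the fences, and the
# whiskers are the most extreme values that still lie inside them.
def _tukey_fences(values, whis=1.5):
    values = np.asarray(values, dtype=float)
    q1, q3 = np.percentile(values, [25, 75])
    iqr = q3 - q1
    return q1 - whis * iqr, q3 + whis * iqr
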
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_series_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |