Update app.py
app.py
CHANGED
@@ -16,18 +16,53 @@ if ("0" == "mycustom"):
 os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
 os.environ["CUDA_VISIBLE_DEVICES"] = ""

-zip_file
+if ("0" == "mycustom"):
+    uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
+    zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip")
+    zip_file = ZipFile(zip_path)
+    zip_file.extractall()
+    csv_path = "jena_climate_2009_2016.csv"
+    df = pd.read_csv(csv_path)

-if ("0"
+if ("0" == "mycustom"):
     mybacklogmax = 10000
     df = df.head(n=mybacklogmax)
     st.dataframe(df)

+if ("0" == "mycustom"):
+    myfields = [0, 1, 5, 7, 8, 10, 11]
+    myfields = [1]
+    mytitles = ["Date Time","p (mbar)","T (degC)","Tpot (K)","Tdew (degC)","rh (%)","VPmax (mbar)","VPact (mbar)","VPdef (mbar)","sh (g/kg)","H2OC (mmol/mol)","rho (g/m**3)","wv (m/s)","max. wv (m/s)","wd (deg)"]
+
+    mybacklogmax = 10
+
+    atoday = datetime.date.today()
+
+    ayear = int(atoday.strftime("%Y"))-0
+    amonth = int(atoday.strftime("%m"))
+    amonthday = int(atoday.strftime("%d"))
+
+    csvString = ""
+    csvString += (",").join(mytitles)
+    adf = pd.DataFrame(columns=mytitles)
+    for i in range((ayear-mybacklogmax),ayear,1):
+        alink = ("https://data.weather.gov.hk/weatherAPI/opendata/opendata.php?dataType=CLMTEMP&year={}&rformat=csv&station=HKO").format(str(i))
+        df = pd.read_csv(alink, skiprows=[0,1,2], skipfooter=3, engine='python', on_bad_lines='skip')
+
+        df = df.reset_index()  # make sure indexes pair with number of rows
+        for index, row in df.iterrows():
+            if (row[2]!=amonth) or (row[3]!=amonthday):
+                continue
+
+            adate = ("{:02d}.{:02d}.{} 00:00:00").format(row[3], row[2], row[1])
+            csvString += '\n'+(",").join([adate,"",str(row[4]),"","","","","","","","","","","",""])
+            st.write(row[0],adate)
+            adf = adf.append({"Date Time":adate,"T (degC)":(row[4]),}, ignore_index=True)
+            break
+    adf = pd.read_csv(StringIO(csvString), sep=",")
+    df = adf
+    st.dataframe(df)
+
 #%%

 title = "Timeseries forecasting for weather prediction"
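All three blocks added in this hunk sit behind `if ("0" == "mycustom"):`, which is never true, so as committed none of the Jena-climate download, the 10 000-row backlog cap, or the Hong Kong Observatory fetch actually runs; presumably the guard string is edited by hand to switch a branch on. The HKO branch loops over the last ten years of the CLMTEMP daily-temperature CSV and keeps, for each year, the row whose month and day match today, building a small one-feature history. The following is only an illustrative standalone sketch of that fetch-and-filter step; it assumes, as the committed code does, that after the skipped header rows the CSV columns are year, month, day and mean temperature, in that order.

# Hypothetical standalone sketch of the HKO fetch in the branch above.
# Assumption: after the skipped header rows, columns are year, month, day, mean temperature (deg C).
import datetime
import pandas as pd

today = datetime.date.today()
records = []
for year in range(today.year - 10, today.year):
    url = ("https://data.weather.gov.hk/weatherAPI/opendata/opendata.php"
           "?dataType=CLMTEMP&year={}&rformat=csv&station=HKO").format(year)
    yearly = pd.read_csv(url, skiprows=[0, 1, 2], skipfooter=3,
                         engine="python", on_bad_lines="skip", header=None)
    # Coerce month/day to numbers so the comparison survives stray text rows.
    month = pd.to_numeric(yearly.iloc[:, 1], errors="coerce")
    day = pd.to_numeric(yearly.iloc[:, 2], errors="coerce")
    match = yearly[(month == today.month) & (day == today.day)]
    if not match.empty:
        records.append({
            "Date Time": "{:02d}.{:02d}.{} 00:00:00".format(today.day, today.month, year),
            "T (degC)": match.iloc[0, 3],
        })

history = pd.DataFrame(records, columns=["Date Time", "T (degC)"])
print(history)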
@@ -85,18 +120,29 @@ learning_rate = 0.001
 batch_size = 256
 epochs = 10

+if ("0" != "mycustom"):
+    past = 10
+    future = 1
+    batch_size = 1

 def normalize(data, train_split):
     data_mean = data[:train_split].mean(axis=0)
     data_std = data[:train_split].std(axis=0)
     return (data - data_mean) / data_std

 print(
     "The selected parameters are:",
     ", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
 )
 selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
+
+if ("0" != "mycustom"):
+    print(
+        "The selected parameters are:",
+        ", ".join([titles[i] for i in myfields]),
+    )
+    selected_features = [feature_keys[i] for i in myfields]
+
 features = df[selected_features]
 features.index = df[date_time_key]
 features.head()
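The `"0" != "mycustom"` guards in this and the following hunks, by contrast, are always true, so `past = 10`, `future = 1`, `batch_size = 1` and the reduced `myfields = [1]` feature selection (temperature only) always override the defaults kept from the upstream example. A minimal sketch of what `past` and `future` mean for the training windows, assuming the rest of app.py follows the Keras "Timeseries forecasting for weather prediction" example these defaults come from:

# Illustrative only: how past/future define input windows and labels.
import numpy as np

past, future = 10, 1                       # values forced by the "0" != "mycustom" branch
series = np.arange(100, dtype="float32")   # stand-in for one normalized feature column

# Each sample is `past` consecutive readings; its label is the value at index i + past + future.
x = np.stack([series[i:i + past] for i in range(len(series) - past - future)])
y = series[past + future:]
print(x.shape, y.shape)   # (89, 10) (89,)

Note that if the ten-year HKO backlog from the first hunk ever becomes the data source, it yields only about ten rows, so a window of `past = 10` would leave little or no room for a complete training sample.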
@@ -119,6 +165,10 @@ learning_rate = 0.001
 batch_size = 256
 epochs = 10

+if ("0" != "mycustom"):
+    past = 10
+    future = 1
+    batch_size = 1

 def normalize(data, train_split):
     data_mean = data[:train_split].mean(axis=0)
@@ -129,6 +179,14 @@ print(
     ", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
 )
 selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
+
+if ("0" != "mycustom"):
+    print(
+        "The selected parameters are:",
+        ", ".join([titles[i] for i in myfields]),
+    )
+    selected_features = [feature_keys[i] for i in myfields]
+
 features = df[selected_features]
 features.index = df[date_time_key]
 features.head()
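One compatibility note on the HKO branch in the first hunk: `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0, so the `adf.append(...)` call only works on older pandas. A drop-in replacement using `pd.concat`, with stand-in values so the snippet runs on its own:

import pandas as pd

# Stand-ins for the values built inside the loop in the first hunk.
adf = pd.DataFrame([{"Date Time": "01.01.2023 00:00:00", "T (degC)": 17.0}])
adate, temp = "01.01.2024 00:00:00", 16.2

# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
adf = pd.concat([adf, pd.DataFrame([{"Date Time": adate, "T (degC)": temp}])],
                ignore_index=True)
print(adf)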