# ------------------------ Libraries --------------------------
import os
import pandas as pd
import streamlit as st
import plotly.graph_objs as go
import logging
import subprocess
import threading
from dotenv import load_dotenv
# ------------------------ Environment Variables --------------------------
load_dotenv()
log_folder = os.getenv("LOG_FOLDER")
# Logging
log_folder = os.getenv("LOG_STREAMLIT")
os.makedirs(log_folder, exist_ok=True)
log_file = os.path.join(log_folder, "front.log")
log_format = "%(asctime)s [%(levelname)s] - %(message)s"
logging.basicConfig(filename=log_file, level=logging.INFO, format=log_format)
logging.info("Streamlit app has started")
# Create the output folder if it doesn't exist
os.makedirs("output", exist_ok=True)
#-------------------------------------back----------------------------------
def safe_read_csv(file_path, sep=','):
    if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
        return pd.read_csv(file_path, sep=sep)
    else:
        logging.warning(f"File {file_path} is empty or does not exist.")
        return pd.DataFrame()  # return an empty DataFrame
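# Example: safe_read_csv("output/top_100_update.csv") yields an empty DataFrame
# instead of raising when the file is absent, so callers must handle the empty
# case themselves (see the df_cmc guard below).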
# etherscan
## Load the data from the CSV files
df_etherscan = pd.DataFrame()
for filename in os.listdir('output'):
    if filename.endswith('.csv') and 'transactions_' in filename:
        df_temp = safe_read_csv(os.path.join('output', filename), sep=',')
        df_etherscan = pd.concat([df_etherscan, df_temp], ignore_index=True)
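# Only CSVs whose names contain 'transactions_' are merged; they are assumed to
# be produced by utils/scrap_etherscan.py (see the scraping jobs below). Any
# other files in output/ are ignored.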
# CMC
## Load cmc data
df_cmc = safe_read_csv("output/top_100_update.csv", sep=',')
# Keep only the most recent snapshot; skip the filter when the file was missing or empty
if not df_cmc.empty:
    df_cmc = df_cmc[df_cmc["last_updated"] == df_cmc["last_updated"].max()]
# Scrape Etherscan, then re-schedule the job to run again in an hour
def execute_etherscan_scraping():
    subprocess.call(["python", "utils/scrap_etherscan.py"])
    logging.info("Etherscan scraping completed")
    threading.Timer(3600, execute_etherscan_scraping).start()
# Scrape CoinMarketCap, then re-schedule the job (2592000 / 9000 = 288 s, i.e. ~5 minutes)
def execute_cmc_scraping():
    subprocess.call(["python", "utils/scrap_cmc.py"])
    logging.info("CMC scraping completed")
    threading.Timer(2592000 / 9000, execute_cmc_scraping).start()
if "initialized" not in st.session_state:
# Start the scraping threads
threading.Thread(target=execute_etherscan_scraping).start()
threading.Thread(target=execute_cmc_scraping).start()
st.session_state["initialized"] = True
#-------------------------------------streamlit ----------------------------------
# Set the title and other page configurations
st.title('Crypto Analysis')
# Create two columns for the two plots
col1, col2 = st.columns(2)
with st.container():
    with col1:
        # etherscan
        selected_token = st.selectbox('Select Token', df_etherscan['tokenSymbol'].unique(), index=0)
        # Filter the data based on the selected token
        filtered_df = df_etherscan[df_etherscan['tokenSymbol'] == selected_token]
        # Plot the token value over time
        st.plotly_chart(
            go.Figure(
                data=[
                    go.Scatter(
                        x=filtered_df['timeStamp'],
                        y=filtered_df['value'],
                        mode='lines',
                        name='Value over time'
                    )
                ],
                layout=go.Layout(
                    title='Token Value Over Time',
                    yaxis=dict(
                        title=f'Value ({selected_token})',
                    ),
                    showlegend=True,
                    legend=go.layout.Legend(x=0, y=1.0),
                    margin=go.layout.Margin(l=40, r=0, t=40, b=30),
                    width=500,
                    height=500
                )
            )
        )
    with col2:
        # cmc
        selected_var = st.selectbox('Select Metric', ["percent_change_24h", "percent_change_7d", "percent_change_90d"], index=0)
        # Sort the DataFrame by the selected metric in descending order
        df_sorted = df_cmc.sort_values(by=selected_var, ascending=False)
        # Select the 10 best and 10 worst rows
        top_10 = df_sorted.head(10)
        worst_10 = df_sorted.tail(10)
        # Combine the top and worst dataframes for plotting
        combined_df = pd.concat([top_10, worst_10], axis=0)
        # Largest absolute value across both groups (currently unused)
        max_abs_val = max(abs(combined_df[selected_var].min()), abs(combined_df[selected_var].max()))
        # Create a bar plot for the top 10 with a green color scale
        fig = go.Figure(data=[
            go.Bar(
                x=top_10["symbol"],
                y=top_10[selected_var],
                marker_color='rgb(0,100,0)',  # Green color for top 10
                hovertext="Name: " + top_10["name"].astype(str) + '<br>' +
                          'Tokens in Circulation (%): ' + top_10["percent_tokens_circulation"].astype(str) + '<br>' +
                          'Market Cap: ' + top_10["market_cap"].astype(str) + '<br>' +
                          'Fully Diluted Market Cap: ' + top_10["fully_diluted_market_cap"].astype(str) + '<br>' +
                          'Last Updated: ' + top_10["last_updated"].astype(str),
                name="top_10"
            )
        ])
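        # Alternative (sketch, not used here): plotly's hovertemplate with customdata
        # avoids concatenating strings by hand; both are standard go.Bar arguments:
        # go.Bar(..., customdata=top_10[["name", "market_cap"]].to_numpy(),
        #        hovertemplate="Name: %{customdata[0]}<br>Market Cap: %{customdata[1]}<extra></extra>")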
        # Add the worst 10 to the same plot with a red color scale
        fig.add_traces(go.Bar(
            x=worst_10["symbol"],
            y=worst_10[selected_var],
            marker_color='rgb(255,0,0)',  # Red color for worst 10
            hovertext="Name: " + worst_10["name"].astype(str) + '<br>' +
                      'Tokens in Circulation (%): ' + worst_10["percent_tokens_circulation"].astype(str) + '<br>' +
                      'Market Cap: ' + worst_10["market_cap"].astype(str) + '<br>' +
                      'Fully Diluted Market Cap: ' + worst_10["fully_diluted_market_cap"].astype(str) + '<br>' +
                      'Last Updated: ' + worst_10["last_updated"].astype(str),
            name="worst_10"
        ))
        # Customize aspect
        fig.update_traces(marker_line_color='rgb(8,48,107)', marker_line_width=1.5, opacity=0.8)
        fig.update_layout(title_text=f'Top 10 and Worst 10 by {selected_var.split("_")[-1]} Percentage Change')
        fig.update_xaxes(categoryorder='total ascending')
        fig.update_layout(
            autosize=False,
            width=500,
            height=500,
            margin=dict(
                l=50,
                r=50,
                b=100,
                t=100,
                pad=4
            ),
            # paper_bgcolor="LightSteelBlue",
        )
        st.plotly_chart(fig)
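        # Note: width/height are fixed above; alternatively, Streamlit can size the
        # chart to its column via st.plotly_chart(fig, use_container_width=True).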
#-------------------------------------end ----------------------------------