parent
b7bcb629db
commit
84adfb5a96
@@ -1,18 +1,44 @@
 import streamlit as st
+import sys
+
+sys.path.append('./back/')
+
+import managing_missing_values as mmv
+import load_csv as lc
+
+if 'original_df' in st.session_state:
+    df = st.session_state.original_df
 
-st.write("# 🧼 Data cleaning")
+    st.write("# 🧼 Data cleaning")
 
-st.write("## Missing data")
-rm_empty_rows_or_cols = st.checkbox("Remove empty rows or columns", True)
+    st.write("## Missing data")
+    rm_empty_rows_or_cols = st.checkbox("Remove empty rows or columns", True)
 
-st.write("#### Replace missing values")
-replace_methods = ["Mean","Median","Mode","KNN","Regression"]
-replace_method = st.radio('Choose an option:', replace_methods)
+    st.write("#### Replace missing values")
+    replace_methods = ["mean","median","mode","knn","regression"]
+    replace_method = st.radio('Choose an option:', replace_methods)
 
-st.write("## Normalize data")
-normalize_methods = ["Min-Max","Z-Score","Another One"]
-normalize_method = st.radio('Choose an option:', normalize_methods)
+    st.write("## Normalize data")
+    normalize_methods = ["min-max","z-score","robust"]
+    normalize_method = st.radio('Choose an option:', normalize_methods)
 
-if st.button("Clean dataset"):
-    # TODO: Actual processing
-    st.write("TODO")
+    is_cleaned = st.button("Clean dataset")
+    if is_cleaned:
+        if rm_empty_rows_or_cols:
+            st.write("- Removing high null percentage values")
+            df = mmv.drop_high_null_percentage(df)
+            st.dataframe(df)
+
+        st.write("- Handle missing values with method:", replace_method)
+        df = mmv.handle_missing_values(df, replace_method)
+        st.session_state.df = df
+        st.dataframe(df)
+
+        st.write("- Normalize with method:", normalize_method)
+        df = lc.handle_normalization(df, normalize_method)
+        st.session_state.df = df
+        st.dataframe(df)
+
+        st.switch_page("pages/visualize.py")
+else:
+    st.write("Please upload your dataset.")
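The cleaning page calls into two back-end modules under ./back/ that are not part of this diff: managing_missing_values (mmv) and load_csv (lc). The sketch below is only an assumption of what those helpers could look like, inferred from the function names and arguments used above; the 0.5 null threshold, the strategy branches, and the robust-scaling formula are illustrative guesses, not the project's actual code.

# Hypothetical sketch -- these helpers live in ./back/ and are not shown in this diff.
import pandas as pd

def drop_high_null_percentage(df: pd.DataFrame, threshold: float = 0.5) -> pd.DataFrame:
    """Drop columns whose share of missing values exceeds `threshold`, then fully empty rows."""
    df = df.loc[:, df.isna().mean() <= threshold]
    return df.dropna(how="all")

def handle_missing_values(df: pd.DataFrame, method: str) -> pd.DataFrame:
    """Fill missing values in numeric columns with the chosen strategy."""
    num = df.select_dtypes("number").columns
    if method == "mean":
        df[num] = df[num].fillna(df[num].mean())
    elif method == "median":
        df[num] = df[num].fillna(df[num].median())
    elif method == "mode":
        df = df.fillna(df.mode().iloc[0])
    # "knn" and "regression" would need sklearn.impute.KNNImputer / IterativeImputer.
    return df

def handle_normalization(df: pd.DataFrame, method: str) -> pd.DataFrame:
    """Rescale numeric columns with the chosen scheme."""
    num = df.select_dtypes("number").columns
    if method == "min-max":
        df[num] = (df[num] - df[num].min()) / (df[num].max() - df[num].min())
    elif method == "z-score":
        df[num] = (df[num] - df[num].mean()) / df[num].std()
    elif method == "robust":
        iqr = df[num].quantile(0.75) - df[num].quantile(0.25)
        df[num] = (df[num] - df[num].median()) / iqr
    return df

Given helpers along these lines, the page's flow is: drop mostly-empty columns and rows, impute the remaining gaps with the selected strategy, rescale numeric columns, then jump to pages/visualize.py.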
@@ -0,0 +1,41 @@
+import streamlit as st
+import pandas as pd
+import sys
+
+sys.path.append('./back/')
+
+import clustering_csv as cc
+import prediction as p
+
+if 'df' in st.session_state:
+    df = st.session_state.df
+    df_cols = df.columns.tolist()
+
+    st.write("# 🔮 Prediction")
+
+    if st.button("K-means"):
+        st.pyplot(cc.launch_cluster_knn(df, ["Route Type", "Traffic Control"]))
+
+    if st.button("DBSCAN"):
+        st.pyplot(cc.launch_cluster_dbscan(df, ["Route Type", "Traffic Control"]))
+
+    if st.button("Linear Regression"):
+        col = "Route Type"
+        df_cols.remove(col)
+        original_col = df[col]
+        predicted_col = p.getColumnsForPredictionAndPredict(df, df_cols, "Route Type", "Linear Regression")
+
+    if st.button("Random Forest"):
+        col = "Route Type"
+        df_cols.remove(col)
+        original_col = df[col]
+        predicted_col = p.getColumnsForPredictionAndPredict(df, df_cols, "Route Type", "Random Forest")
+
+        ndf = pd.DataFrame()
+        ndf['Original'] = original_col
+        ndf['Predicted'] = predicted_col
+
+        st.dataframe(ndf)
+
+else:
+    st.write("Please clean your dataset.")
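As with the cleaning page, the back-end modules clustering_csv (cc) and prediction (p) are not included in this diff. The sketch below is a rough assumption of what the helpers called above might do, using scikit-learn; the n_clusters default, the figure layout, and the numeric-only feature selection are guesses rather than the real implementation. The launch_cluster_* helpers presumably return matplotlib figures, since their results are passed directly to st.pyplot.

# Hypothetical sketch -- these helpers live in ./back/ and are not shown in this diff.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression

def launch_cluster_knn(df, cols, n_clusters=3):
    """Cluster two numeric columns with K-means and return a matplotlib figure."""
    X = df[cols].dropna()
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(X)
    fig, ax = plt.subplots()
    ax.scatter(X[cols[0]], X[cols[1]], c=labels)
    ax.set_xlabel(cols[0])
    ax.set_ylabel(cols[1])
    return fig

def getColumnsForPredictionAndPredict(df, feature_cols, target_col, model_name):
    """Fit the chosen regressor on `feature_cols` and return predictions for `target_col`."""
    X = df[feature_cols].select_dtypes("number")  # assumes features are numeric after cleaning
    y = df[target_col]
    model = LinearRegression() if model_name == "Linear Regression" else RandomForestRegressor()
    model.fit(X, y)
    return pd.Series(model.predict(X), index=df.index)

As reconstructed above, only the Random Forest branch builds and displays the Original/Predicted comparison table; the Linear Regression branch computes predicted_col but never shows it.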