Compare commits


11 Commits

Author            SHA1        Message                                                                                               CI build  Date
gorky1234         f464f6166a  corection bug figsize                                                                                 passing   10 months ago
Clément FRÉVILLE  3038bd9841  Merge pull request 'Use cluster strategies and support PCA' (#15) from clustering-strategy into main  passing   10 months ago
Clément FRÉVILLE  7cb0d55969  Allow using PCA to reduce dataset dimensions                                                          passing   10 months ago
Clément FRÉVILLE  01ef19a2f8  Merge files using strategies                                                                          passing   10 months ago
Bastien OLLIER    86bd285193  Merge pull request 'stat_prediction' (#14) from stat_prediction into main                            passing   10 months ago
Bastien OLLIER    9bc9e21e45  add r2 score                                                                                          passing   10 months ago
Bastien OLLIER    da1e97f07f  add r2 score                                                                                          passing   10 months ago
Bastien OLLIER    27e69b2af8  add confusion_matrix                                                                                  passing   10 months ago
bastien           4054395641  update                                                                                                failing   10 months ago
bastien           01168f3588  add visu to prediction regression                                                                     failing   10 months ago
Bastien OLLIER    9da6e2d594  Add cluster stats (#13)                                                                               passing   10 months ago

.gitignore (vendored) — 2 changed lines

@@ -1,2 +1,2 @@
 __pycache__
-*/myenv
+.venv

@@ -1,45 +0,0 @@
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder

def perform_classification(data, data_name, target_name, test_size):
    X = data[data_name]
    y = data[target_name]

    label_encoders = {}
    for column in X.select_dtypes(include=['object']).columns:
        le = LabelEncoder()
        X[column] = le.fit_transform(X[column])
        label_encoders[column] = le

    if y.dtype == 'object':
        le = LabelEncoder()
        y = le.fit_transform(y)
        label_encoders[target_name] = le
    else:
        if y.nunique() > 10:
            raise ValueError("The target variable seems to be continuous. Please select a categorical target for classification.")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)

    model = LogisticRegression()
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)

    return model, label_encoders, accuracy

def make_prediction(model, label_encoders, data_name, target_name, input_values):
    X_new = []
    for feature, value in zip(data_name, input_values):
        if feature in label_encoders:
            value = label_encoders[feature].transform([value])[0]
        X_new.append(value)

    prediction = model.predict([X_new])
    if target_name in label_encoders:
        prediction = label_encoders[target_name].inverse_transform(prediction)

    return prediction[0]

@@ -1,16 +0,0 @@
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN

def perform_dbscan_clustering(data, data_name, eps, min_samples):
    x = data[data_name].to_numpy()
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    y_dbscan = dbscan.fit_predict(x)
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_dbscan, s=50, cmap="viridis")
    return fig

@@ -1,20 +0,0 @@
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

def perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter):
    x = data[data_name].to_numpy()
    kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
    y_kmeans = kmeans.fit_predict(x)
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X")
    return fig

@@ -1,18 +0,0 @@
from sklearn.linear_model import LinearRegression

def perform_regression(data, data_name, target_name):
    X = data[data_name]
    y = data[target_name]
    if not isinstance(y.iloc[0], (int, float)):
        raise ValueError("The target variable should be numeric (continuous) for regression.")
    model = LinearRegression()
    model.fit(X, y)
    return model

def make_prediction(model, feature_names, input_values):
    prediction = model.predict([input_values])
    return prediction[0]

@@ -1,16 +0,0 @@
import matplotlib.pyplot as plt
import seaborn as sns

def plot_histogram(data, column):
    fig, ax = plt.subplots()
    ax.hist(data[column].dropna(), bins=20, edgecolor='k')
    ax.set_title(f"Histogram of {column}")
    ax.set_xlabel(column)
    ax.set_ylabel("Frequency")
    return fig

def plot_boxplot(data, column):
    fig, ax = plt.subplots()
    sns.boxplot(data=data, x=column, ax=ax)
    ax.set_title(f"Boxplot of {column}")
    return fig

@@ -0,0 +1,83 @@
from sklearn.cluster import DBSCAN, KMeans
import numpy as np
from dataclasses import dataclass
from abc import ABC, abstractmethod
from typing import Any, Optional

@dataclass
class ClusterResult:
    labels: np.array
    centers: Optional[np.array]
    statistics: list[dict[str, Any]]

class Cluster(ABC):
    @abstractmethod
    def run(self, data: np.array) -> ClusterResult:
        pass

class DBSCANCluster(Cluster):
    def __init__(self, eps: float = 0.5, min_samples: int = 5):
        self.eps = eps
        self.min_samples = min_samples

    #@typing.override
    def run(self, data: np.array) -> ClusterResult:
        dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples)
        labels = dbscan.fit_predict(data)
        return ClusterResult(labels, None, self.get_statistics(data, labels))

    def get_statistics(self, data: np.array, labels: np.array) -> list[dict[str, Any]]:
        unique_labels = np.unique(labels)
        stats = []
        for label in unique_labels:
            if label == -1:
                continue
            cluster_points = data[labels == label]
            num_points = len(cluster_points)
            density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod()
            stats.append({
                "cluster": label,
                "num_points": num_points,
                "density": density
            })
        return stats

    def __str__(self) -> str:
        return "DBScan"

class KMeansCluster(Cluster):
    def __init__(self, n_clusters: int = 8, n_init: int = 1, max_iter: int = 300):
        self.n_clusters = n_clusters
        self.n_init = n_init
        self.max_iter = max_iter

    #@typing.override
    def run(self, data: np.array) -> ClusterResult:
        kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111)
        labels = kmeans.fit_predict(data)
        centers = kmeans.cluster_centers_
        return ClusterResult(labels, centers, self.get_statistics(data, labels, centers))

    def get_statistics(self, data: np.array, labels: np.array, centers: np.array) -> list[dict[str, Any]]:
        unique_labels = np.unique(labels)
        stats = []
        for label in unique_labels:
            cluster_points = data[labels == label]
            num_points = len(cluster_points)
            center = centers[label]
            stats.append({
                "cluster": label,
                "num_points": num_points,
                "center": center,
            })
        return stats

    def __str__(self) -> str:
        return "KMeans"

CLUSTERING_STRATEGIES = [DBSCANCluster(), KMeansCluster()]
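
For orientation, a minimal sketch of how these strategy classes can be driven outside Streamlit (illustration only, not part of the diff; the DataFrame and column names are hypothetical, and it assumes the module added above is importable as clusters, as the new clustering page does):

import numpy as np
import pandas as pd

from clusters import DBSCANCluster, KMeansCluster

# Hypothetical numeric dataset; any (n_samples, n_features) array works.
df = pd.DataFrame({"x": np.random.rand(100), "y": np.random.rand(100)})
data = df[["x", "y"]].to_numpy()

strategy = KMeansCluster(n_clusters=3, n_init=5, max_iter=200)
result = strategy.run(data)

print(result.labels[:10])    # cluster index assigned to each sample
print(result.centers)        # centroids (None for DBSCANCluster)
print(result.statistics)     # per-cluster dicts, as rendered by st.table() in the page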

@@ -0,0 +1,86 @@
import streamlit as st
import matplotlib.pyplot as plt
from clusters import DBSCANCluster, KMeansCluster, CLUSTERING_STRATEGIES
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import numpy as np

st.header("Clustering")

if "data" in st.session_state:
    data = st.session_state.data

    general_row = st.columns([1, 1, 1])
    clustering = general_row[0].selectbox("Clustering method", CLUSTERING_STRATEGIES)
    data_name = general_row[1].multiselect("Columns", data.select_dtypes(include="number").columns)
    n_components = general_row[2].number_input("Reduce dimensions to (PCA)", min_value=1, max_value=3, value=2)

    with st.form("cluster_form"):
        if isinstance(clustering, KMeansCluster):
            row1 = st.columns([1, 1, 1])
            clustering.n_clusters = row1[0].number_input("Number of clusters", min_value=1, max_value=data.shape[0], value=clustering.n_clusters)
            clustering.n_init = row1[1].number_input("n_init", min_value=1, value=clustering.n_init)
            clustering.max_iter = row1[2].number_input("max_iter", min_value=1, value=clustering.max_iter)
        elif isinstance(clustering, DBSCANCluster):
            row1 = st.columns([1, 1])
            clustering.eps = row1[0].slider("eps", min_value=0.0001, max_value=1.0, step=0.05, value=clustering.eps)
            clustering.min_samples = row1[1].number_input("min_samples", min_value=1, value=clustering.min_samples)

        st.form_submit_button("Launch")

    if len(data_name) > 0:
        x = data[data_name].to_numpy()

        n_components = min(n_components, len(data_name))
        if len(data_name) > n_components:
            pca = PCA(n_components)
            x = pca.fit_transform(x)

            if n_components == 2:
                (fig, ax) = plt.subplots(figsize=(8, 8))
                for i in range(0, pca.components_.shape[1]):
                    ax.arrow(
                        0,
                        0,
                        pca.components_[0, i],
                        pca.components_[1, i],
                        head_width=0.1,
                        head_length=0.1
                    )
                    plt.text(
                        pca.components_[0, i] + 0.05,
                        pca.components_[1, i] + 0.05,
                        data_name[i]
                    )
                circle = plt.Circle((0, 0), radius=1, edgecolor='b', facecolor='None')
                ax.add_patch(circle)
                plt.axis("equal")
                ax.set_title("PCA result - Correlation circle")
                st.pyplot(fig)

        result = clustering.run(x)

        st.write("## Cluster stats")
        st.table(result.statistics)

        st.write("## Graphical representation")
        fig = plt.figure()
        if n_components == 1:
            plt.scatter(x, np.zeros_like(x))
        elif n_components == 2:
            ax = fig.add_subplot(projection='rectilinear')
            plt.scatter(x[:, 0], x[:, 1], c=result.labels, s=50, cmap="viridis")
            if result.centers is not None:
                plt.scatter(result.centers[:, 0], result.centers[:, 1], c="black", s=200, marker="X")
        else:
            ax = fig.add_subplot(projection='3d')
            ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=result.labels, s=50, cmap="viridis")
            if result.centers is not None:
                ax.scatter(result.centers[:, 0], result.centers[:, 1], result.centers[:, 2], c="black", s=200, marker="X")
        st.pyplot(fig)

        if not (result.labels == 0).all():
            st.write("Silhouette score:", silhouette_score(x, result.labels))
    else:
        st.error("Select at least one column")
else:
    st.error("file not loaded")

@@ -1,22 +0,0 @@
import streamlit as st
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from dbscan_strategy import perform_dbscan_clustering

st.header("Clustering: DBSCAN")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("dbscan_form"):
        data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
        eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
        min_samples = st.number_input("min_samples", step=1, min_value=1, value=5)
        submitted = st.form_submit_button("Launch")

    if submitted and 2 <= len(data_name) <= 3:
        fig = perform_dbscan_clustering(data, data_name, eps, min_samples)
        st.pyplot(fig)
else:
    st.error("File not loaded")

@@ -1,26 +0,0 @@
import streamlit as st
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from kmeans_strategy import perform_kmeans_clustering

st.header("Clustering: KMeans")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("kmeans_form"):
        row1 = st.columns([1, 1, 1])
        n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0]))
        data_name = row1[1].multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
        n_init = row1[2].number_input("n_init", step=1, min_value=1)

        row2 = st.columns([1, 1])
        max_iter = row2[0].number_input("max_iter", step=1, min_value=1)

        submitted = st.form_submit_button("Launch")

    if submitted and 2 <= len(data_name) <= 3:
        fig = perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter)
        st.pyplot(fig)
else:
    st.error("File not loaded")

@@ -1,8 +1,5 @@
 import streamlit as st
-import sys
-import os
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from norm_strategy import MVStrategy, ScalingStrategy, KNNStrategy
+from normstrategy import MVStrategy, ScalingStrategy, KNNStrategy
 
 if "data" in st.session_state:
     data = st.session_state.original_data

@@ -1,8 +1,11 @@
 import streamlit as st
-import sys
-import os
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from classification_strategy import perform_classification, make_prediction
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score,confusion_matrix
+from sklearn.preprocessing import LabelEncoder
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
 
 st.header("Prediction: Classification")
 
@@ -11,38 +14,66 @@ if "data" in st.session_state:
     with st.form("classification_form"):
         st.subheader("Classification Parameters")
-        data_name = st.multiselect("Features", data.columns, key="classification_features")
-        target_name = st.selectbox("Target", data.columns, key="classification_target")
-        test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1, key="classification_test_size")
-        submitted = st.form_submit_button('Train and Predict')
+        data_name = st.multiselect("Features", data.columns)
+        target_name = st.selectbox("Target", data.columns)
+        test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1)
+        st.form_submit_button('Train and Predict')
 
-    if submitted and data_name and target_name:
-        try:
-            model, label_encoders, accuracy = perform_classification(data, data_name, target_name, test_size)
-            st.session_state.classification_model = model
-            st.session_state.classification_label_encoders = label_encoders
-            st.session_state.classification_accuracy = accuracy
-            st.session_state.classification_features_selected = data_name
-            st.session_state.classification_target_selected = target_name
-        except ValueError as e:
-            st.error(e)
-
-    if "classification_model" in st.session_state:
+    if data_name and target_name:
+        X = data[data_name]
+        y = data[target_name]
+
+        label_encoders = {}
+        for column in X.select_dtypes(include=['object']).columns:
+            le = LabelEncoder()
+            X[column] = le.fit_transform(X[column])
+            label_encoders[column] = le
+
+        if y.dtype == 'object':
+            le = LabelEncoder()
+            y = le.fit_transform(y)
+            label_encoders[target_name] = le
+
+        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
+
+        model = LogisticRegression()
+        model.fit(X_train, y_train)
+
+        y_pred = model.predict(X_test)
+        accuracy = accuracy_score(y_test, y_pred)
+
         st.subheader("Model Accuracy")
-        st.write(f"Accuracy on test data: {st.session_state.classification_accuracy:.2f}")
+        st.write(f"Accuracy on test data: {accuracy:.2f}")
 
         st.subheader("Enter values for prediction")
-        input_values = []
-        for feature in st.session_state.classification_features_selected:
-            if feature in st.session_state.classification_label_encoders:
-                values = list(st.session_state.classification_label_encoders[feature].classes_)
-                value = st.selectbox(f"Value for {feature}", values, key=f"classification_input_{feature}")
+        pred_values = []
+        for feature in data_name:
+            if feature in label_encoders:
+                values = list(label_encoders[feature].classes_)
+                value = st.selectbox(f"Value for {feature}", values)
+                value_encoded = label_encoders[feature].transform([value])[0]
+                pred_values.append(value_encoded)
             else:
-                value = st.number_input(f"Value for {feature}", value=0.0, key=f"classification_input_{feature}")
-            input_values.append(value)
+                value = st.number_input(f"Value for {feature}", value=0.0)
+                pred_values.append(value)
 
-        prediction = make_prediction(st.session_state.classification_model, st.session_state.classification_label_encoders, st.session_state.classification_features_selected, st.session_state.classification_target_selected, input_values)
-        st.write("Prediction:", prediction)
+        prediction = model.predict(pd.DataFrame([pred_values], columns=data_name))
+        if target_name in label_encoders:
+            prediction = label_encoders[target_name].inverse_transform(prediction)
+        st.write("Prediction:", prediction[0])
+
+        if len(data_name) == 1:
+            fig = plt.figure()
+            y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()]
+            cm = confusion_matrix(y, y_pred)
+            sns.heatmap(cm, annot=True, fmt="d")
+            plt.xlabel('Predicted')
+            plt.ylabel('True')
+            st.pyplot(fig)
 else:
     st.error("File not loaded")

@@ -1,8 +1,8 @@
 import streamlit as st
-import sys
-import os
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from regression_strategy import perform_regression, make_prediction
+from sklearn.linear_model import LinearRegression
+from sklearn.metrics import r2_score
+import pandas as pd
+import matplotlib.pyplot as plt
 
 st.header("Prediction: Regression")
 
@@ -11,24 +11,53 @@ if "data" in st.session_state:
     with st.form("regression_form"):
         st.subheader("Linear Regression Parameters")
-        data_name = st.multiselect("Features", data.select_dtypes(include="number").columns, key="regression_features")
-        target_name = st.selectbox("Target", data.select_dtypes(include="number").columns, key="regression_target")
-        submitted = st.form_submit_button('Train and Predict')
+        data_name = st.multiselect("Features", data.select_dtypes(include="number").columns)
+        target_name = st.selectbox("Target", data.select_dtypes(include="number").columns)
+        st.form_submit_button('Train and Predict')
 
-    if submitted and data_name and target_name:
-        try:
-            model = perform_regression(data, data_name, target_name)
-            st.session_state.regression_model = model
-            st.session_state.regression_features_selected = data_name
-            st.session_state.regression_target_selected = target_name
-        except ValueError as e:
-            st.error(e)
-
-    if "regression_model" in st.session_state:
+    if data_name and target_name:
+        X = data[data_name]
+        y = data[target_name]
+
+        model = LinearRegression()
+        model.fit(X, y)
+
         st.subheader("Enter values for prediction")
-        input_values = [st.number_input(f"Value for {feature}", value=0.0, key=f"regression_input_{feature}") for feature in st.session_state.regression_features_selected]
-        prediction = make_prediction(st.session_state.regression_model, st.session_state.regression_features_selected, input_values)
-        st.write("Prediction:", prediction)
+        pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name]
+        prediction = model.predict(pd.DataFrame([pred_values], columns=data_name))
+        st.write("Prediction:", prediction[0])
+
+        fig = plt.figure()
+        dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name)
+        if len(data_name) == 1:
+            y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()]
+            r2 = r2_score(y, y_pred)
+            st.write('R-squared score:', r2)
+
+            X = dataframe_sorted[data_name[0]]
+            y = dataframe_sorted[target_name]
+            prediction_array_y = [
+                model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0]
+                for i in range(dataframe_sorted.shape[0])
+            ]
+            plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b')
+            plt.plot(dataframe_sorted[data_name[0]], prediction_array_y, color='r')
+        elif len(data_name) == 2:
+            ax = fig.add_subplot(111, projection='3d')
+            prediction_array_y = [
+                model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i], dataframe_sorted[data_name[1]].iloc[i]]], columns=data_name))[0]
+                for i in range(dataframe_sorted.shape[0])
+            ]
+            ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b')
+            ax.plot(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r')
+        st.pyplot(fig)
 else:
     st.error("File not loaded")

@@ -1,25 +1,30 @@
 import streamlit as st
-import sys
-import os
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from visualization_strategy import plot_histogram, plot_boxplot
+import matplotlib.pyplot as plt
+import seaborn as sns
 
 st.header("Data Visualization")
 
 if "data" in st.session_state:
     data = st.session_state.data
 
     st.subheader("Histogram")
     column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
     if column_to_plot:
-        fig = plot_histogram(data, column_to_plot)
+        fig, ax = plt.subplots()
+        ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
+        ax.set_title(f"Histogram of {column_to_plot}")
+        ax.set_xlabel(column_to_plot)
+        ax.set_ylabel("Frequency")
         st.pyplot(fig)
 
     st.subheader("Boxplot")
     dataNumeric = data.select_dtypes(include="number")
     column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
     if column_to_plot:
-        fig = plot_boxplot(data, column_to_plot)
+        fig, ax = plt.subplots()
+        sns.boxplot(data=data, x=column_to_plot, ax=ax)
+        ax.set_title(f"Boxplot of {column_to_plot}")
         st.pyplot(fig)
 else:
     st.error("file not loaded")