Compare commits
15 Commits
987e255dad ... c8cf0fe045
| Author | SHA1 | Date |
|---|---|---|
| | c8cf0fe045 | 1 year ago |
| | 4d82767c68 | 1 year ago |
| | 9cb0d90eb1 | 1 year ago |
| | 3eac3f6b8d | 1 year ago |
| | c87308cc21 | 1 year ago |
| | d4aeb87f75 | 1 year ago |
| | 3c5f6849f8 | 1 year ago |
| | cd0c85ea44 | 1 year ago |
| | 96d390c749 | 1 year ago |
| | 089cc66042 | 1 year ago |
| | 2d1c867bed | 1 year ago |
| | a914c3f8f9 | 1 year ago |
| | 70641ebca4 | 1 year ago |
| | e5f05a2c8a | 1 year ago |
| | 972fde561f | 1 year ago |
@@ -0,0 +1,44 @@
```yaml
kind: pipeline
name: default
type: docker

trigger:
  event:
    - push

steps:
  - name: lint
    image: python:3.12
    commands:
      - pip install --root-user-action=ignore -r requirements.txt
      - ruff check .

  - name: docker-image
    image: plugins/docker
    settings:
      dockerfile: Dockerfile
      registry: hub.codefirst.iut.uca.fr
      repo: hub.codefirst.iut.uca.fr/bastien.ollier/miner
      username:
        from_secret: REGISTRY_USER
      password:
        from_secret: REGISTRY_PASSWORD
      cache_from:
        - hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
    depends_on: [ lint ]

  - name: deploy-miner
    image: hub.codefirst.iut.uca.fr/clement.freville2/codefirst-dockerproxy-clientdrone:latest
    settings:
      image: hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
      container: miner
      command: create
      overwrite: true
      admins: bastienollier,clementfreville2,hugopradier2
      environment:
        DRONE_REPO_OWNER: bastien.ollier
    depends_on: [ docker-image ]
    when:
      branch:
        - main
        - ci/*
```
@@ -1 +1,2 @@
```
__pycache__
.venv
```
@@ -0,0 +1,9 @@
```dockerfile
FROM python:3.12-slim

WORKDIR /app

COPY . .
RUN pip3 install -r requirements.txt

EXPOSE 80
ENTRYPOINT ["streamlit", "run", "frontend/exploration.py", "--server.port=80", "--server.address=0.0.0.0", "--server.baseUrlPath=/containers/bastienollier-miner"]
```
@@ -1 +0,0 @@
```python
from . import normstrategy
```
@@ -1,179 +0,0 @@
```python
from abc import ABC, abstractmethod
from pandas import DataFrame, Series
from pandas.api.types import is_numeric_dtype
from sklearn.neighbors import KNeighborsClassifier
from typing import Any, Union


class DataFrameFunction(ABC):
    """A command that may be applied in-place to a dataframe."""

    @abstractmethod
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        """Apply the current function to the given dataframe, in-place.

        The series is described by its label and dataframe."""
        return df


class MVStrategy(DataFrameFunction):
    """A way to handle missing values in a dataframe."""

    @staticmethod
    def list_available(df: DataFrame, label: str, series: Series) -> list['MVStrategy']:
        """Get all the strategies that can be used."""
        choices = [DropStrategy(), ModeStrategy()]
        if is_numeric_dtype(series):
            choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy()))
            other_columns = df.select_dtypes(include="number").drop(label, axis=1).columns.to_list()
            if len(other_columns):
                choices.append(KNNStrategy(other_columns))
        return choices


class ScalingStrategy(DataFrameFunction):
    """A way to scale the values of a dataframe."""

    @staticmethod
    def list_available(df: DataFrame, series: Series) -> list['ScalingStrategy']:
        """Get all the strategies that can be used."""
        choices = [KeepStrategy()]
        if is_numeric_dtype(series):
            choices.extend((MinMaxStrategy(), ZScoreStrategy()))
            if series.sum() != 0:
                choices.append(UnitLengthStrategy())
        return choices


class DropStrategy(MVStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        df.dropna(subset=label, inplace=True)
        return df

    def __str__(self) -> str:
        return "Drop"


class PositionStrategy(MVStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        series.fillna(self.get_value(series), inplace=True)
        return df

    @abstractmethod
    def get_value(self, series: Series) -> Any:
        pass


class MeanStrategy(PositionStrategy):
    #@typing.override
    def get_value(self, series: Series) -> Union[int, float]:
        return series.mean()

    def __str__(self) -> str:
        return "Use mean"


class MedianStrategy(PositionStrategy):
    #@typing.override
    def get_value(self, series: Series) -> Union[int, float]:
        return series.median()

    def __str__(self) -> str:
        return "Use median"


class ModeStrategy(PositionStrategy):
    #@typing.override
    def get_value(self, series: Series) -> Any:
        return series.mode()[0]

    def __str__(self) -> str:
        return "Use mode"


class LinearRegressionStrategy(MVStrategy):
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        series.interpolate(inplace=True)
        return df

    def __str__(self) -> str:
        return "Use linear regression"


class KNNStrategy(MVStrategy):
    def __init__(self, training_features: list[str]):
        self.available_features = training_features
        self.training_features = training_features
        self.n_neighbors = 3

    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        # Drop any row with a missing value in one of the training columns
        usable_data = df.dropna(subset=self.training_features)
        # Keep the rows where the target label is known as training data
        train_data = usable_data.dropna(subset=label)
        # Create train dataframe
        x_train = train_data.drop(label, axis=1)
        y_train = train_data[label]

        reg = KNeighborsClassifier(self.n_neighbors).fit(x_train, y_train)

        # Create test dataframe
        test_data = usable_data[usable_data[label].isnull()]
        if test_data.empty:
            return df
        x_test = test_data.drop(label, axis=1)
        predicted = reg.predict(x_test)

        # Fill with predicted values and patch the original data
        usable_data[label].fillna(Series(predicted), inplace=True)
        df.fillna(usable_data, inplace=True)
        return df

    def count_max(self, df: DataFrame, label: str) -> int:
        usable_data = df.dropna(subset=self.training_features)
        return usable_data[label].count()

    def __str__(self) -> str:
        return "kNN"


class KeepStrategy(ScalingStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        return df

    def __str__(self) -> str:
        return "No-op"


class MinMaxStrategy(ScalingStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        minimum = series.min()
        maximum = series.max()
        df[label] = (series - minimum) / (maximum - minimum)
        return df

    def __str__(self) -> str:
        return "Min-max"


class ZScoreStrategy(ScalingStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        df[label] = (series - series.mean()) / series.std()
        return df

    def __str__(self) -> str:
        return "Z-Score"


class UnitLengthStrategy(ScalingStrategy):
    #@typing.override
    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
        df[label] = series / series.sum()
        return df

    def __str__(self) -> str:
        return "Unit length"
```
@@ -0,0 +1,64 @@
```python
import streamlit as st
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import pandas as pd

st.header("Prediction: Classification")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("classification_form"):
        st.subheader("Classification Parameters")
        data_name = st.multiselect("Features", data.columns)
        target_name = st.selectbox("Target", data.columns)
        test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1)
        st.form_submit_button('Train and Predict')

    if data_name and target_name:
        X = data[data_name]
        y = data[target_name]

        label_encoders = {}
        for column in X.select_dtypes(include=['object']).columns:
            le = LabelEncoder()
            X[column] = le.fit_transform(X[column])
            label_encoders[column] = le

        if y.dtype == 'object':
            le = LabelEncoder()
            y = le.fit_transform(y)
            label_encoders[target_name] = le

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)

        model = LogisticRegression()
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)

        st.subheader("Model Accuracy")
        st.write(f"Accuracy on test data: {accuracy:.2f}")

        st.subheader("Enter values for prediction")
        pred_values = []
        for feature in data_name:
            if feature in label_encoders:
                values = list(label_encoders[feature].classes_)
                value = st.selectbox(f"Value for {feature}", values)
                value_encoded = label_encoders[feature].transform([value])[0]
                pred_values.append(value_encoded)
            else:
                value = st.number_input(f"Value for {feature}", value=0.0)
                pred_values.append(value)

        prediction = model.predict(pd.DataFrame([pred_values], columns=data_name))

        if target_name in label_encoders:
            prediction = label_encoders[target_name].inverse_transform(prediction)

        st.write("Prediction:", prediction[0])
else:
    st.error("File not loaded")
```
@@ -0,0 +1,29 @@
```python
import streamlit as st
from sklearn.linear_model import LinearRegression
import pandas as pd

st.header("Prediction: Regression")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("regression_form"):
        st.subheader("Linear Regression Parameters")
        data_name = st.multiselect("Features", data.select_dtypes(include="number").columns)
        target_name = st.selectbox("Target", data.select_dtypes(include="number").columns)
        st.form_submit_button('Train and Predict')

    if data_name and target_name:
        X = data[data_name]
        y = data[target_name]

        model = LinearRegression()
        model.fit(X, y)

        st.subheader("Enter values for prediction")
        pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name]
        prediction = model.predict(pd.DataFrame([pred_values], columns=data_name))

        st.write("Prediction:", prediction[0])
else:
    st.error("File not loaded")
```
@@ -0,0 +1,6 @@
```
matplotlib>=3.5.0
pandas>=1.5.0
seaborn>=0.12.0
scikit-learn>=0.23.0
streamlit>=1.35.0
ruff>=0.4.8
```