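"""UFC fight predictor Flask app.

Loads the fight data, imputes and cleans it, trains a random-forest
model at startup, and serves win-probability predictions over HTTP.
The helper functions used below (displayNumberOfNaNValues, list_fighters,
build_df_all_but_last, build_df, predict) are expected to come from the
accompanying test.py module.
"""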
from flask import Flask, render_template, request

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, LabelEncoder
from sklearn.compose import make_column_transformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, classification_report

from test import *  # make sure test.py provides your predict function and the other helpers used below
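
# Expected shape of the main helper imported from test.py, inferred from the
# call in make_prediction below (the real implementation lives in test.py):
#   predict(df, model, blue_fighter, red_fighter, weightclass, rounds, title_bout)
#   -> array-like of shape (1, 2): [[blue_win_proba, red_win_proba]]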

app = Flask(__name__)

# Load the DataFrame once at startup to save resources
df = pd.read_csv('archive/data.csv')  # adjust the path to your data file

# Keep only fights after this date
limit_date = '2001-04-01'
df = df[df['date'] > limit_date]

displayNumberOfNaNValues(df)

imp_features = ['R_Weight_lbs', 'R_Height_cms', 'B_Height_cms', 'R_age', 'B_age', 'R_Reach_cms', 'B_Reach_cms']
imp_median = SimpleImputer(missing_values=np.nan, strategy='median')

# Iterate over each feature to impute missing values
for feature in imp_features:
    # Fit and transform the feature using median imputation
    imp_feature = imp_median.fit_transform(df[feature].values.reshape(-1, 1))
    # Assign the imputed values back to the DataFrame
    df[feature] = imp_feature

# Impute missing values for 'R_Stance' using the most frequent value
imp_stance_R = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp_R_stance = imp_stance_R.fit_transform(df['R_Stance'].values.reshape(-1, 1))

# Impute missing values for 'B_Stance' using the most frequent value
imp_stance_B = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp_B_stance = imp_stance_B.fit_transform(df['B_Stance'].values.reshape(-1, 1))

# Assign the imputed stances back as flat arrays: wrapping them in a new
# DataFrame would align on a fresh 0..n-1 index and, because df was filtered
# above, reintroduce NaN values
df['R_Stance'] = imp_R_stance.ravel()
df['B_Stance'] = imp_B_stance.ravel()

# Drop columns that should not influence the prediction
df.drop(['Referee', 'location'], axis=1, inplace=True)

# Drop the 'B_draw' and 'R_draw' columns, then remove 'Draw' fights and 'Catch Weight' fights
df.drop(['B_draw', 'R_draw'], axis=1, inplace=True)
df = df[df['Winner'] != 'Draw']
df = df[df['weight_class'] != 'Catch Weight']

# Keep only the numerical columns (float or int)
dfWithoutString = df.select_dtypes(include=['float64', 'int64'])

# Correlation heatmap of the numerical features
plt.figure(figsize=(50, 40))
corr_matrix = dfWithoutString.corr(method='pearson').abs()
sns.heatmap(corr_matrix, annot=True)

# Build the train and test sets with the helpers from test.py
fighters = list_fighters(df, '2015-01-01')
df_train = build_df_all_but_last(df, fighters)
df_test = build_df(df, fighters, 0)

# Ordinal-encode the categorical columns; the numeric columns pass through unchanged
preprocessor = make_column_transformer(
    (OrdinalEncoder(), ['weight_class', 'B_Stance', 'R_Stance']),
    remainder='passthrough')

# Encode the target: LabelEncoder sorts classes alphabetically, so 'Blue' -> 0 and 'Red' -> 1
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(df_train['Winner'])
y_test = label_encoder.transform(df_test['Winner'])

X_train, X_test = df_train.drop(['Winner'], axis=1), df_test.drop(['Winner'], axis=1)

# Random forest of 100 decision trees; parameters were tuned with
# cross-validation paired with grid search
random_forest = RandomForestClassifier(n_estimators=100,
                                       criterion='entropy',
                                       max_depth=10,
                                       min_samples_split=2,
                                       min_samples_leaf=1,
                                       random_state=0)

model = Pipeline([('encoding', preprocessor), ('random_forest', random_forest)])
model.fit(X_train, y_train)
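
# Note: because preprocessing lives inside the Pipeline, cross_val_score below
# clones and re-fits the whole pipeline on each training fold, so the encoder
# never sees the validation fold (no information leakage).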

# 5-fold cross-validation gives a more reliable accuracy estimate (less variance)
accuracies = cross_val_score(estimator=model, X=X_train, y=y_train, cv=5)
print('Accuracy mean:', accuracies.mean())
print('Accuracy standard deviation:', accuracies.std())

y_pred = model.predict(X_test)
print('Testing accuracy:', accuracy_score(y_test, y_pred), '\n')

# Label 0 is 'Blue' and label 1 is 'Red' (alphabetical LabelEncoder order)
target_names = ["Blue", "Red"]
print(classification_report(y_test, y_pred, labels=[0, 1], target_names=target_names))

# Feature names in the order the forest actually sees them: the column
# transformer outputs the ordinal-encoded columns first, then the passthrough
# columns, so X_train's original column order would mislabel the importances
encoded_cols = ['weight_class', 'B_Stance', 'R_Stance']
feature_names = encoded_cols + [col for col in X_train.columns if col not in encoded_cols]
feature_importances = model['random_forest'].feature_importances_
indices = np.argsort(feature_importances)[::-1]
n = 30  # maximum number of feature importances displayed
idx = indices[0:n]
std = np.std([tree.feature_importances_ for tree in model['random_forest'].estimators_], axis=0)

# for f in range(n):
#     print("%d. feature %s (%f)" % (f + 1, feature_names[idx[f]], feature_importances[idx[f]]))

# plt.figure(figsize=(30, 8))
# plt.title("Feature importances")
# plt.bar(range(n), feature_importances[idx], color="r", yerr=std[idx], align="center")
# plt.xticks(range(n), [feature_names[id] for id in idx], rotation=45)
# plt.xlim([-1, n])
# plt.show()

# Pick one tree from the forest, e.g. to visualize it (see the sketch below)
tree_estimator = model['random_forest'].estimators_[10]
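
# A minimal sketch of how this tree could be drawn (kept commented out,
# matching the plotting blocks above); sklearn.tree.plot_tree is standard
# scikit-learn (>= 0.21). Feature names are omitted because the tree was
# trained on the transformed columns, not on X_train's original order.
# from sklearn.tree import plot_tree
# plt.figure(figsize=(20, 10))
# plot_tree(tree_estimator, max_depth=2, filled=True)
# plt.show()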

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/predict', methods=['POST'])
def make_prediction():
    blue_fighter = request.form['blue_fighter']
    red_fighter = request.form['red_fighter']
    weightclass = request.form['weightclass']
    rounds = int(request.form['rounds'])
    title_bout = request.form['title_bout'] == 'True'

    prediction_proba = predict(df, model, blue_fighter, red_fighter, weightclass, rounds, title_bout)

    # Format the result for display in the browser
    result = ""
    if prediction_proba is not None:
        result = (f"The predicted probability of {blue_fighter} winning is "
                  f"{round(prediction_proba[0][0] * 100, 2)}% and the predicted probability of "
                  f"{red_fighter} winning is {round(prediction_proba[0][1] * 100, 2)}%")

    return render_template('result.html', result=result)

if __name__ == '__main__':
    app.run(debug=True)
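
# To start the development server (assuming this file is saved as app.py):
#   python app.py
# Flask then serves the app on http://127.0.0.1:5000/ by default.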