Clean file + PHP API

master
Pierre BALLANDRAS 4 months ago
parent 0e422c4a51
commit f65cbf5b5a

3
.gitignore vendored

@@ -1,2 +1,5 @@
UTKFace/
progress_images/
.venv/
.vscode/
src/api/vendor/

@@ -10,21 +10,65 @@ This project aims to develop an application for simulating the
## Deliverables
### Features
- Optimized, well-organized code (software architecture and tests)
- A README explaining the functions used in the code
- A clear and concise presentation following the instructions

### Task list and tools (non-exhaustive):
- Development of a Tkinter or web interface
- Use of Python and free/open-source libraries
- Implementation of an AI based on supervised learning

### The application, step by step:
- Load a real 2D color image of a face
- Apply progressive aging to the face
- Display the result as a video

## Project
You have at your disposal the online documentation, LLMs, and books I can lend you. The essential thing is that you master the details of the code you hand in.

The train files are used to train the models.
The face_aging_model.h5 file predicts age from an image.
The face_aging_autoencoder.h5 file applies an aging effect to a face (128×128).
The test files are used to check that the models work.
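To make the pipeline concrete, here is a minimal sketch of how these two models are loaded and used, mirroring app.py and the scripts/ files; it assumes the two .h5 files and a visage.jpg test image sit in the working directory:
```python
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import MeanSquaredError

# Both models were saved with a custom 'mse' loss, so it must be
# registered at load time (as done in app.py and the scripts).
custom = {"mse": MeanSquaredError()}
age_model = load_model("face_aging_model.h5", custom_objects=custom)
autoencoder = load_model("face_aging_autoencoder.h5", custom_objects=custom)

# Both models expect a normalized 128x128 RGB image.
img = np.array(Image.open("visage.jpg").resize((128, 128))) / 255.0
batch = np.expand_dims(img, axis=0)

print("Predicted age:", age_model.predict(batch)[0][0])

aged = np.clip(autoencoder.predict(batch)[0], 0, 1)
Image.fromarray((aged * 255).astype(np.uint8)).save("visage_aged.jpg")
```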
## Running the Tkinter application
### Installing dependencies
```bash
pip install -r requirements.txt
```
### Running the application
```bash
python app.py
```
## Running the PHP API
### Installing dependencies
From the src/api directory (where composer.json lives):
```bash
curl -sS https://getcomposer.org/installer | php
php composer.phar update
```
### Starting the server
Start the built-in server from src/api/public, so that index.php is the document root and the relative ../scripts paths used by exec() resolve correctly:
```bash
php -S localhost:8000
```
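Once the server is running, the endpoints defined in public/index.php can be exercised from Python; a sketch, assuming a local server on port 8000 and the requests package (already pinned in requirements.txt):
```python
import requests

BASE = "http://localhost:8000"

# Upload a face image; the API stores it under uploads/ and returns its path.
with open("visage.jpg", "rb") as f:
    resp = requests.post(f"{BASE}/upload",
                         files={"image": ("visage.jpg", f, "image/jpeg")})
resp.raise_for_status()
# /predict-age and /apply-aging expect the bare filename (the routes prepend uploads/).
filename = resp.json()["image_path"].split("/")[-1]

# Predict the age of the uploaded face.
print(requests.get(f"{BASE}/predict-age", params={"image_path": filename}).json())

# Apply the aging effect; the route returns the aged image as JPEG bytes.
aged = requests.get(f"{BASE}/apply-aging", params={"image_path": filename})
with open("visage_aged_api.jpg", "wb") as out:
    out.write(aged.content)
```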
## Project structure
- app.py: main script for the Tkinter application.
- public/: directory containing the PHP files for the API.
- uploads/: directory storing the uploaded images.
- scripts/: directory containing the Python scripts for image processing.
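Note: index.php resolves uploads/ via `__DIR__ . '/../uploads'` (that is, src/api/uploads), while its `exec()` calls reference `../scripts/` relative to the server's working directory, which is why the built-in server should be started from public/ as shown above.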

Binary files changed (previews not shown).

228
app.py

@@ -1,164 +1,88 @@
import tkinter as tk
from tkinter import filedialog, messagebox
from PIL import Image, ImageTk
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.utils import get_custom_objects

def mse(y_true, y_pred):
    return MeanSquaredError()(y_true, y_pred)

get_custom_objects().update({'mse': mse})

# Load the models
face_aging_model = load_model("face_aging_model.h5", custom_objects={"mse": mse})
face_aging_autoencoder = load_model("face_aging_autoencoder.h5", custom_objects={"mse": mse})

def load_image():
    file_path = filedialog.askopenfilename(title="Sélectionner une image", filetypes=[("Image Files", "*.jpg;*.png;*.jpeg")])
    if file_path:
        img = Image.open(file_path)
        img = img.resize((256, 256))  # Resize the image for display
        img_tk = ImageTk.PhotoImage(img)
        original_panel.configure(image=img_tk)
        original_panel.image = img_tk
        global image_data, image_path
        image_path = file_path
        image_data = np.array(img) / 255.0  # Normalize the image
        image_data = np.expand_dims(image_data, axis=0)

def predict_age():
    if 'image_path' in globals():
        predicted_age = predict_age_from_model(face_aging_model, image_path)
        messagebox.showinfo("Prédiction d'âge", f"L'âge prédit du visage est : {predicted_age:.2f} ans")
    else:
        messagebox.showerror("Erreur", "Veuillez d'abord charger une image.")

def predict_age_from_model(model, image_path):
    img = Image.open(image_path).resize((128, 128))
    img_array = np.array(img) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    prediction = model.predict(img_array)
    return prediction[0][0]

def apply_aging_effect(model, image_path):
    img = Image.open(image_path).resize((128, 128))
    img_array = np.array(img) / 255.0
    predicted_img = model.predict(np.expand_dims(img_array, axis=0))
    predicted_img = np.clip(predicted_img[0], 0, 1)
    predicted_img = (predicted_img * 255).astype(np.uint8)
    return Image.fromarray(predicted_img)

def show_aged_image():
    if 'image_path' in globals():
        aged_image = apply_aging_effect(face_aging_autoencoder, image_path)
        aged_image_tk = ImageTk.PhotoImage(aged_image.resize((256, 256)))
        aged_panel.configure(image=aged_image_tk)
        aged_panel.image = aged_image_tk
    else:
        messagebox.showerror("Erreur", "Veuillez d'abord charger une image.")

# Create the main Tkinter window
root = tk.Tk()
root.title("Face Age Prediction and Aging")

# Original image display
original_panel = tk.Label(root)
original_panel.pack(pady=10)

# Buttons and widgets
load_button = tk.Button(root, text="Charger une image", command=load_image)
load_button.pack(pady=5)

predict_button = tk.Button(root, text="Prédire l'âge", command=predict_age)
predict_button.pack(pady=5)

age_button = tk.Button(root, text="Afficher l'image vieillie", command=show_aged_image)
age_button.pack(pady=5)

# Aged image display
aged_panel = tk.Label(root)
aged_panel.pack(pady=10)

# Run the interface
root.mainloop()

Binary image files removed (previews not shown).

requirements.txt
@@ -0,0 +1,50 @@
absl-py==2.2.0
astunparse==1.6.3
certifi==2025.1.31
charset-normalizer==3.4.1
contourpy==1.3.1
cycler==0.12.1
flatbuffers==25.2.10
fonttools==4.56.0
gast==0.6.0
google-pasta==0.2.0
grpcio==1.71.0
h5py==3.13.0
idna==3.10
joblib==1.4.2
keras==3.9.0
kiwisolver==1.4.8
libclang==18.1.1
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
mdurl==0.1.2
ml_dtypes==0.5.1
namex==0.0.8
numpy==2.1.3
opencv-python==4.11.0.86
opt_einsum==3.4.0
optree==0.14.1
packaging==24.2
pillow==11.1.0
protobuf==5.29.4
Pygments==2.19.1
pyparsing==3.2.3
python-dateutil==2.9.0.post0
requests==2.32.3
rich==13.9.4
scikit-learn==1.6.1
scipy==1.15.2
setuptools==78.1.0
six==1.17.0
tensorboard==2.19.0
tensorboard-data-server==0.7.2
tensorflow==2.19.0
termcolor==2.5.0
threadpoolctl==3.6.0
typing_extensions==4.13.0
urllib3==2.3.0
Werkzeug==3.1.3
wheel==0.45.1
wrapt==1.17.2

src/api/composer.json
@@ -0,0 +1,6 @@
{
"require": {
"slim/psr7": "^1.7",
"slim/slim": "^4.14"
}
}

771
src/api/composer.lock generated

@@ -0,0 +1,771 @@
{
"_readme": [
"This file locks the dependencies of your project to a known state",
"Read more about it at https://getcomposer.org/doc/01-basic-usage.md#installing-dependencies",
"This file is @generated automatically"
],
"content-hash": "6df1366a2decb2a1f901ea6014c526f9",
"packages": [
{
"name": "fig/http-message-util",
"version": "1.1.5",
"source": {
"type": "git",
"url": "https://github.com/php-fig/http-message-util.git",
"reference": "9d94dc0154230ac39e5bf89398b324a86f63f765"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/http-message-util/zipball/9d94dc0154230ac39e5bf89398b324a86f63f765",
"reference": "9d94dc0154230ac39e5bf89398b324a86f63f765",
"shasum": ""
},
"require": {
"php": "^5.3 || ^7.0 || ^8.0"
},
"suggest": {
"psr/http-message": "The package containing the PSR-7 interfaces"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "1.1.x-dev"
}
},
"autoload": {
"psr-4": {
"Fig\\Http\\Message\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Utility classes and constants for use with PSR-7 (psr/http-message)",
"keywords": [
"http",
"http-message",
"psr",
"psr-7",
"request",
"response"
],
"support": {
"issues": "https://github.com/php-fig/http-message-util/issues",
"source": "https://github.com/php-fig/http-message-util/tree/1.1.5"
},
"time": "2020-11-24T22:02:12+00:00"
},
{
"name": "nikic/fast-route",
"version": "v1.3.0",
"source": {
"type": "git",
"url": "https://github.com/nikic/FastRoute.git",
"reference": "181d480e08d9476e61381e04a71b34dc0432e812"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/nikic/FastRoute/zipball/181d480e08d9476e61381e04a71b34dc0432e812",
"reference": "181d480e08d9476e61381e04a71b34dc0432e812",
"shasum": ""
},
"require": {
"php": ">=5.4.0"
},
"require-dev": {
"phpunit/phpunit": "^4.8.35|~5.7"
},
"type": "library",
"autoload": {
"files": [
"src/functions.php"
],
"psr-4": {
"FastRoute\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"BSD-3-Clause"
],
"authors": [
{
"name": "Nikita Popov",
"email": "nikic@php.net"
}
],
"description": "Fast request router for PHP",
"keywords": [
"router",
"routing"
],
"support": {
"issues": "https://github.com/nikic/FastRoute/issues",
"source": "https://github.com/nikic/FastRoute/tree/master"
},
"time": "2018-02-13T20:26:39+00:00"
},
{
"name": "psr/container",
"version": "2.0.2",
"source": {
"type": "git",
"url": "https://github.com/php-fig/container.git",
"reference": "c71ecc56dfe541dbd90c5360474fbc405f8d5963"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/container/zipball/c71ecc56dfe541dbd90c5360474fbc405f8d5963",
"reference": "c71ecc56dfe541dbd90c5360474fbc405f8d5963",
"shasum": ""
},
"require": {
"php": ">=7.4.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "2.0.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Container\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Common Container Interface (PHP FIG PSR-11)",
"homepage": "https://github.com/php-fig/container",
"keywords": [
"PSR-11",
"container",
"container-interface",
"container-interop",
"psr"
],
"support": {
"issues": "https://github.com/php-fig/container/issues",
"source": "https://github.com/php-fig/container/tree/2.0.2"
},
"time": "2021-11-05T16:47:00+00:00"
},
{
"name": "psr/http-factory",
"version": "1.1.0",
"source": {
"type": "git",
"url": "https://github.com/php-fig/http-factory.git",
"reference": "2b4765fddfe3b508ac62f829e852b1501d3f6e8a"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/http-factory/zipball/2b4765fddfe3b508ac62f829e852b1501d3f6e8a",
"reference": "2b4765fddfe3b508ac62f829e852b1501d3f6e8a",
"shasum": ""
},
"require": {
"php": ">=7.1",
"psr/http-message": "^1.0 || ^2.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "1.0.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Http\\Message\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "PSR-17: Common interfaces for PSR-7 HTTP message factories",
"keywords": [
"factory",
"http",
"message",
"psr",
"psr-17",
"psr-7",
"request",
"response"
],
"support": {
"source": "https://github.com/php-fig/http-factory"
},
"time": "2024-04-15T12:06:14+00:00"
},
{
"name": "psr/http-message",
"version": "2.0",
"source": {
"type": "git",
"url": "https://github.com/php-fig/http-message.git",
"reference": "402d35bcb92c70c026d1a6a9883f06b2ead23d71"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/http-message/zipball/402d35bcb92c70c026d1a6a9883f06b2ead23d71",
"reference": "402d35bcb92c70c026d1a6a9883f06b2ead23d71",
"shasum": ""
},
"require": {
"php": "^7.2 || ^8.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "2.0.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Http\\Message\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Common interface for HTTP messages",
"homepage": "https://github.com/php-fig/http-message",
"keywords": [
"http",
"http-message",
"psr",
"psr-7",
"request",
"response"
],
"support": {
"source": "https://github.com/php-fig/http-message/tree/2.0"
},
"time": "2023-04-04T09:54:51+00:00"
},
{
"name": "psr/http-server-handler",
"version": "1.0.2",
"source": {
"type": "git",
"url": "https://github.com/php-fig/http-server-handler.git",
"reference": "84c4fb66179be4caaf8e97bd239203245302e7d4"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/http-server-handler/zipball/84c4fb66179be4caaf8e97bd239203245302e7d4",
"reference": "84c4fb66179be4caaf8e97bd239203245302e7d4",
"shasum": ""
},
"require": {
"php": ">=7.0",
"psr/http-message": "^1.0 || ^2.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "1.0.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Http\\Server\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Common interface for HTTP server-side request handler",
"keywords": [
"handler",
"http",
"http-interop",
"psr",
"psr-15",
"psr-7",
"request",
"response",
"server"
],
"support": {
"source": "https://github.com/php-fig/http-server-handler/tree/1.0.2"
},
"time": "2023-04-10T20:06:20+00:00"
},
{
"name": "psr/http-server-middleware",
"version": "1.0.2",
"source": {
"type": "git",
"url": "https://github.com/php-fig/http-server-middleware.git",
"reference": "c1481f747daaa6a0782775cd6a8c26a1bf4a3829"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/http-server-middleware/zipball/c1481f747daaa6a0782775cd6a8c26a1bf4a3829",
"reference": "c1481f747daaa6a0782775cd6a8c26a1bf4a3829",
"shasum": ""
},
"require": {
"php": ">=7.0",
"psr/http-message": "^1.0 || ^2.0",
"psr/http-server-handler": "^1.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "1.0.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Http\\Server\\": "src/"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Common interface for HTTP server-side middleware",
"keywords": [
"http",
"http-interop",
"middleware",
"psr",
"psr-15",
"psr-7",
"request",
"response"
],
"support": {
"issues": "https://github.com/php-fig/http-server-middleware/issues",
"source": "https://github.com/php-fig/http-server-middleware/tree/1.0.2"
},
"time": "2023-04-11T06:14:47+00:00"
},
{
"name": "psr/log",
"version": "3.0.2",
"source": {
"type": "git",
"url": "https://github.com/php-fig/log.git",
"reference": "f16e1d5863e37f8d8c2a01719f5b34baa2b714d3"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/php-fig/log/zipball/f16e1d5863e37f8d8c2a01719f5b34baa2b714d3",
"reference": "f16e1d5863e37f8d8c2a01719f5b34baa2b714d3",
"shasum": ""
},
"require": {
"php": ">=8.0.0"
},
"type": "library",
"extra": {
"branch-alias": {
"dev-master": "3.x-dev"
}
},
"autoload": {
"psr-4": {
"Psr\\Log\\": "src"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "PHP-FIG",
"homepage": "https://www.php-fig.org/"
}
],
"description": "Common interface for logging libraries",
"homepage": "https://github.com/php-fig/log",
"keywords": [
"log",
"psr",
"psr-3"
],
"support": {
"source": "https://github.com/php-fig/log/tree/3.0.2"
},
"time": "2024-09-11T13:17:53+00:00"
},
{
"name": "ralouphie/getallheaders",
"version": "3.0.3",
"source": {
"type": "git",
"url": "https://github.com/ralouphie/getallheaders.git",
"reference": "120b605dfeb996808c31b6477290a714d356e822"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/ralouphie/getallheaders/zipball/120b605dfeb996808c31b6477290a714d356e822",
"reference": "120b605dfeb996808c31b6477290a714d356e822",
"shasum": ""
},
"require": {
"php": ">=5.6"
},
"require-dev": {
"php-coveralls/php-coveralls": "^2.1",
"phpunit/phpunit": "^5 || ^6.5"
},
"type": "library",
"autoload": {
"files": [
"src/getallheaders.php"
]
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Ralph Khattar",
"email": "ralph.khattar@gmail.com"
}
],
"description": "A polyfill for getallheaders.",
"support": {
"issues": "https://github.com/ralouphie/getallheaders/issues",
"source": "https://github.com/ralouphie/getallheaders/tree/develop"
},
"time": "2019-03-08T08:55:37+00:00"
},
{
"name": "slim/psr7",
"version": "1.7.0",
"source": {
"type": "git",
"url": "https://github.com/slimphp/Slim-Psr7.git",
"reference": "753e9646def5ff4db1a06e5cf4ef539bfd30f467"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/slimphp/Slim-Psr7/zipball/753e9646def5ff4db1a06e5cf4ef539bfd30f467",
"reference": "753e9646def5ff4db1a06e5cf4ef539bfd30f467",
"shasum": ""
},
"require": {
"fig/http-message-util": "^1.1.5",
"php": "^8.0",
"psr/http-factory": "^1.1",
"psr/http-message": "^1.0 || ^2.0",
"ralouphie/getallheaders": "^3.0",
"symfony/polyfill-php80": "^1.29"
},
"provide": {
"psr/http-factory-implementation": "^1.0",
"psr/http-message-implementation": "^1.0 || ^2.0"
},
"require-dev": {
"adriansuter/php-autoload-override": "^1.4",
"ext-json": "*",
"http-interop/http-factory-tests": "^1.1.0",
"php-http/psr7-integration-tests": "1.3.0",
"phpspec/prophecy": "^1.19",
"phpspec/prophecy-phpunit": "^2.2",
"phpstan/phpstan": "^1.11",
"phpunit/phpunit": "^9.6",
"squizlabs/php_codesniffer": "^3.10"
},
"type": "library",
"autoload": {
"psr-4": {
"Slim\\Psr7\\": "src"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Josh Lockhart",
"email": "hello@joshlockhart.com",
"homepage": "http://joshlockhart.com"
},
{
"name": "Andrew Smith",
"email": "a.smith@silentworks.co.uk",
"homepage": "http://silentworks.co.uk"
},
{
"name": "Rob Allen",
"email": "rob@akrabat.com",
"homepage": "http://akrabat.com"
},
{
"name": "Pierre Berube",
"email": "pierre@lgse.com",
"homepage": "http://www.lgse.com"
}
],
"description": "Strict PSR-7 implementation",
"homepage": "https://www.slimframework.com",
"keywords": [
"http",
"psr-7",
"psr7"
],
"support": {
"issues": "https://github.com/slimphp/Slim-Psr7/issues",
"source": "https://github.com/slimphp/Slim-Psr7/tree/1.7.0"
},
"time": "2024-06-08T14:48:17+00:00"
},
{
"name": "slim/slim",
"version": "4.14.0",
"source": {
"type": "git",
"url": "https://github.com/slimphp/Slim.git",
"reference": "5943393b88716eb9e82c4161caa956af63423913"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/slimphp/Slim/zipball/5943393b88716eb9e82c4161caa956af63423913",
"reference": "5943393b88716eb9e82c4161caa956af63423913",
"shasum": ""
},
"require": {
"ext-json": "*",
"nikic/fast-route": "^1.3",
"php": "^7.4 || ^8.0",
"psr/container": "^1.0 || ^2.0",
"psr/http-factory": "^1.1",
"psr/http-message": "^1.1 || ^2.0",
"psr/http-server-handler": "^1.0",
"psr/http-server-middleware": "^1.0",
"psr/log": "^1.1 || ^2.0 || ^3.0"
},
"require-dev": {
"adriansuter/php-autoload-override": "^1.4",
"ext-simplexml": "*",
"guzzlehttp/psr7": "^2.6",
"httpsoft/http-message": "^1.1",
"httpsoft/http-server-request": "^1.1",
"laminas/laminas-diactoros": "^2.17 || ^3",
"nyholm/psr7": "^1.8",
"nyholm/psr7-server": "^1.1",
"phpspec/prophecy": "^1.19",
"phpspec/prophecy-phpunit": "^2.1",
"phpstan/phpstan": "^1.11",
"phpunit/phpunit": "^9.6",
"slim/http": "^1.3",
"slim/psr7": "^1.6",
"squizlabs/php_codesniffer": "^3.10",
"vimeo/psalm": "^5.24"
},
"suggest": {
"ext-simplexml": "Needed to support XML format in BodyParsingMiddleware",
"ext-xml": "Needed to support XML format in BodyParsingMiddleware",
"php-di/php-di": "PHP-DI is the recommended container library to be used with Slim",
"slim/psr7": "Slim PSR-7 implementation. See https://www.slimframework.com/docs/v4/start/installation.html for more information."
},
"type": "library",
"autoload": {
"psr-4": {
"Slim\\": "Slim"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Josh Lockhart",
"email": "hello@joshlockhart.com",
"homepage": "https://joshlockhart.com"
},
{
"name": "Andrew Smith",
"email": "a.smith@silentworks.co.uk",
"homepage": "http://silentworks.co.uk"
},
{
"name": "Rob Allen",
"email": "rob@akrabat.com",
"homepage": "http://akrabat.com"
},
{
"name": "Pierre Berube",
"email": "pierre@lgse.com",
"homepage": "http://www.lgse.com"
},
{
"name": "Gabriel Manricks",
"email": "gmanricks@me.com",
"homepage": "http://gabrielmanricks.com"
}
],
"description": "Slim is a PHP micro framework that helps you quickly write simple yet powerful web applications and APIs",
"homepage": "https://www.slimframework.com",
"keywords": [
"api",
"framework",
"micro",
"router"
],
"support": {
"docs": "https://www.slimframework.com/docs/v4/",
"forum": "https://discourse.slimframework.com/",
"irc": "irc://irc.freenode.net:6667/slimphp",
"issues": "https://github.com/slimphp/Slim/issues",
"rss": "https://www.slimframework.com/blog/feed.rss",
"slack": "https://slimphp.slack.com/",
"source": "https://github.com/slimphp/Slim",
"wiki": "https://github.com/slimphp/Slim/wiki"
},
"funding": [
{
"url": "https://opencollective.com/slimphp",
"type": "open_collective"
},
{
"url": "https://tidelift.com/funding/github/packagist/slim/slim",
"type": "tidelift"
}
],
"time": "2024-06-13T08:54:48+00:00"
},
{
"name": "symfony/polyfill-php80",
"version": "v1.31.0",
"source": {
"type": "git",
"url": "https://github.com/symfony/polyfill-php80.git",
"reference": "60328e362d4c2c802a54fcbf04f9d3fb892b4cf8"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/symfony/polyfill-php80/zipball/60328e362d4c2c802a54fcbf04f9d3fb892b4cf8",
"reference": "60328e362d4c2c802a54fcbf04f9d3fb892b4cf8",
"shasum": ""
},
"require": {
"php": ">=7.2"
},
"type": "library",
"extra": {
"thanks": {
"url": "https://github.com/symfony/polyfill",
"name": "symfony/polyfill"
}
},
"autoload": {
"files": [
"bootstrap.php"
],
"psr-4": {
"Symfony\\Polyfill\\Php80\\": ""
},
"classmap": [
"Resources/stubs"
]
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Ion Bazan",
"email": "ion.bazan@gmail.com"
},
{
"name": "Nicolas Grekas",
"email": "p@tchwork.com"
},
{
"name": "Symfony Community",
"homepage": "https://symfony.com/contributors"
}
],
"description": "Symfony polyfill backporting some PHP 8.0+ features to lower PHP versions",
"homepage": "https://symfony.com",
"keywords": [
"compatibility",
"polyfill",
"portable",
"shim"
],
"support": {
"source": "https://github.com/symfony/polyfill-php80/tree/v1.31.0"
},
"funding": [
{
"url": "https://symfony.com/sponsor",
"type": "custom"
},
{
"url": "https://github.com/fabpot",
"type": "github"
},
{
"url": "https://tidelift.com/funding/github/packagist/symfony/symfony",
"type": "tidelift"
}
],
"time": "2024-09-09T11:45:10+00:00"
}
],
"packages-dev": [],
"aliases": [],
"minimum-stability": "stable",
"stability-flags": {},
"prefer-stable": false,
"prefer-lowest": false,
"platform": {},
"platform-dev": {},
"plugin-api-version": "2.6.0"
}

Binary file not shown.

src/api/public/index.php
@@ -0,0 +1,171 @@
<?php
use Psr\Http\Message\ResponseInterface as Response;
use Psr\Http\Message\ServerRequestInterface as Request;
use Slim\Factory\AppFactory;
require __DIR__ . '/../vendor/autoload.php';
$app = AppFactory::create();
// Test route
$app->get('/', function (Request $request, Response $response, $args) {
$response->getBody()->write("Hello world!");
return $response;
});
// Route to upload an image
$app->post('/upload', function (Request $request, Response $response) {
$directory = __DIR__ . '/../uploads';
if (!is_dir($directory)) {
mkdir($directory, 0777, true);
}
$uploadedFiles = $request->getUploadedFiles();
if (empty($uploadedFiles['image'])) {
$response->getBody()->write(json_encode(["error" => "Aucune image reçue"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$image = $uploadedFiles['image'];
$allowedTypes = ['image/jpeg', 'image/png', 'image/jpg'];
if (!in_array($image->getClientMediaType(), $allowedTypes)) {
$response->getBody()->write(json_encode(["error" => "Format non supporté"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$filename = uniqid('img_') . '.' . pathinfo($image->getClientFilename(), PATHINFO_EXTENSION);
$image->moveTo($directory . '/' . $filename);
$response->getBody()->write(json_encode([
"message" => "Image uploadée avec succès",
"image_path" => "uploads/$filename"
]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(200);
});
// Route to predict the age
$app->get('/predict-age', function (Request $request, Response $response) {
$queryParams = $request->getQueryParams();
if (!isset($queryParams['image_path'])) {
$response->getBody()->write(json_encode(["error" => "Chemin de l'image requis"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$imagePath = __DIR__ . '/../uploads/' . $queryParams['image_path'];
if (!file_exists($imagePath)) {
$response->getBody()->write(json_encode(["error" => "Image non trouvée"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(404);
}
exec("python3 ../scripts/predict_age.py $imagePath",$output);
$predictedAge = $output[1];
if ($predictedAge === null) {
$response->getBody()->write(json_encode(["error" => "Erreur lors de la prédiction"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(500);
}
$response->getBody()->write(json_encode([
"message" => "Prédiction réussie",
"predicted_age" => $predictedAge
]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(200);
});
// Route to apply the aging effect
$app->get('/apply-aging', function (Request $request, Response $response) {
$queryParams = $request->getQueryParams();
if (!isset($queryParams['image_path'])) {
$response->getBody()->write(json_encode(["error" => "Chemin de l'image requis"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$imagePath = __DIR__ . '/../uploads/' . $queryParams['image_path'];
if (!file_exists($imagePath)) {
$response->getBody()->write(json_encode(["error" => "Image non trouvée"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
exec("python3 ../scripts/apply_agging.py $imagePath", $output);
$agedImagePath = $output[1];
if ($agedImagePath === null || isset($agedImagePath['error'])) {
$response->getBody()->write(json_encode(["error" => "Erreur lors de l'application du vieillissement"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(500);
}
if (!file_exists($agedImagePath)) {
$response->getBody()->write(json_encode(["error" => "Image vieillie non trouvée"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(500);
}
$image = file_get_contents($agedImagePath);
$response = $response->withHeader('Content-Type', 'image/jpeg');
$response->getBody()->write($image);
return $response;
});
// Route to list all images
$app->get('/images', function (Request $request, Response $response) {
$directory = __DIR__ . '/../uploads';
$files = array_values(array_diff(scandir($directory), ['.', '..']));
$response->getBody()->write(json_encode(["images" => $files]));
return $response->withHeader('Content-Type', 'application/json');
});
// Route to read an image
$app->get('/image', function (Request $request, Response $response, array $args) {
$queryParams = $request->getQueryParams();
if (!isset($queryParams['image_path'])) {
$response->getBody()->write(json_encode(["error" => "Chemin de l'image requis"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$imagePath = __DIR__ . '/../uploads/' . $queryParams['image_path'];
if (!file_exists($imagePath)) {
$response->getBody()->write(json_encode(["error" => "Image non trouvée"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(404);
}
$image = file_get_contents($imagePath);
$response = $response->withHeader('Content-Type', 'image/jpeg');
$response->getBody()->write($image);
return $response;
});
// Route to delete an image
$app->delete('/image', function (Request $request, Response $response, array $args) {
$queryParams = $request->getQueryParams();
if (!isset($queryParams['image_path'])) {
$response->getBody()->write(json_encode(["error" => "Chemin de l'image requis"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(400);
}
$imagePath = __DIR__ . '/../uploads/' . $queryParams['image_path'];
if (!file_exists($imagePath)) {
$response->getBody()->write(json_encode(["error" => "Image non trouvée"]));
return $response->withHeader('Content-Type', 'application/json')->withStatus(404);
}
unlink($imagePath);
$response->getBody()->write(json_encode(["message" => "Image supprimée avec succès"]));
return $response->withHeader('Content-Type', 'application/json');
});
$app->run();

src/api/scripts/apply_agging.py
@@ -0,0 +1,45 @@
import sys
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img, array_to_img
import json
import tensorflow as tf
import os
from tensorflow.keras.losses import MeanSquaredError
def load_and_preprocess_image(image_path):
img = load_img(image_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
img_array = np.expand_dims(img_array, axis=0)
return img_array
def apply_aging(model, image_path):
img_array = load_and_preprocess_image(image_path)
aged_img_array = model.predict(img_array)
aged_img = array_to_img(aged_img_array[0])
return aged_img
if __name__ == "__main__":
if len(sys.argv) != 2:
print(json.dumps({"error": "Usage: python apply_aging.py <image_path>"}))
sys.exit(1)
image_path = sys.argv[1]
model_path = "../../../face_aging_autoencoder.h5"
try:
# Register the custom object
custom_objects = {'mse': MeanSquaredError()}
# Load the model with custom objects
model = load_model(model_path, custom_objects=custom_objects)
aged_img = apply_aging(model, image_path)
# Save the aged image
output_path = os.path.join(os.path.dirname(image_path), "aged_" + os.path.basename(image_path))
aged_img.save(output_path)
print(output_path)
except Exception as e:
print(json.dumps({"error": str(e)}))
sys.exit(1)
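This script is the one invoked by the /apply-aging route via `exec()`: on success it prints the path of the saved image to stdout, and on failure it prints a JSON object with an `error` key, which the PHP side can check for.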

src/api/scripts/predict_age.py
@@ -0,0 +1,34 @@
import sys
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.losses import MeanSquaredError
import json
def load_and_preprocess_image(image_path):
img = load_img(image_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
img_array = np.expand_dims(img_array, axis=0)
return img_array
def predict_age(model, image_path):
img_array = load_and_preprocess_image(image_path)
predicted_age = model.predict(img_array)
return predicted_age[0][0]
if __name__ == "__main__":
if len(sys.argv) != 2:
print(json.dumps({"error": "Usage: python predict_age.py <image_path>"}))
sys.exit(1)
image_path = sys.argv[1]
model_path = "../../../face_aging_model.h5"
try:
# Load the model with custom objects
model = load_model(model_path, custom_objects={'mse': MeanSquaredError()})
predicted_age = predict_age(model, image_path)
print(str(predicted_age))
except Exception as e:
print(json.dumps({"error": str(e)}))
sys.exit(1)

New binary image files added (previews not shown).

@@ -1,43 +0,0 @@
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import MeanSquaredError

def generate_aged_image(model, noise_dim):
    # Generate a noise vector
    noise = np.random.normal(0, 1, (1, noise_dim))

    # Generate the aged image
    generated_img = model.predict(noise)
    print(generated_img)
    print(type(generated_img[0][0]))
    print(f"Shape of generated image: {generated_img.shape}")
    print(f"Generated image values (min, max): {generated_img.min()}, {generated_img.max()}")
    generated_img = np.clip(generated_img[0], 0, 1)  # Make sure values stay in [0, 1]
    generated_img = (generated_img * 255).astype(np.uint8)

    # Convert to BGR for OpenCV
    generated_img_bgr = cv2.cvtColor(generated_img, cv2.COLOR_RGB2BGR)
    return generated_img_bgr

def main():
    # Paths to the model and the output image
    model_path = "models/generator_epoch_7400.h5"
    output_path = "visage_aged.jpg"

    # Load the model
    model = load_model(model_path, custom_objects={'mse': MeanSquaredError()})
    print("Modèle chargé avec succès !")

    # Generate the aged image
    aged_image = generate_aged_image(model, noise_dim=100)

    # Save the resulting image
    cv2.imwrite(output_path, aged_image)
    print(f"Image vieillie sauvegardée sous {output_path}")

if __name__ == "__main__":
    main()

@@ -6,40 +6,27 @@ import matplotlib.pyplot as plt
from tensorflow.keras.losses import MeanSquaredError

def apply_aging_effect(model, image_path):
    img = load_img(image_path, target_size=(128, 128))
    img_array = img_to_array(img) / 255.0

    predicted_img = model.predict(np.expand_dims(img_array, axis=0))
    predicted_img = np.clip(predicted_img[0], 0, 1)
    predicted_img = (predicted_img * 255).astype(np.uint8)

    predicted_img_bgr = cv2.cvtColor(predicted_img, cv2.COLOR_RGB2BGR)
    return predicted_img_bgr

def main():
    model_path = "face_aging_autoencoder.h5"
    image_path = "visage.jpg"
    output_path = "visage_aged.jpg"

    model = load_model(model_path, custom_objects={'mse': MeanSquaredError()})
    print("Modèle chargé avec succès !")

    aged_image = apply_aging_effect(model, image_path)

    cv2.imwrite(output_path, aged_image)
    print(f"Image vieillie sauvegardée sous {output_path}")

@@ -0,0 +1,33 @@
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.utils import get_custom_objects
def mse(y_true, y_pred):
return MeanSquaredError()(y_true, y_pred)
get_custom_objects().update({"mse": mse})
def predict_age(model, image_path):
img = load_img(image_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
img_array = np.expand_dims(img_array, axis=0)
predicted_age = model.predict(img_array)
return predicted_age[0][0]
def main():
model_path = "face_aging_model.h5"
image_path = "visage.jpg"
model = load_model(model_path, custom_objects={"mse": mse})
age = predict_age(model, image_path)
print(f"L'âge prédit est: {age}")
if __name__ == "__main__":
main()

@@ -1,45 +0,0 @@
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img

def apply_aging_effect(model, image_path):
    # Load the image
    img = load_img(image_path, target_size=(128, 128))
    img_array = img_to_array(img) / 255.0

    # Apply the aging effect
    predicted_img = model.predict(np.expand_dims(img_array, axis=0))
    print(predicted_img)
    print(type(predicted_img[0][0]))
    print(f"Shape of predicted image: {predicted_img.shape}")
    print(f"Predicted image values (min, max): {predicted_img.min()}, {predicted_img.max()}")
    predicted_img = np.clip(predicted_img[0], 0, 1)  # Make sure values stay in [0, 1]
    predicted_img = (predicted_img * 255).astype(np.uint8)

    # Convert to BGR for OpenCV
    predicted_img_bgr = cv2.cvtColor(predicted_img, cv2.COLOR_RGB2BGR)
    return predicted_img_bgr

def main():
    # Paths to the model and the image
    model_path = "unet_face_aging_model.h5"
    image_path = "visage.jpg"
    output_path = "visage_aged.jpg"

    # Load the model
    model = load_model(model_path)
    print("Modèle chargé avec succès !")

    # Apply the aging effect
    aged_image = apply_aging_effect(model, image_path)

    # Save the resulting image
    cv2.imwrite(output_path, aged_image)
    print(f"Image vieillie sauvegardée sous {output_path}")

if __name__ == "__main__":
    main()

@@ -1,62 +0,0 @@
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Reshape, Conv2DTranspose
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.callbacks import Callback
import matplotlib.pyplot as plt

# Enable eager execution
tf.config.run_functions_eagerly(True)

data_dir = "./UTKFace/part1/part1"
image_size = (128, 128)

def load_utkface_images(data_dir, image_size=(128, 128)):
    images, ages = [], []
    for file in os.listdir(data_dir):
        if file.endswith(".jpg"):
            age = int(file.split("_")[0])  # Age is the first part of the filename
            img = load_img(os.path.join(data_dir, file), target_size=image_size)
            img = img_to_array(img) / 255.0  # Normalize images
            images.append(img)
            ages.append(age)
    return np.array(images), np.array(ages)

X, y = load_utkface_images(data_dir)

class ImageLogger(Callback):
    def __init__(self, autoencoder_model, sample_image, output_dir="progress_images"):
        self.autoencoder_model = autoencoder_model
        self.sample_image = sample_image
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)

    def on_epoch_end(self, epoch, logs=None):
        reconstructed_image = self.autoencoder_model.predict(np.expand_dims(self.sample_image, axis=0))[0]
        reconstructed_image = (reconstructed_image * 255).astype(np.uint8)
        output_path = os.path.join(self.output_dir, f"epoch_{epoch + 1}.png")
        plt.imsave(output_path, reconstructed_image)
        print(f"Image sauvegardée à l'époque {epoch + 1}")

# Load the saved model
model_path = "face_aging_autoencoder.h5"
autoencoder = load_model(model_path, custom_objects={'mse': tf.keras.losses.MeanSquaredError()})

# Recompile the model after loading
autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss='mse')

# Pick a sample image to visualize progress
sample_image = X[0]

# Number of additional epochs to continue training
additional_epochs = 20

# Continue training with the ImageLogger callback
autoencoder.fit(X, X, epochs=additional_epochs, batch_size=32, validation_split=0.2, callbacks=[ImageLogger(autoencoder, sample_image)])

# Save the updated model
autoencoder.save("face_aging_autoencoder_updated.h5")
print("Modèle mis à jour et sauvegardé avec succès !")

@@ -0,0 +1,50 @@
import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from sklearn.model_selection import train_test_split
def load_utkface_dataset(dataset_path):
images = []
ages = []
for filename in os.listdir(dataset_path):
if filename.endswith(".jpg"):
age = int(filename.split("_")[0])
img_path = os.path.join(dataset_path, filename)
img = load_img(img_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
images.append(img_array)
ages.append(age)
return np.array(images), np.array(ages)
def build_model():
model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Conv2D(128, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Flatten(),
Dense(128, activation='relu'),
Dense(1)
])
model.compile(optimizer='adam', loss=MeanSquaredError(), metrics=['mae'])
return model
def train_model(dataset_path, model_path):
images, ages = load_utkface_dataset(dataset_path)
X_train, X_test, y_train, y_test = train_test_split(images, ages, test_size=0.2, random_state=42)
model = build_model()
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test))
model.save(model_path)
print(f"Model saved to {model_path}")
if __name__ == "__main__":
dataset_path = "./UTKFace/part1/part1"
model_path = "face_aging_model.h5"
train_model(dataset_path, model_path)

@@ -13,7 +13,7 @@ from tensorflow.keras.models import load_model
from tensorflow.keras.losses import MeanSquaredError

data_dir = "./UTKFace/part1/part1"
image_size = (512, 512)
batch_size = 32

def create_dataframe(data_dir):
@@ -51,7 +51,7 @@ class ImageLogger(Callback):
        print(f"Image sauvegardée à l'époque {epoch + 1}")

def build_autoencoder():
    input_img = Input(shape=(512, 512, 3))

    # Encoder
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
@@ -62,7 +62,7 @@ def build_autoencoder():
    latent = Dense(512, activation='relu')(x)

    # Decoder
    x = Dense(64 * 64 * 256, activation='relu')(latent)
    x = Reshape((64, 64, 256))(x)
    x = Conv2DTranspose(256, (3, 3), activation='relu', padding='same', strides=(2, 2))(x)
    x = Conv2DTranspose(128, (3, 3), activation='relu', padding='same', strides=(2, 2))(x)
@@ -76,28 +76,20 @@ def build_autoencoder():
autoencoder = build_autoencoder()

df = create_dataframe(data_dir)
df = df.dropna()
df = df[df['filename'].apply(lambda x: os.path.exists(x))]

sample_image = load_img(df['filename'].iloc[0], target_size=image_size)
sample_image = img_to_array(sample_image) / 255.0

epochs = 20

train_generator = data_generator(df, image_size=image_size, batch_size=batch_size)

autoencoder.fit(train_generator, epochs=epochs, callbacks=[ImageLogger(autoencoder, sample_image)])

autoencoder.save("face_aging_autoencoder_2.h5")
print("Modèle entraîné et sauvegardé avec succès !")

@@ -1,109 +0,0 @@
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Parameters
BATCH_SIZE = 16
IMG_SIZE = (200, 200)
DATASET_PATH = "UTKFace/part1/part1"
EPOCHS = 50
# Data Loader
def load_image(image_path):
img = cv2.imread(image_path)
img = cv2.resize(img, IMG_SIZE)
img = img / 255.0 # Normalize
return img
def parse_age(filename):
try:
age = int(filename.split("_")[0])
return age
except:
return None
# Load Data
young_images, old_images = [], []
for filename in os.listdir(DATASET_PATH):
age = parse_age(filename)
if age is not None:
img_path = os.path.join(DATASET_PATH, filename)
img = load_image(img_path)
if age < 30:
young_images.append(img)
elif age > 50:
old_images.append(img)
young_images = np.array(young_images)
old_images = np.array(old_images)
# Define Generator
def build_generator():
model = models.Sequential([
layers.Input(shape=(200, 200, 3)),
layers.Conv2D(64, (3, 3), padding="same", activation="relu"),
layers.Conv2D(128, (3, 3), padding="same", activation="relu"),
layers.Conv2D(256, (3, 3), padding="same", activation="relu"),
layers.Conv2DTranspose(128, (3, 3), strides=1, padding="same", activation="relu"),
layers.Conv2DTranspose(64, (3, 3), strides=1, padding="same", activation="relu"),
layers.Conv2D(3, (3, 3), padding="same", activation="sigmoid") # Ensure output remains 200x200
])
return model
# Define Discriminator
def build_discriminator():
model = models.Sequential([
layers.Input(shape=(200, 200, 3)),
layers.Conv2D(64, (3, 3), padding="same", activation="relu"),
layers.MaxPooling2D(),
layers.Conv2D(128, (3, 3), padding="same", activation="relu"),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(1, activation="sigmoid")
])
return model
# Build and Compile Models
generator = build_generator()
discriminator = build_discriminator()
discriminator.compile(optimizer=tf.keras.optimizers.Adam(0.0002), loss='binary_crossentropy')
def aging_gan(generator, discriminator):
discriminator.trainable = False
gan_input = layers.Input(shape=(200, 200, 3))
generated_image = generator(gan_input)
validity = discriminator(generated_image)
model = models.Model(gan_input, validity)
model.compile(optimizer=tf.keras.optimizers.Adam(0.0002), loss='binary_crossentropy')
return model
# Train the GAN
gan = aging_gan(generator, discriminator)
def train_gan(epochs, batch_size):
for epoch in range(epochs):
idx = np.random.randint(0, young_images.shape[0], batch_size)
young_batch = young_images[idx]
idx = np.random.randint(0, old_images.shape[0], batch_size)
old_batch = old_images[idx]
# Generate aged faces
generated_old = generator.predict(young_batch)
# Train Discriminator
d_loss_real = discriminator.train_on_batch(old_batch, np.ones((batch_size, 1)))
d_loss_fake = discriminator.train_on_batch(generated_old, np.zeros((batch_size, 1)))
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Train Generator
g_loss = gan.train_on_batch(young_batch, np.ones((batch_size, 1)))
print(f"Epoch {epoch+1}/{epochs} - D Loss: {d_loss:.4f}, G Loss: {g_loss:.4f}")
train_gan(EPOCHS, BATCH_SIZE)
# Save Model
generator.save("aging_generator_model.h5")

@@ -1,223 +0,0 @@
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from glob import glob
import random
import cv2
# Define constants
IMAGE_SIZE = 200 # UTKFace images are commonly resized to 200x200
BATCH_SIZE = 10
EPOCHS = 10
LATENT_DIM = 100
BASE_DIR = "UTKFace/part3/part3" # Update this to your UTKFace dataset path
# Function to load and preprocess UTKFace dataset
def load_utkface_data(base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE)):
# UTKFace filename format: [age]_[gender]_[race]_[date&time].jpg
images = []
ages = []
image_paths = glob(os.path.join(base_dir, "*.jpg"))
for img_path in image_paths:
try:
# Extract age from filename
filename = os.path.basename(img_path)
age = int(filename.split("_")[0])
# Load and preprocess image
img = load_img(img_path, target_size=target_size)
img_array = img_to_array(img)
img_array = (img_array - 127.5) / 127.5 # Normalize to [-1, 1]
images.append(img_array)
ages.append(age)
except Exception as e:
print(f"Error processing {img_path}: {e}")
continue
return np.array(images), np.array(ages)
# Load the dataset
print("Loading UTKFace dataset...")
images, ages = load_utkface_data(BASE_DIR)
print(f"Loaded {len(images)} images with age information")
# Create age-paired dataset for training
def create_age_pairs(images, ages, min_age_gap=10, max_age_gap=40):
young_images = []
old_images = []
# Group images by age
age_to_images = {}
for i, age in enumerate(ages):
if age not in age_to_images:
age_to_images[age] = []
age_to_images[age].append(i)
# Create pairs with specified age gap
for young_age in sorted(age_to_images.keys()):
for old_age in sorted(age_to_images.keys()):
age_gap = old_age - young_age
if min_age_gap <= age_gap <= max_age_gap:
for young_idx in age_to_images[young_age]:
young_images.append(images[young_idx])
# Randomly select an older face
old_idx = random.choice(age_to_images[old_age])
old_images.append(images[old_idx])
return np.array(young_images), np.array(old_images)
print("Creating age-paired training data...")
young_faces, old_faces = create_age_pairs(images, ages)
print(f"Created {len(young_faces)} young-old face pairs for training")
# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(
young_faces, old_faces, test_size=0.2, random_state=42)
# Build the age progression model (using a modified U-Net architecture)
def build_age_progression_model():
# Encoder
inputs = layers.Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
# Encoder path
e1 = layers.Conv2D(64, (4, 4), strides=(2, 2), padding='same')(inputs)
e1 = layers.LeakyReLU(alpha=0.2)(e1)
e2 = layers.Conv2D(128, (4, 4), strides=(2, 2), padding='same')(e1)
e2 = layers.BatchNormalization()(e2)
e2 = layers.LeakyReLU(alpha=0.2)(e2)
e3 = layers.Conv2D(256, (4, 4), strides=(2, 2), padding='same')(e2)
e3 = layers.BatchNormalization()(e3)
e3 = layers.LeakyReLU(alpha=0.2)(e3)
e4 = layers.Conv2D(512, (4, 4), strides=(2, 2), padding='same')(e3)
e4 = layers.BatchNormalization()(e4)
e4 = layers.LeakyReLU(alpha=0.2)(e4)
e5 = layers.Conv2D(512, (4, 4), strides=(2, 2), padding='same')(e4)
e5 = layers.BatchNormalization()(e5)
e5 = layers.LeakyReLU(alpha=0.2)(e5)
# Decoder path with skip connections
d1 = layers.UpSampling2D(size=(2, 2))(e5)
d1 = layers.Conv2D(512, (3, 3), padding='same', activation='relu')(d1)
d1 = layers.BatchNormalization()(d1)
d1 = layers.Concatenate()([d1, e4])
d2 = layers.UpSampling2D(size=(2, 2))(d1)
d2 = layers.Conv2D(256, (3, 3), padding='same', activation='relu')(d2)
d2 = layers.BatchNormalization()(d2)
d2 = layers.Concatenate()([d2, e3])
d3 = layers.UpSampling2D(size=(2, 2))(d2)
d3 = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(d3)
d3 = layers.BatchNormalization()(d3)
d3 = layers.Concatenate()([d3, e2])
d4 = layers.UpSampling2D(size=(2, 2))(d3)
d4 = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(d4)
d4 = layers.BatchNormalization()(d4)
d4 = layers.Concatenate()([d4, e1])
d5 = layers.UpSampling2D(size=(2, 2))(d4)
outputs = layers.Conv2D(3, (3, 3), padding='same', activation='tanh')(d5)
model = models.Model(inputs=inputs, outputs=outputs)
return model
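# Caveat: with stride-2 'same' convolutions, 200 maps to 100 -> 50 -> 25 -> 13 -> 7,
# so after 2x upsampling the decoder maps (14, 26, ...) no longer match the skip
# tensors (13, 25, ...) and Concatenate raises a shape error; an IMAGE_SIZE
# divisible by 32 (e.g. 192 or 256) keeps encoder and decoder shapes aligned.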
# Build and compile the model
print("Building age progression model...")
model = build_age_progression_model()
model.compile(
optimizer=optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
loss='mae' # Mean Absolute Error for image generation
)
model.summary()
# Create a callback for saving the model
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath='age_progression_model_best.h5',
save_best_only=True,
monitor='val_loss',
mode='min'
)
# Train the model
print("Training the age progression model...")
history = model.fit(
X_train, y_train,
validation_data=(X_val, y_val),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
callbacks=[checkpoint_callback]
)
# Plot training history
plt.figure(figsize=(12, 4))
plt.subplot(1, 1, 1)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.title('Training and Validation Loss')
plt.savefig('training_history.png')
plt.close()
# Function to use the model for inference
def age_progress_face(model, face_image_path, output_path=None):
# Load and preprocess the input image
img = load_img(face_image_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
img_array = img_to_array(img)
img_array = (img_array - 127.5) / 127.5 # Normalize to [-1, 1]
img_array = np.expand_dims(img_array, axis=0) # Add batch dimension
# Generate aged face
aged_face = model.predict(img_array)
# Convert back to uint8 format
aged_face = ((aged_face[0] * 127.5) + 127.5).astype(np.uint8)
# Save the result if output path is provided
if output_path:
cv2.imwrite(output_path, cv2.cvtColor(aged_face, cv2.COLOR_RGB2BGR))
return aged_face
# Example usage after training
print("Testing the model with a sample image...")
# Load the best model
best_model = models.load_model('age_progression_model_best.h5')
# Test with a sample image (you'll need to update this path)
sample_image_path = "sample_young_face.jpg" # Update with your test image path
output_path = "aged_face_result.jpg"
try:
aged_face = age_progress_face(best_model, sample_image_path, output_path)
print(f"Aged face saved to {output_path}")
# Display original and aged faces
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(load_img(sample_image_path, target_size=(IMAGE_SIZE, IMAGE_SIZE)))
axes[0].set_title("Original Face")
axes[0].axis("off")
axes[1].imshow(aged_face)
axes[1].set_title("Aged Face")
axes[1].axis("off")
plt.tight_layout()
plt.savefig("comparison.png")
plt.show()
except Exception as e:
print(f"Error testing the model: {e}")
print("Training and testing complete!")

@ -1,230 +0,0 @@
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from glob import glob
import random
import cv2
# Define constants
IMAGE_SIZE = 200 # UTKFace images are commonly resized to 200x200
BATCH_SIZE = 10
EPOCHS = 10
LATENT_DIM = 100
BASE_DIR = "UTKFace/part3/part3" # Update this to your UTKFace dataset path
# Function to load and preprocess UTKFace dataset
def load_utkface_data(base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE)):
# UTKFace filename format: [age]_[gender]_[race]_[date&time].jpg
images = []
ages = []
image_paths = glob(os.path.join(base_dir, "*.jpg"))
for img_path in image_paths:
try:
# Extract age from filename
filename = os.path.basename(img_path)
age = int(filename.split("_")[0])
# Load and preprocess image
img = load_img(img_path, target_size=target_size)
img_array = img_to_array(img)
img_array = (img_array - 127.5) / 127.5 # Normalize to [-1, 1]
images.append(img_array)
ages.append(age)
except Exception as e:
print(f"Error processing {img_path}: {e}")
continue
return np.array(images), np.array(ages)
# Load the dataset
print("Loading UTKFace dataset...")
images, ages = load_utkface_data(BASE_DIR)
print(f"Loaded {len(images)} images with age information")
# Create age-paired dataset for training
def create_age_pairs(images, ages, min_age_gap=10, max_age_gap=40, batch_size=10000):
young_images = []
old_images = []
# Group images by age
age_to_images = {}
for i, age in enumerate(ages):
if age not in age_to_images:
age_to_images[age] = []
age_to_images[age].append(i)
# Create pairs with specified age gap
for young_age in sorted(age_to_images.keys()):
for old_age in sorted(age_to_images.keys()):
age_gap = old_age - young_age
if min_age_gap <= age_gap <= max_age_gap:
for young_idx in age_to_images[young_age]:
young_images.append(images[young_idx])
# Randomly select an older face
old_idx = random.choice(age_to_images[old_age])
old_images.append(images[old_idx])
# Process in batches to avoid memory issues
if len(young_images) >= batch_size:
yield np.array(young_images), np.array(old_images)
young_images, old_images = [], []
if young_images and old_images:
yield np.array(young_images), np.array(old_images)
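# Note: next(create_age_pairs(...)) below materialises only the first yielded
# chunk (at most batch_size pairs); looping over the generator would stream all
# chunks, e.g.: for young_chunk, old_chunk in create_age_pairs(images, ages): ...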
# Usage
print("Creating age-paired training data...")
young_faces, old_faces = next(create_age_pairs(images, ages))
print(f"Created {len(young_faces)} young-old face pairs for training")
# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(
young_faces, old_faces, test_size=0.2, random_state=42)
# Build the age progression model (using a modified U-Net architecture)
def build_age_progression_model():
# Encoder
inputs = layers.Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
# Encoder path
e1 = layers.Conv2D(64, (4, 4), strides=(2, 2), padding='same')(inputs)
e1 = layers.LeakyReLU(alpha=0.2)(e1)
e2 = layers.Conv2D(128, (4, 4), strides=(2, 2), padding='same')(e1)
e2 = layers.BatchNormalization()(e2)
e2 = layers.LeakyReLU(alpha=0.2)(e2)
e3 = layers.Conv2D(256, (4, 4), strides=(2, 2), padding='same')(e2)
e3 = layers.BatchNormalization()(e3)
e3 = layers.LeakyReLU(alpha=0.2)(e3)
e4 = layers.Conv2D(512, (4, 4), strides=(2, 2), padding='same')(e3)
e4 = layers.BatchNormalization()(e4)
e4 = layers.LeakyReLU(alpha=0.2)(e4)
e5 = layers.Conv2D(512, (4, 4), strides=(2, 2), padding='same')(e4)
e5 = layers.BatchNormalization()(e5)
e5 = layers.LeakyReLU(alpha=0.2)(e5)
# Decoder path with skip connections
d1 = layers.UpSampling2D(size=(2, 2))(e5)
d1 = layers.Conv2D(512, (3, 3), padding='same', activation='relu')(d1)
d1 = layers.BatchNormalization()(d1)
d1 = layers.Concatenate()([d1, e4])
d2 = layers.UpSampling2D(size=(2, 2))(d1)
d2 = layers.Conv2D(256, (3, 3), padding='same', activation='relu')(d2)
d2 = layers.BatchNormalization()(d2)
d2 = layers.Concatenate()([d2, e3])
d3 = layers.UpSampling2D(size=(2, 2))(d2)
d3 = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(d3)
d3 = layers.BatchNormalization()(d3)
d3 = layers.Concatenate()([d3, e2])
d4 = layers.UpSampling2D(size=(2, 2))(d3)
d4 = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(d4)
d4 = layers.BatchNormalization()(d4)
d4 = layers.Concatenate()([d4, e1])
d5 = layers.UpSampling2D(size=(2, 2))(d4)
outputs = layers.Conv2D(3, (3, 3), padding='same', activation='tanh')(d5)
model = models.Model(inputs=inputs, outputs=outputs)
return model
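# Caveat: as in the previous script, 200 -> 100 -> 50 -> 25 -> 13 -> 7 under
# stride-2 'same' convolutions, so the skip Concatenate only lines up when
# IMAGE_SIZE is divisible by 32 (e.g. 192 or 256).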
# Build and compile the model
print("Building age progression model...")
model = build_age_progression_model()
model.compile(
optimizer=optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
loss='mae' # Mean Absolute Error for image generation
)
model.summary()
# Create a callback for saving the model
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath='age_progression_model_best.h5',
save_best_only=True,
monitor='val_loss',
mode='min'
)
# Train the model
print("Training the age progression model...")
history = model.fit(
X_train, y_train,
validation_data=(X_val, y_val),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
callbacks=[checkpoint_callback]
)
# Plot training history
plt.figure(figsize=(12, 4))
plt.subplot(1, 1, 1)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.title('Training and Validation Loss')
plt.savefig('training_history.png')
plt.close()
# Function to use the model for inference
def age_progress_face(model, face_image_path, output_path=None):
# Load and preprocess the input image
img = load_img(face_image_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
img_array = img_to_array(img)
img_array = (img_array - 127.5) / 127.5 # Normalize to [-1, 1]
img_array = np.expand_dims(img_array, axis=0) # Add batch dimension
# Generate aged face
aged_face = model.predict(img_array)
# Convert back to uint8 format
aged_face = ((aged_face[0] * 127.5) + 127.5).astype(np.uint8)
# Save the result if output path is provided
if output_path:
cv2.imwrite(output_path, cv2.cvtColor(aged_face, cv2.COLOR_RGB2BGR))
return aged_face
# Example usage after training
print("Testing the model with a sample image...")
# Load the best model
best_model = models.load_model('age_progression_model_best.h5')
# Test with a sample image (you'll need to update this path)
sample_image_path = "sample_young_face.jpg" # Update with your test image path
output_path = "aged_face_result.jpg"
try:
aged_face = age_progress_face(best_model, sample_image_path, output_path)
print(f"Aged face saved to {output_path}")
# Display original and aged faces
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(load_img(sample_image_path, target_size=(IMAGE_SIZE, IMAGE_SIZE)))
axes[0].set_title("Original Face")
axes[0].axis("off")
axes[1].imshow(aged_face)
axes[1].set_title("Aged Face")
axes[1].axis("off")
plt.tight_layout()
plt.savefig("comparison.png")
plt.show()
except Exception as e:
print(f"Error testing the model: {e}")
print("Training and testing complete!")

@ -1,330 +0,0 @@
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model, optimizers
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gc
# Set memory growth for GPU to avoid OOM errors
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
# Configuration
IMAGE_SIZE = 200 # Resize images to this size (200x200)
BATCH_SIZE = 16 # Smaller batch size to save memory
BUFFER_SIZE = 1000
EPOCHS = 10
LEARNING_RATE = 0.0002
DATA_DIR = "UTKFace/part1/part1" # Replace with actual path to UTKFace dataset
# Create data generator to load and process images in batches
class FaceAgingDataGenerator(tf.keras.utils.Sequence):
def __init__(self, image_paths, batch_size=BATCH_SIZE, image_size=IMAGE_SIZE, shuffle=True):
self.image_paths = image_paths
self.batch_size = batch_size
self.image_size = image_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.image_paths) / self.batch_size))
def on_epoch_end(self):
self.indexes = np.arange(len(self.image_paths))
if self.shuffle:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of IDs
image_paths_temp = [self.image_paths[k] for k in indexes]
# Generate data
X, y = self.__data_generation(image_paths_temp)
return X, y
def __data_generation(self, image_paths_temp):
X = np.empty((self.batch_size, self.image_size, self.image_size, 3))
y = np.empty((self.batch_size, self.image_size, self.image_size, 3))
# Generate data
for i, path in enumerate(image_paths_temp):
# Extract age from filename (UTKFace format: [age]_[gender]_[race]_[date&time].jpg)
filename = os.path.basename(path)
age = int(filename.split('_')[0])
# Load image
img = load_img(path, target_size=(self.image_size, self.image_size))
img_array = img_to_array(img) / 255.0 # Normalize to [0,1]
X[i,] = img_array
# Create aged target - add 20 years
# For simplicity, we're just using the same image but for an older person
# In a real implementation, you would find images of the same person at different ages
# or use a more sophisticated transformation
target_age = min(age + 20, 100) # Cap at 100 years
# Find another image with similar characteristics but older age
# This is a simplified approach - in practice, you might want to use a GAN or more complex model
y[i,] = img_array # For now, just use the same image (will be replaced in training)
return X, y
def build_aging_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)):
"""Build a U-Net style model for face aging transformation"""
# Memory efficient model design
def downsample(filters, size, apply_batchnorm=True):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(layers.Conv2D(filters, size, strides=2, padding='same',
kernel_initializer=initializer, use_bias=False))
if apply_batchnorm:
result.add(layers.BatchNormalization())
result.add(layers.LeakyReLU(0.2))
return result
def upsample(filters, size, apply_dropout=False):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(layers.Conv2DTranspose(filters, size, strides=2, padding='same',
kernel_initializer=initializer, use_bias=False))
result.add(layers.BatchNormalization())
if apply_dropout:
result.add(layers.Dropout(0.5))
result.add(layers.ReLU())
return result
# Input
inputs = layers.Input(shape=input_shape)
# Downsampling
down_stack = [
downsample(64, 4, apply_batchnorm=False), # (bs, 100, 100, 64)
downsample(128, 4), # (bs, 50, 50, 128)
downsample(256, 4), # (bs, 25, 25, 256)
downsample(512, 4), # (bs, 12, 12, 512)
downsample(512, 4), # (bs, 6, 6, 512)
]
# Upsampling
up_stack = [
upsample(512, 4, apply_dropout=True), # (bs, 12, 12, 1024)
upsample(256, 4, apply_dropout=True), # (bs, 25, 25, 512)
upsample(128, 4), # (bs, 50, 50, 256)
upsample(64, 4), # (bs, 100, 100, 128)
]
initializer = tf.random_normal_initializer(0., 0.02)
last = layers.Conv2DTranspose(3, 4,
strides=2,
padding='same',
kernel_initializer=initializer,
activation='tanh') # (bs, 200, 200, 3)
x = inputs
# Downsampling and save skip connections
skips = []
for down in down_stack:
x = down(x)
skips.append(x)
skips = reversed(skips[:-1])
# Upsampling and connect with skip connections
for up, skip in zip(up_stack, skips):
x = up(x)
x = layers.Concatenate()([x, skip])
x = last(x)
return Model(inputs=inputs, outputs=x)
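# Caveat: the shape comments above assume sizes halve cleanly; with
# IMAGE_SIZE=200 the encoder reaches 13x13 and 7x7, so Conv2DTranspose (which
# exactly doubles) produces 14x14 maps that cannot be concatenated with the
# 13x13 skips; a size divisible by 32 avoids this. Note also that the data
# generator scales images to [0, 1] while the final tanh outputs [-1, 1];
# predict_aged_face below rescales assuming [-1, 1].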
def load_utkface_data(data_dir):
"""Load and prepare UTKFace dataset"""
image_paths = []
young_faces = []
older_faces = []
# Collect all valid image paths and organize by age
for filename in os.listdir(data_dir):
if filename.endswith('.jpg') or filename.endswith('.png'):
try:
# Extract age from filename
age = int(filename.split('_')[0])
full_path = os.path.join(data_dir, filename)
# Categorize images by age
if age < 40:
young_faces.append(full_path)
else:
older_faces.append(full_path)
image_paths.append(full_path)
except ValueError:
# Skip files with incorrect naming format
continue
print(f"Found {len(image_paths)} images total")
print(f"Young faces: {len(young_faces)}, Older faces: {len(older_faces)}")
# Split data into training and validation sets
train_paths, val_paths = train_test_split(image_paths, test_size=0.1, random_state=42)
return train_paths, val_paths
def train_model():
"""Main function to train the face aging model"""
# Load and prepare data
train_paths, val_paths = load_utkface_data(DATA_DIR)
# Create data generators
train_generator = FaceAgingDataGenerator(train_paths)
val_generator = FaceAgingDataGenerator(val_paths, shuffle=False)
# Build model
model = build_aging_model()
# Compile model
model.compile(
optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
loss='mean_absolute_error', # L1 loss works well for image-to-image translation
metrics=['mae']
)
# Model summary
model.summary()
# Memory cleanup before training
gc.collect()
tf.keras.backend.clear_session()
# Create TensorBoard callback
log_dir = "logs/face_aging"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# Model checkpoint callback
checkpoint_path = "checkpoints/face_aging_model.h5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
save_weights_only=False,
monitor='val_loss',
mode='min',
save_best_only=True
)
# Train model with memory-efficient settings
history = model.fit(
train_generator,
validation_data=val_generator,
epochs=EPOCHS,
callbacks=[
tensorboard_callback,
model_checkpoint_callback,
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-6),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
]
)
# Save the final model
model.save('face_aging_final_model.h5')
# Plot training history
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.subplot(1, 2, 2)
plt.plot(history.history['mae'])
plt.plot(history.history['val_mae'])
plt.title('Model MAE')
plt.ylabel('MAE')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.savefig('training_history.png')
return model
def predict_aged_face(model, image_path):
"""Function to predict aged version of a face"""
# Load and preprocess the image
img = load_img(image_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
img_array = img_to_array(img) / 255.0 # Normalize to [0,1]
# Add batch dimension
img_batch = np.expand_dims(img_array, axis=0)
# Make prediction
aged_face = model.predict(img_batch)
# Convert back to 0-255 range and proper dtype
aged_face = ((aged_face[0] * 0.5 + 0.5) * 255).astype(np.uint8)
# Display original and aged face
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(img)
plt.title('Original Face')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(aged_face)
plt.title('Aged Face')
plt.axis('off')
plt.savefig('face_aging_result.png')
plt.show()
# Memory-efficient techniques for training
def apply_memory_optimizations():
"""Apply additional memory optimizations"""
# Set TensorFlow memory growth
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
# Limit TensorFlow to use only necessary GPU memory
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use a portion of GPU memory
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)] # Limit to 4GB
)
except RuntimeError as e:
print(e)
# Use mixed precision training for memory efficiency
policy = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(policy)
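# Note: under the global mixed_float16 policy, Keras keeps variables in float32,
# runs computations in float16, and applies loss scaling automatically in
# Model.fit; for numeric stability the final layer is usually kept in float32
# (e.g. by giving it dtype='float32').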
# Main execution
if __name__ == "__main__":
# Apply memory optimizations
apply_memory_optimizations()
# Train the model
model = train_model()
# Test with a sample image
test_image_path = "visage.jpg" # Replace with an actual test image
predict_aged_face(model, test_image_path)

@ -1,137 +0,0 @@
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU, UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import numpy as np
import os
from tensorflow.keras.preprocessing.image import img_to_array, load_img
import cv2 # Import the OpenCV library
# Limit GPU memory growth
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
# Load the UTKFace dataset
def load_utkface_dataset(dataset_path):
images = []
ages = []
for file_name in os.listdir(dataset_path):
if file_name.endswith(".jpg"):
age = int(file_name.split("_")[0]) # Extract the age from the file name
img_path = os.path.join(dataset_path, file_name)
img = load_img(img_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
images.append(img_array)
ages.append(age)
return np.array(images), np.array(ages)
# Define the generator
def build_generator():
model = Sequential()
model.add(Dense(128 * 8 * 8, activation="relu", input_dim=100))
model.add(Reshape((8, 8, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(32, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(16, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(3, kernel_size=3, padding="same"))
model.add(Activation("tanh"))
return model
# Define the discriminator
def build_discriminator():
model = Sequential()
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=(128, 128, 3), padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
return model
# Build and compile the combined GAN model
def build_gan(generator, discriminator):
discriminator.trainable = False
z = Input(shape=(100,))
img = generator(z)
valid = discriminator(img)
combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5))
return combined
# Train the GAN model
def train_gan(generator, discriminator, combined, epochs, batch_size, save_interval, dataset_path):
X_train, _ = load_utkface_dataset(dataset_path)
X_train = (X_train - 0.5) * 2 # Normalize images to [-1, 1]
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# Compile the discriminator
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5), metrics=['accuracy'])
for epoch in range(epochs):
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
noise = np.random.normal(0, 1, (batch_size, 100))
gen_imgs = generator.predict(noise)
d_loss_real = discriminator.train_on_batch(imgs, valid)
d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss = combined.train_on_batch(noise, valid)
print(f"{epoch} [D loss: {d_loss[0]} | D accuracy: {100*d_loss[1]}] [G loss: {g_loss}]")
if epoch % save_interval == 0:
save_images(generator, epoch)
save_model(generator, epoch)
# Save the generated images
def save_images(generator, epoch, output_dir="images"):
noise = np.random.normal(0, 1, (25, 100))
gen_imgs = generator.predict(noise)
gen_imgs = 0.5 * gen_imgs + 0.5 # Rescale images 0 - 1
os.makedirs(output_dir, exist_ok=True)
for i in range(25):
img = gen_imgs[i]
cv2.imwrite(f"{output_dir}/img_{epoch}_{i}.png", cv2.cvtColor(img * 255, cv2.COLOR_RGB2BGR))
# Save the generator model
def save_model(generator, epoch, output_dir="models"):
os.makedirs(output_dir, exist_ok=True)
generator.save(f"{output_dir}/generator_epoch_{epoch}.h5")
print(f"Modèle générateur sauvegardé à l'époque {epoch}")
if __name__ == "__main__":
dataset_path = "./UTKFace/part1/part1" # Modifier selon l'emplacement du dataset
generator = build_generator()
discriminator = build_discriminator()
combined = build_gan(generator, discriminator)
train_gan(generator, discriminator, combined, epochs=10000, batch_size=32, save_interval=200, dataset_path=dataset_path)
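# Minimal sampling sketch, assuming training reached save_interval so that a
# checkpoint such as "models/generator_epoch_200.h5" exists (hypothetical name):
# from tensorflow.keras.models import load_model
# g = load_model("models/generator_epoch_200.h5")
# noise = np.random.normal(0, 1, (1, 100))
# face = 0.5 * g.predict(noise)[0] + 0.5  # rescale tanh output to [0, 1]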

@ -1,106 +0,0 @@
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Concatenate, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from sklearn.model_selection import train_test_split
# Load the UTKFace dataset
def load_utkface_dataset(dataset_path):
images = []
ages = []
for file_name in os.listdir(dataset_path):
if file_name.endswith(".jpg"):
age = int(file_name.split("_")[0]) # Extract the age from the file name
img_path = os.path.join(dataset_path, file_name)
img = load_img(img_path, target_size=(128, 128))
img_array = img_to_array(img) / 255.0
images.append(img_array)
ages.append(age)
return np.array(images), np.array(ages)
# Define the U-Net model
def build_unet(input_shape):
inputs = Input(shape=input_shape)
# Encoder
c1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(64, (3, 3), activation='relu', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)
c2 = Conv2D(128, (3, 3), activation='relu', padding='same')(p1)
c2 = Dropout(0.1)(c2)
c2 = Conv2D(128, (3, 3), activation='relu', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)
c3 = Conv2D(256, (3, 3), activation='relu', padding='same')(p2)
c3 = Dropout(0.2)(c3)
c3 = Conv2D(256, (3, 3), activation='relu', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)
c4 = Conv2D(512, (3, 3), activation='relu', padding='same')(p3)
c4 = Dropout(0.2)(c4)
c4 = Conv2D(512, (3, 3), activation='relu', padding='same')(c4)
p4 = MaxPooling2D((2, 2))(c4)
c5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(p4)
c5 = Dropout(0.3)(c5)
c5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(c5)
# Decoder
u6 = UpSampling2D((2, 2))(c5)
u6 = Conv2D(512, (2, 2), activation='relu', padding='same')(u6)
u6 = Concatenate()([u6, c4])
c6 = Conv2D(512, (3, 3), activation='relu', padding='same')(u6)
c6 = Dropout(0.2)(c6)
c6 = Conv2D(512, (3, 3), activation='relu', padding='same')(c6)
u7 = UpSampling2D((2, 2))(c6)
u7 = Conv2D(256, (2, 2), activation='relu', padding='same')(u7)
u7 = Concatenate()([u7, c3])
c7 = Conv2D(256, (3, 3), activation='relu', padding='same')(u7)
c7 = Dropout(0.2)(c7)
c7 = Conv2D(256, (3, 3), activation='relu', padding='same')(c7)
u8 = UpSampling2D((2, 2))(c7)
u8 = Conv2D(128, (2, 2), activation='relu', padding='same')(u8)
u8 = Concatenate()([u8, c2])
c8 = Conv2D(128, (3, 3), activation='relu', padding='same')(u8)
c8 = Dropout(0.1)(c8)
c8 = Conv2D(128, (3, 3), activation='relu', padding='same')(c8)
u9 = UpSampling2D((2, 2))(c8)
u9 = Conv2D(64, (2, 2), activation='relu', padding='same')(u9)
u9 = Concatenate()([u9, c1])
c9 = Conv2D(64, (3, 3), activation='relu', padding='same')(u9)
c9 = Dropout(0.1)(c9)
c9 = Conv2D(64, (3, 3), activation='relu', padding='same')(c9)
outputs = Conv2D(3, (1, 1), activation='sigmoid')(c9)
model = Model(inputs, outputs)
return model
# Train the U-Net model
def train_unet(model, X_train, X_test, epochs, batch_size):
model.compile(optimizer=Adam(learning_rate=1e-4), loss='mean_squared_error', metrics=['accuracy'])
model.fit(X_train, X_train, validation_data=(X_test, X_test), epochs=epochs, batch_size=batch_size)
model.save("unet_face_aging_model.h5")
print("Modèle entraîné et sauvegardé avec succès !")
if __name__ == "__main__":
dataset_path = "./UTKFace/part1/part1" # Modifier selon l'emplacement du dataset
images, ages = load_utkface_dataset(dataset_path)
# Split the data into training and test sets
X_train, X_test, _, _ = train_test_split(images, ages, test_size=0.2, random_state=42)
# Build the U-Net model
input_shape = (128, 128, 3)
unet_model = build_unet(input_shape)
# Train the U-Net model
train_unet(unet_model, X_train, X_test, epochs=100, batch_size=32)

@ -1,49 +0,0 @@
import numpy as np
import cv2
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
# Load the trained generator model
generator = load_model("aging_generator_model.h5")
# Function to load and preprocess an image
def load_image(image_path, img_size=(200, 200)):
img = cv2.imread(image_path)
img = cv2.resize(img, img_size)
img = img / 255.0 # Normalize
return np.expand_dims(img, axis=0) # Add batch dimension
# Function to save and display the original and aged images
def save_and_display_images(original_img_path, aged_img, output_path):
original_img = cv2.imread(original_img_path)
aged_img = (aged_img[0] * 255).astype(np.uint8) # Denormalize
aged_img = cv2.cvtColor(aged_img, cv2.COLOR_RGB2BGR)
# Save the aged image
cv2.imwrite(output_path, aged_img)
# Display the original and aged images
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB))
axes[0].set_title("Original Image")
axes[0].axis("off")
axes[1].imshow(cv2.cvtColor(aged_img, cv2.COLOR_BGR2RGB))
axes[1].set_title("Aged Image")
axes[1].axis("off")
plt.tight_layout()
plt.show()
# Path to the input image
input_image_path = "visage.jpg" # Update with your input image path
output_image_path = "aged_face_result.jpg"
# Load and preprocess the input image
input_image = load_image(input_image_path)
# Generate the aged face
aged_face = generator.predict(input_image)
# Save and display the original and aged images
save_and_display_images(input_image_path, aged_face, output_image_path)
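# Note: load_image feeds the network BGR pixels scaled to [0, 1]; if the
# generator was trained on RGB inputs in [-1, 1] (the (x - 127.5) / 127.5
# pipelines above), predictions will be off. Inference preprocessing should
# mirror the training pipeline exactly.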

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.3 KiB

After

Width:  |  Height:  |  Size: 6.0 KiB
