You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

110 lines
3.5 KiB

import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Parameters
BATCH_SIZE = 16  # samples drawn from each age group per training step
IMG_SIZE = (200, 200)  # (width, height) target size passed to cv2.resize
DATASET_PATH = "UTKFace/part1/part1"  # UTKFace images, filenames start with "<age>_"
EPOCHS = 50  # training iterations; each iteration trains on ONE random batch
# Data Loader
def load_image(image_path):
    """Load an image from disk, resize it to IMG_SIZE, and normalize to [0, 1].

    Args:
        image_path: Path to an image file readable by OpenCV (BGR order).

    Returns:
        A float array of shape (IMG_SIZE[1], IMG_SIZE[0], 3) with values in [0, 1].

    Raises:
        ValueError: If the file cannot be decoded. cv2.imread signals failure
            by returning None instead of raising; without this check the
            subsequent cv2.resize fails with an opaque assertion error.
    """
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Could not read image: {image_path}")
    img = cv2.resize(img, IMG_SIZE)
    img = img / 255.0  # Normalize to [0, 1] to match the generator's sigmoid output
    return img
def parse_age(filename):
    """Extract the age from a UTKFace filename ("<age>_<gender>_<race>_<date>.jpg").

    Args:
        filename: Base name of the image file.

    Returns:
        The age as an int, or None when the leading field is not an integer.
    """
    try:
        return int(filename.split("_")[0])
    except ValueError:
        # Only int() can fail here (split("_")[0] always yields at least "").
        # A narrow except avoids swallowing unrelated errors like the
        # original bare `except:` did.
        return None
# Load Data: partition the dataset into "young" (< 30) and "old" (> 50) faces.
# Ages in [30, 50] are deliberately skipped to keep the two classes well separated.
young_images, old_images = [], []
for fname in os.listdir(DATASET_PATH):
    age = parse_age(fname)
    if age is None:
        continue  # filename does not start with an integer age field
    image = load_image(os.path.join(DATASET_PATH, fname))
    if age < 30:
        young_images.append(image)
    elif age > 50:
        old_images.append(image)
# Stack into (N, H, W, 3) arrays for batched indexing during training.
young_images = np.array(young_images)
old_images = np.array(old_images)
# Define Generator
def build_generator():
    """Build the image-to-image generator (200x200x3 in, 200x200x3 out).

    A fully convolutional stack with no downsampling, so spatial size is
    preserved throughout; the final sigmoid keeps outputs in [0, 1] to
    match the normalized inputs.
    """
    model = models.Sequential()
    model.add(layers.Input(shape=(200, 200, 3)))
    # Encoder: widen the channel dimension while keeping 200x200 resolution.
    for n_filters in (64, 128, 256):
        model.add(layers.Conv2D(n_filters, (3, 3), padding="same", activation="relu"))
    # Decoder: narrow back down (strides=1, so still no spatial change).
    for n_filters in (128, 64):
        model.add(layers.Conv2DTranspose(n_filters, (3, 3), strides=1, padding="same", activation="relu"))
    # Project to 3 channels; sigmoid ensures the output remains a valid image.
    model.add(layers.Conv2D(3, (3, 3), padding="same", activation="sigmoid"))
    return model
# Define Discriminator
def build_discriminator():
    """Build the real/fake classifier (200x200x3 in, single sigmoid out)."""
    model = models.Sequential()
    model.add(layers.Input(shape=(200, 200, 3)))
    # Two conv + pool stages halve the resolution twice (200 -> 100 -> 50).
    for n_filters in (64, 128):
        model.add(layers.Conv2D(n_filters, (3, 3), padding="same", activation="relu"))
        model.add(layers.MaxPooling2D())
    # Flatten to a single probability: 1 = "real old face", 0 = "generated".
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation="sigmoid"))
    return model
# Build and Compile Models
generator = build_generator()
discriminator = build_discriminator()
# Compile the discriminator BEFORE it is frozen inside aging_gan() below:
# this standalone compiled model keeps its weights trainable for the
# discriminator's own train_on_batch updates.
discriminator.compile(optimizer=tf.keras.optimizers.Adam(0.0002), loss='binary_crossentropy')
def aging_gan(generator, discriminator):
    """Stack the generator and a frozen discriminator into the combined GAN.

    Freezing takes effect only for the model compiled here; the
    discriminator compiled earlier still updates its own weights.
    The combined model maps a young face to the discriminator's
    realness score, so training it updates only the generator.
    """
    discriminator.trainable = False
    face_input = layers.Input(shape=(200, 200, 3))
    aged_face = generator(face_input)
    realness = discriminator(aged_face)
    combined = models.Model(face_input, realness)
    combined.compile(optimizer=tf.keras.optimizers.Adam(0.0002), loss='binary_crossentropy')
    return combined
# Train the GAN
# Combined model: trains the generator against the (frozen) discriminator.
gan = aging_gan(generator, discriminator)
def train_gan(epochs, batch_size):
    """Adversarial training loop: one random mini-batch per "epoch".

    Uses the module-level young_images / old_images arrays and the
    generator / discriminator / gan models. Prints the discriminator
    and generator losses after every step.
    """
    for epoch in range(epochs):
        # Sample a random batch from each age group (with replacement).
        young_idx = np.random.randint(0, young_images.shape[0], batch_size)
        young_batch = young_images[young_idx]
        old_idx = np.random.randint(0, old_images.shape[0], batch_size)
        old_batch = old_images[old_idx]
        # Generate aged faces from the young batch.
        generated_old = generator.predict(young_batch)
        # Discriminator step: real old faces -> 1, generated faces -> 0.
        real_labels = np.ones((batch_size, 1))
        fake_labels = np.zeros((batch_size, 1))
        d_loss_real = discriminator.train_on_batch(old_batch, real_labels)
        d_loss_fake = discriminator.train_on_batch(generated_old, fake_labels)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # Generator step (through the combined GAN, discriminator frozen):
        # reward the generator for fooling the discriminator into "real".
        g_loss = gan.train_on_batch(young_batch, real_labels)
        print(f"Epoch {epoch+1}/{epochs} - D Loss: {d_loss:.4f}, G Loss: {g_loss:.4f}")
# Run training with the module-level hyperparameters.
train_gan(EPOCHS, BATCH_SIZE)
# Save Model
# Only the generator is persisted; it alone is needed for inference.
generator.save("aging_generator_model.h5")