diff --git a/binary-compile.py b/binary-compile.py
deleted file mode 100644
index 587a070..0000000
--- a/binary-compile.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import tensorflow as tf
-import os
-from safetensors.numpy import save_file
-DIR = "data/binary-classification"
-
-#Import Data
-PATH = os.path.join(os.getcwd(), DIR)
-
-training_path = os.path.join(PATH, "train")
-test_path = os.path.join(PATH, "test")
-
-BATCH_SIZE = 64
-IMG_SIZE = (224,224)
-
-#TODO: Import data from both directories, then resplit into test, train, and validation
-
-training_data = tf.keras.utils.image_dataset_from_directory(training_path,
-                                                            shuffle=True,
-                                                            batch_size=BATCH_SIZE,
-                                                            image_size=IMG_SIZE,
-                                                            validation_split=0.2,
-                                                            subset="training",
-                                                            seed=1234)
-validation_data = tf.keras.utils.image_dataset_from_directory(training_path,
-                                                              shuffle=True,
-                                                              batch_size=BATCH_SIZE,
-                                                              image_size=IMG_SIZE,
-                                                              validation_split=0.2,
-                                                              subset="validation",
-                                                              seed=1234)
-test_data = tf.keras.utils.image_dataset_from_directory(test_path,
-                                                        shuffle=True,
-                                                        batch_size=BATCH_SIZE,
-                                                        image_size=IMG_SIZE)
-
-
-
-
-#View Data
-
-
-
-#Init Prefetching
-AUTOTUNE = tf.data.AUTOTUNE
-training_data = training_data.prefetch(buffer_size=AUTOTUNE)
-validation_data = validation_data.prefetch(buffer_size=AUTOTUNE)
-test_data = test_data.prefetch(buffer_size=AUTOTUNE)
-
-
-#Data Augmentation
-data_augmentation = tf.keras.Sequential([
-    tf.keras.layers.RandomFlip('horizontal'),
-    tf.keras.layers.RandomRotation(0.2)
-    ])
-
-#Create Base Model From MobileNetV3
-IMG_SHAPE = IMG_SIZE + (3,)
-base_model = tf.keras.applications.MobileNetV3Large(
-    input_shape=IMG_SHAPE,
-    include_top=False,
-    weights="imagenet"
-    )
-
-image_batch, label_batch = next(iter(training_data))
-feature_batch = base_model(image_batch)
-
-base_model.trainable = False
-
-
-#View Base Model
-base_model.summary()
-
-#Add Classification Header
-
-
-global_avg_layer = tf.keras.layers.GlobalAveragePooling2D()
-feature_batch_avg = global_avg_layer(feature_batch)
-
-prediction_layer = tf.keras.layers.Dense(2, activation="softmax")
-predication_batch = prediction_layer(feature_batch_avg)
-
-inputs = tf.keras.Input(shape=IMG_SHAPE)
-x = data_augmentation(inputs)
-x = base_model(x, training=False)
-x = global_avg_layer(x)
-x = tf.keras.layers.Dropout(0.2)(x)
-outputs = prediction_layer(x)
-
-model = tf.keras.Model(inputs, outputs)
-
-#View Model with Classification Head
-model.summary()
-
-
-#Compile the Model
-base_learning_rate = 0.0001
-
-training_data = training_data.map(lambda x,y: (x, tf.one_hot(y,2)))
-validation_data = validation_data.map(lambda x,y: (x, tf.one_hot(y,2)))
-test_data = test_data.map(lambda x,y: (x, tf.one_hot(y,2)))
-
-optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate)
-loss = tf.keras.losses.CategoricalCrossentropy()
-metrics = [tf.keras.metrics.CategoricalAccuracy()]
-
-model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
-
-#Train the Model
-initial_epochs = 50
-
-loss0, accuracy0 = model.evaluate(validation_data)
-
-print(f"initial loss: {loss0}")
-print(f"initial accuracy: {accuracy0}")
-
-lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(
-    monitor="val_loss",
-    factor=0.1,
-    patience=5,
-    min_lr=1e-6
-    )
-
-early_stopping = tf.keras.callbacks.EarlyStopping(
-    monitor="val_loss",
-    patience=10,
-    restore_best_weights=True
-    )
-
-history = model.fit(training_data,
-                    epochs=initial_epochs,
-                    validation_data=validation_data,
-                    callbacks=[lr_schedule, early_stopping])
-
-
-#Evaluate Model
-results = model.evaluate(validation_data)
-print(f"Validation Loss: {results[0]}") -print(f"Validation Accuracy: {results[1]}") - -results = model.evaluate(test_data) -print(f"Test Loss: {results[0]}") -print(f"Test Accuracy: {results[1]}") - -weights = model.get_weights() -weights_dict = {f"weight_{i}": w for i, w in enumerate(weights)} - -save_file(weights_dict, "models/mobilenet_v3.safetensors") diff --git a/compile.py b/compile.py new file mode 100644 index 0000000..a7d8151 --- /dev/null +++ b/compile.py @@ -0,0 +1,136 @@ +import tensorflow as tf +import tensorflow.keras as tfk +import os + + +# Parameters +DATA_DIR = os.path.join(os.getcwd(), "data") +IMG_SIZE = (224, 224) +IMG_SHAPE = IMG_SIZE + (3,) +BATCH_SIZE = 64 +AUTOTUNE = tf.data.AUTOTUNE +BASE_LEARNING_RATE = 0.0001 +CLASSES = 2 + + +# Import Data +training_path = os.path.join(DATA_DIR, "train") +test_path = os.path.join(DATA_DIR, "test") + + +training_data = tfk.utils.image_dataset_from_directory(training_path, + shuffle=True, + batch_size=BATCH_SIZE, + image_size=IMG_SIZE, + validation_split=0.2, + subset="training", + seed=1234) +validation_data = tfk.utils.image_dataset_from_directory(training_path, + shuffle=True, + batch_size=BATCH_SIZE, + image_size=IMG_SIZE, + validation_split=0.2, + subset="validation", + seed=1234) +test_data = tfk.utils.image_dataset_from_directory(test_path, + shuffle=True, + batch_size=BATCH_SIZE, + image_size=IMG_SIZE) + + +# Init Prefetching +training_data = training_data.prefetch(buffer_size=AUTOTUNE) +validation_data = validation_data.prefetch(buffer_size=AUTOTUNE) +test_data = test_data.prefetch(buffer_size=AUTOTUNE) + + +# Data Augmentation Layer +data_augmentation = tf.keras.Sequential([ + tf.keras.layers.RandomFlip('horizontal'), + tf.keras.layers.RandomRotation(0.2) + ]) + + +# Create Base Model From MobileNetV3Large +base_model = tf.keras.applications.MobileNetV3Large( + input_shape=IMG_SHAPE, + include_top=False, + weights="imagenet" + ) + +image_batch, label_batch = next(iter(training_data)) +feature_batch = base_model(image_batch) + +base_model.trainable = False + + +# Add Classification Header +global_avg_layer = tf.keras.layers.GlobalAveragePooling2D() +feature_batch_avg = global_avg_layer(feature_batch) + +prediction_layer = tf.keras.layers.Dense(CLASSES, activation="softmax") +predication_batch = prediction_layer(feature_batch_avg) + +inputs = tf.keras.Input(shape=IMG_SHAPE) +x = data_augmentation(inputs) +x = base_model(x, training=False) +x = global_avg_layer(x) +x = tf.keras.layers.Dropout(0.2)(x) +outputs = prediction_layer(x) + +model = tf.keras.Model(inputs, outputs) + + +# Hotencode Data +training_data = training_data.map(lambda x, y: (x, tf.one_hot(y, CLASSES))) +validation_data = validation_data.map(lambda x, y: (x, tf.one_hot(y, CLASSES))) +test_data = test_data.map(lambda x, y: (x, tf.one_hot(y, CLASSES))) + + +# Compile Model +optimizer = tf.keras.optimizers.Adam(learning_rate=BASE_LEARNING_RATE) +loss = tf.keras.losses.CategoricalCrossentropy() +metrics = [tf.keras.metrics.CategoricalAccuracy()] + +model.compile(optimizer=optimizer, loss=loss, metrics=metrics) + + +# Train the Model +initial_epochs = 50 + +loss0, accuracy0 = model.evaluate(validation_data) + +print(f"initial loss: {loss0}") +print(f"initial accuracy: {accuracy0}") + +lr_schedule = tf.keras.callbacks.ReduceLROnPlateau( + monitor="val_loss", + factor=0.1, + patience=5, + min_lr=1e-6 + ) + +early_stopping = tf.keras.callbacks.EarlyStopping( + monitor="val_loss", + patience=10, + restore_best_weights=True + ) + +history = 
+                    epochs=initial_epochs,
+                    validation_data=validation_data,
+                    callbacks=[lr_schedule, early_stopping])
+
+
+# Evaluate Model
+results = model.evaluate(validation_data)
+print(f"Validation Loss: {results[0]}")
+print(f"Validation Accuracy: {results[1]}")
+
+results = model.evaluate(test_data)
+print(f"Test Loss: {results[0]}")
+print(f"Test Accuracy: {results[1]}")
+
+
+# Save Model
+model.save("models/mobilenet_v3.keras")
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0f57144
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+tensorflow
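
Note: once this change lands, the trained model can be reloaded from the .keras archive for inference. A minimal sketch, assuming a trained models/mobilenet_v3.keras produced by compile.py; the image path "example.jpg" is a hypothetical placeholder, not part of this repository:

    import tensorflow as tf

    # Reload the full model (architecture, weights, optimizer state) saved by compile.py.
    model = tf.keras.models.load_model("models/mobilenet_v3.keras")

    # Decode one image and resize it to the (224, 224) input the model expects.
    # MobileNetV3 from tf.keras.applications rescales pixel values internally,
    # so the raw [0, 255] range can be passed straight in.
    raw = tf.io.read_file("example.jpg")  # placeholder path
    img = tf.image.resize(tf.image.decode_jpeg(raw, channels=3), (224, 224))
    batch = tf.expand_dims(img, axis=0)  # add batch dimension: (1, 224, 224, 3)

    # The softmax head yields one probability per class.
    probs = model.predict(batch)[0]
    print(f"class probabilities: {probs}")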