diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000..371cfe3
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.11.1
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..e69de29
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/binary-compile.py b/binary-compile.py
index 87f07e3..eccd19c 100644
--- a/binary-compile.py
+++ b/binary-compile.py
@@ -1,136 +1,34 @@
-import numpy as np
-import matplotlib.pyplot as plt
-import tensorflow as tf
-import os
+from tflite_model_maker import model_spec
+from tflite_model_maker import image_classifier
+from tflite_model_maker.config import ExportFormat
+from tflite_model_maker.config import QuantizationConfig
+from tflite_model_maker.image_classifier import DataLoader
-DIR = "data/binary-classification"
+DATA = "data/binary-classification/"
+MODELS = ["mobilenet_v2", "efficientnet_lite3", "efficientnet_lite4"]
-#Import Data
-PATH = os.path.join(os.getcwd(), DIR)
-
-training_data = os.path.join(PATH, "train")
-validation_data = os.path.join(PATH, "valid")
-test_data = os.path.join(PATH, "test")
-
-BATCH_SIZE = 64
-IMG_SIZE = (224,224)
-
-#TODO: Import data from both directories, then resplit into test, train, and validation
-
-print(f"Train: {len(training_data)}\nValid: {len(validation_data)}\nTest: {len(test_data)}")
+train_data = DataLoader.from_folder(DATA + "train")
+test_data = DataLoader.from_folder(DATA + "test")
+train_data, valid_data = train_data.split(0.8)
-#View Data
-plt.figure(figsize=(10,10))
-for images, labels in training_data.take(1):
-    for i in range(9):
-        ax = plt.subplot(3, 3, i+1)
-        plt.imshow(images[i].numpy().astype("uint8"))
-        plt.title(class_names[labels[i]])
-        plt.axis("off")
-plt.show()
+for i in range(len(MODELS)):
+    model = image_classifier.create(train_data,
+                                    validation_data=valid_data,
+                                    model_spec=model_spec.get(MODELS[i]),
+                                    epochs=50,
+                                    learning_rate=0.0001,
+                                    dropout_rate=0.2,
+                                    batch_size=64,
+                                    use_augmentation=True)
+    model.summary()
+    loss, accuracy = model.evaluate(test_data)
-#Init Prefetching
-AUTOTUNE = tf.data.AUTOTUNE
-training_data = training_data.prefetch(buffer_size=AUTOTUNE)
-validation_data = validation_data.prefetch(buffer_size=AUTOTUNE)
-test_data = test_data.prefetch(buffer_size=AUTOTUNE)
+    config = QuantizationConfig.for_float16()
+    filename = f"dermy-binary-classification-{MODELS[i]}.tflite"
-
-#Data Augmentation
-data_augmentation = tf.keras.Sequential([
-    tf.keras.layers.RandomFlip('horizontal'),
-    tf.keras.layers.RandomRotation(0.2)
-    ])
-
-#Create Base Model From MobileNetV3
-IMG_SHAPE = IMG_SIZE + (3,)
-base_model = tf.keras.applications.MobileNetV3Large(
-    input_shape=IMG_SHAPE,
-    include_top=False,
-    weights="imagenet"
-    )
-
-image_batch, label_batch = next(iter(training_data))
-feature_batch = base_model(image_batch)
-
-base_model.trainable = False
-
-
-#View Base Model
-base_model.summary()
-
-#Add Classification Header
-
-
-global_avg_layer = tf.keras.layers.GlobalAveragePooling2D()
-feature_batch_avg = global_avg_layer(feature_batch)
-
-prediction_layer = tf.keras.layers.Dense(38, activation="softmax")
-predication_batch = prediction_layer(feature_batch_avg)
-
-inputs = tf.keras.Input(shape=(160,160,3))
-x = data_augmentation(inputs)
-x = base_model(x, training=False)
-x = global_avg_layer(x)
-x = tf.keras.layers.Dropout(0.2)(x)
-outputs = prediction_layer(x)
-
-model = tf.keras.Model(inputs, outputs)
-
-#View Model with Classification Head
-model.summary()
-
-
-#Compile the Model
-base_learning_rate = 0.0001
-
-training_data = training_data.map(lambda x,y: (x, tf.one_hot(y,38)))
-validation_data = validation_data.map(lambda x,y: (x, tf.one_hot(y,38)))
-test_data = test_data.map(lambda x,y: (x, tf.one_hot(y,38)))
-
-optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate)
-loss = tf.keras.losses.CategoricalCrossentropy()
-metrics = [tf.keras.metrics.CategoricalAccuracy()]
-
-
-
-model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
-
-#Train the Model
-initial_epochs = 50
-
-loss0, accuracy0 = model.evaluate(validation_data)
-
-print(f"initial loss: {loss0}")
-print(f"initial accuracy: {accuracy0}")
-
-lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(
-    monitor="val_loss",
-    factor=0.1,
-    patience=5,
-    min_lr=1e-6
-    )
-
-early_stopping = tf.keras.callbacks.EarlyStopping(
-    monitor="val_loss",
-    patience=10,
-    restore_best_weights=True
-    )
-
-history = model.fit(training_data,
-                    epochs=initial_epochs,
-                    validation_data=validation_data,
-                    callbacks=[lr_schedule, early_stopping])
-
-model.save("crop-classifier-better-test.keras")
-
-#Evaluate Model
-results = model.evaluate(validation_data)
-print(f"Validation Loss: {results[0]}")
-print(f"Validation Accuracy: {results[1]}")
-
-results = model.evaluate(test_data)
-print(f"Test Loss: {results[0]}")
-print(f"Test Accuracy: {results[1]}")
+    model.export(export_dir="./models",
+                 export_format=ExportFormat.TFLITE,
+                 tflite_filename=filename,
+                 quantization_config=config)
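For reference, a minimal smoke-test sketch for the classifiers exported by binary-compile.py, using TensorFlow Lite's standard Interpreter API. The model filename and the dummy input are only examples, and the assumption that the exported graph takes a single image batch and returns per-class scores is illustrative rather than confirmed by this diff:

import numpy as np
import tensorflow as tf

# Any of the exported models from binary-compile.py; the mobilenet_v2 file is
# picked here purely as an example.
MODEL_PATH = "models/dermy-binary-classification-mobilenet_v2.tflite"

interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# Read the expected input shape and dtype from the model itself, since the
# input size differs between the MobileNet and EfficientNet-Lite variants.
_, height, width, channels = input_details["shape"]
dummy_image = np.random.rand(1, height, width, channels).astype(input_details["dtype"])

interpreter.set_tensor(input_details["index"], dummy_image)
interpreter.invoke()

# For a two-class classifier the output is expected to be one score per class.
scores = interpreter.get_tensor(output_details["index"])[0]
print("Class scores:", scores)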
diff --git a/convert.py b/convert.py
new file mode 100644
index 0000000..f2cd5f5
--- /dev/null
+++ b/convert.py
@@ -0,0 +1,17 @@
+import tensorflow as tf
+
+MODEL = "models/dermy-binary-classifier"
+
+
+model = tf.keras.models.load_model(MODEL + ".keras")
+
+converter = tf.lite.TFLiteConverter.from_keras_model(model)
+
+converter.allow_custom_ops = True
+converter.experimental_enable_resource_variables = True
+converter.experimental_new_converter = True
+
+converted_model = converter.convert()
+
+with open(MODEL + ".tflite", "wb") as file:
+    file.write(converted_model)
diff --git a/models/dermy-binary-classification-efficientnet_lite3.tflite b/models/dermy-binary-classification-efficientnet_lite3.tflite
new file mode 100644
index 0000000..7c3a7ca
Binary files /dev/null and b/models/dermy-binary-classification-efficientnet_lite3.tflite differ
diff --git a/models/dermy-binary-classification-efficientnet_lite4.tflite b/models/dermy-binary-classification-efficientnet_lite4.tflite
new file mode 100644
index 0000000..12228ae
Binary files /dev/null and b/models/dermy-binary-classification-efficientnet_lite4.tflite differ
diff --git a/models/dermy-binary-classification-mobilenet_v2.tflite b/models/dermy-binary-classification-mobilenet_v2.tflite
new file mode 100644
index 0000000..4aa48ba
Binary files /dev/null and b/models/dermy-binary-classification-mobilenet_v2.tflite differ
diff --git a/models/dermy-binary-classification.tflite b/models/dermy-binary-classification.tflite
new file mode 100644
index 0000000..5974366
Binary files /dev/null and b/models/dermy-binary-classification.tflite differ
diff --git a/models/dermy-binary-classifier.keras b/models/dermy-binary-classifier.keras
new file mode 100644
index 0000000..b45b063
Binary files /dev/null and b/models/dermy-binary-classifier.keras differ
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index cda0458..0000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[build-system]
-requires = ["setuptools", "wheel"]
-build-backend = "setuptools.build_meta"
-
-[project]
-name = "dermy-model"
-version = "0.1.0"
-description = "A Image Classification Model for classifying Moles"
-authors = [{ name = "r0r5chach", email = "r0r-5chach.xyz@proton.me" }]
-readme = "README.md"
-license = { file = "LICENSE" }
-dependencies = [
-    "matplotlib",
-    "numpy",
-    "tensorflow",
-]
-
-[too.setuptools]
-packages = ["dermy-model"]
-include_package_data = true
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..2c22694
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,29 @@
+tf-models-official==2.3.0
+# tensorflow-hub is to load Hub model. Specific version is required by TFJS.
+tensorflow-hub>=0.7.0,<0.10; python_version < "3"
+tensorflow-hub>=0.7.0,<0.13; python_version >= "3"
+numpy>=1.17.3,<1.23.4
+pillow>=7.0.0
+sentencepiece>=0.1.91
+tensorflow-datasets>=2.1.0
+fire>=0.3.1
+flatbuffers>=2.0
+absl-py>=0.10.0
+urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1
+tflite-support>=0.4.2
+tensorflowjs>=2.4.0,<3.19.0
+tensorflow>=2.6.0
+# b/196287362: This Numba + Librosa combination works for numpy 1.19, introduced
+# by TensorFlow 2.6.0.
+numba>=0.53
+librosa==0.8.1
+lxml>=4.6.1
+PyYAML>=5.1
+# The following are the requirements of efficientdet.
+matplotlib>=3.0.3,<3.5.0
+six>=1.12.0
+tensorflow-addons>=0.11.2
+neural-structured-learning>=1.3.1
+tensorflow-model-optimization>=0.5
+Cython>=0.29.13
+scann==1.2.6