1. 라이브러리 Import
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import imageio
from tensorflow.keras.preprocessing.image import ImageDataGenerator
2. Dataset 불러오기
# Download the dermatology dataset archive and unpack it.
# keras.utils.get_file extracts into <cache_dir>/datasets by default,
# so the data lands under /content/datasets.
url = 'https://github.com/******/******/blob/main/derm_dataset.zip?raw=true'
path_to_zip = keras.utils.get_file(
    'dataset.zip',
    origin=url,
    extract=True,
    cache_dir='/content',
)
# Alternatively, the same can be done with shell commands:
# !mkdir "/content/dataset"
# !wget 'https://github.com/******/******/blob/main/dataset.zip?raw=true' -O "/content/dataset/dataset.zip"
# !unzip "/content/dataset/dataset.zip" -d "/content/dataset"
3. Train Set, Validation Set 지정(Directory 통째로)
# Directory roots of the extracted dataset; get_file above unpacks into
# /content/datasets, which holds one subfolder per class.
DATA_ROOT = "/content/datasets"
train_dir = DATA_ROOT + "/train"
validation_dir = DATA_ROOT + "/validation"
4. ImageDataGenerator를 사용한 Augmentation
4-1. 객체 생성
Image Tensor를 Min-Max Scaling (0~1값)
Train Data Set을 Rotation, Shift, Zoom, Flip 등을 통한 Image Augmentation 수행
Validation Set은 증식없이 진행
# Both generators min-max scale pixels to [0, 1]; the training generator
# additionally applies random rotation/shift/shear/zoom/flip augmentation,
# while the validation generator is left un-augmented.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=30,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
4-2. 객체 속성 지정
Train set, Validation set 연결
이미지 사이즈, 배치사이즈, Class Mode 지정
# Wire the generators to their directories: 128x128 images, batches of 32,
# one-hot labels (class_mode='categorical'). Only training data is shuffled.
IMG_SIZE = (128, 128)
BATCH_SIZE = 32

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    shuffle=True,
    class_mode='categorical',
)
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
)
4-3. Generator로부터 생성된 Image와 Label을 각각 x, y 인자로 받음
# Pull one batch from each generator and sanity-check counts and shapes.
x1, y1 = next(train_generator)
x2, y2 = next(validation_generator)

print("train_generator 배치수:", len(train_generator))
print("validation_generator 배치수:", len(validation_generator))
print("train_generator (배치크기,x해상도,y해상도, Colorlayer):", x1.shape)
print("train_label (배치크기):", y1.shape)
print("validation_generator (배치크기,x해상도,y해상도, Colorlayer):", x2.shape)
print("validation_label (배치크기):", y2.shape)

# Show the first five augmented training images side by side.
stacked = np.hstack(list(x1[:5]))
plt.figure(figsize=(20, 4))
plt.imshow(stacked)

# Class-index -> label-name map, reused later by the prediction cell.
# NOTE(review): this shadows the builtin `dict`; the name is kept because
# the prediction cell below looks it up under this name.
dict = {0: "acne", 1: "agespot", 2: "flakyskin", 3: "reddish"}
# Decode the one-hot labels of those five images (displayed by the notebook).
[dict[int(np.argmax(label))] for label in y1[:5]]
5. 모델 설계
5-1. Backbone 모델 설정
#InceptionResNetV2 (Acc=0.8~0.9)
# Backbone: ImageNet-pretrained InceptionResNetV2 used as a feature
# extractor (classifier head dropped via include_top=False). Acc=0.8~0.9
conv_base = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(
    weights='imagenet',
    include_top=False,
    input_shape=(128, 128, 3),
)
# Alternative backbones that were tried:
# Xception (Acc=0.8~0.9)
#conv_base = tf.keras.applications.xception.Xception(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# VGG19 (Acc=0.6~0.7)
#conv_base = tf.keras.applications.vgg19.VGG19(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# MobileNetV2 (Acc=0.7~0.8)
#conv_base = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
5-2. 분류기(Classifier) 설계
# Classifier head: flatten backbone features, one hidden ReLU layer,
# then a 4-way softmax (one unit per skin-condition class).
model = keras.Sequential([
    conv_base,
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dense(4, activation='softmax'),
])
5-3. Backbone 모델의 학습된 Parameter는 추가 학습 진행 안하도록 설정
# Freeze the pretrained backbone so only the new Dense head is trained.
conv_base.trainable = False
5-4. 설계한 모델확인
# Print the layer/parameter summary of the assembled model.
model.summary()
5-5. Optimizer 설정
# Nadam optimizer with the default-ish 1e-3 learning rate; categorical
# cross-entropy matches the one-hot labels from class_mode='categorical'.
optimizer = tf.keras.optimizers.Nadam(learning_rate=0.001)
model.compile(
    loss='categorical_crossentropy',
    optimizer=optimizer,
    metrics=['accuracy'],
)
6. 모델 학습
학습 Data set 지정: train_generator
배치수 지정(steps_per_epoch)
훈련횟수 지정(epochs)
검증데이터셋 및 배치수 지정
# Train for 50 epochs, iterating every batch of both generators each epoch.
EPOCHS = 50
history = model.fit(
    train_generator,
    validation_data=validation_generator,
    steps_per_epoch=len(train_generator),
    validation_steps=len(validation_generator),
    epochs=EPOCHS,
)
7. 학습결과 시각화
# Plot training/validation accuracy and loss curves from the fit history.
hist = history.history
acc, val_acc = hist['accuracy'], hist['val_accuracy']
loss, val_loss = hist['loss'], hist['val_loss']
epochs = range(len(acc))

# Accuracy: dots = training, solid line = validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

# Loss curves on a second figure.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
8. Prediction
# 테스트할 이미지 불러들여오기
# Download the held-out prediction images and unpack them into
# /content/datasets/test (cache_subdir overrides the default 'datasets').
url2 = "https://github.com/******/******/blob/main/predict.zip?raw=true"
path_to_zip = keras.utils.get_file('predict.zip', origin=url2, extract=True, cache_dir='/content',cache_subdir='datasets/test')
# Remove the leftover archive (Colab/IPython shell magic, not plain Python).
!rm /content/datasets/test/predict.zip
# Run the trained model on the four downloaded test images (001.jpg..004.jpg)
# and display them next to their predicted class names.
# NOTE(review): `dict` below is the index->label map defined in the earlier
# cell (it shadows the builtin `dict`); the name is kept for compatibility.
img_stack = []
predict = []
for i in range(1, 5):
    test = f"/content/datasets/test/00{i}.jpg"
    img = imageio.imread(test)
    # Bug fix: the model expects 128x128 RGB input, but the original code
    # reshaped the raw image directly, which raises ValueError for any test
    # image that is not already exactly 128x128x3. Resize first, and
    # replicate a grayscale channel to 3 so the reshape always succeeds.
    img = cv2.resize(np.asarray(img), (128, 128))
    if img.ndim == 2:  # grayscale JPEG -> fake RGB
        img = np.stack([img] * 3, axis=-1)
    img = img / 255  # match the 1/255 rescale used during training
    res = model.predict(img.reshape(-1, 128, 128, 3))
    img_stack.append(img)
    predict.append(dict[np.argmax(res)])

plt.figure(figsize=(20, 4))
plt.imshow(np.hstack((img_stack[0:4])))
print(predict[0:4])