import os
import sys
import cv2
from PIL import Image
import h5py
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import time
from datetime import datetime
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import inception_v3, xception, resnet50, vgg16, vgg19
from tensorflow.keras.applications import InceptionV3, Xception, ResNet50, VGG16, VGG19
from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D,Dense
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.utils import np_utils
import glob
import warnings
warnings.filterwarnings("ignore")
# Count the images available in each split. Train/val are organised as one
# sub-directory per class; test is a flat directory of jpg files.
train_file_num = 0
valid_file_num = 0
test_file_num = 0

for class_dir in os.listdir("./train"):
    train_file_num += len(glob.glob(f"./train/{class_dir}/*.jpg"))
print(train_file_num)

for class_dir in os.listdir("./val"):
    valid_file_num += len(glob.glob(f"./val/{class_dir}/*.jpg"))
print(valid_file_num)

file = glob.glob("./test/*.jpg")
test_file_num += len(file)
print(test_file_num)
height = 299  # input side length fed to InceptionV3

# 10 classes, stored contiguously class-by-class on disk:
# 140 training and 20 validation images per class.
train_labels = np.repeat(np.arange(10), 140)
valid_labels = np.repeat(np.arange(10), 20)
train_dummies = to_categorical(train_labels, 10)
valid_dummies = to_categorical(valid_labels, 10)

# Pre-allocated RGB image buffers (float64, filled by the loading loops below).
train = np.zeros((train_file_num, height, height, 3))
valid = np.zeros((valid_file_num, height, height, 3))
test = np.zeros((test_file_num, height, height, 3))
# Load every image, resize it to (height, height), and flip BGR -> RGB
# (cv2.imread returns BGR; the [:, :, ::-1] slice reorders the channels).
i = 0
for class_dir in tqdm(os.listdir("./train")):
    for fname in os.listdir("./train/" + class_dir):
        img = cv2.imread(f'./train/{class_dir}/{fname}')
        img = cv2.resize(img, (height, height))
        train[i] = img[:, :, ::-1]
        i += 1

i = 0
for class_dir in os.listdir("./val"):
    for fname in os.listdir("./val/" + class_dir):
        img = cv2.imread(f'./val/{class_dir}/{fname}')
        img = cv2.resize(img, (height, height))
        valid[i] = img[:, :, ::-1]
        i += 1

# Iterate only *.jpg here: the `test` buffer was sized from the *.jpg glob
# above, so loading every directory entry would overrun the buffer and
# crash on non-image files (cv2.imread returns None for those).
i = 0
for path in glob.glob("./test/*.jpg"):
    img = cv2.imread(path)
    img = cv2.resize(img, (height, height))
    test[i] = img[:, :, ::-1]
    i += 1
# Scale pixel values from [0, 255] to roughly [-1, 1] (InceptionV3-style input).
# Note: test is deliberately left unscaled here, matching the original flow —
# TODO confirm it gets normalized before any prediction on it.
train = (train - 125) / 125
valid = (valid - 125) / 125

# Report buffer sizes; the validation line previously mislabelled the
# validation set as "Testing".
print('Training Data Size = %.2f GB' % (sys.getsizeof(train) / 1024**3))
print('Validation Data Size = %.2f GB' % (sys.getsizeof(valid) / 1024**3))
print('Testing Data Size = %.2f GB' % (sys.getsizeof(test) / 1024**3))
def setup_to_transfer_learning(model, base_model):
    """Freeze the pre-trained trunk and compile for transfer learning.

    Every layer of ``base_model`` is made non-trainable so only the newly
    added classification head learns, then ``model`` is compiled with
    SGD (lr=0.005, decay=1e-6, momentum=0.9, Nesterov) and categorical
    cross-entropy loss.
    """
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False
    optimizer = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
def setup_to_fine_tune(model, base_model):
    """Unfreeze the deeper layers of the trunk and recompile for fine-tuning.

    Layers up to and including index ``GAP_LAYER`` stay frozen; every
    layer after it becomes trainable. The model is recompiled with
    Adagrad(lr=0.005) and categorical cross-entropy.
    """
    # Local import: Adagrad was never imported at file level (only SGD is),
    # so the original compile line raised NameError when this was called.
    from tensorflow.keras.optimizers import Adagrad

    GAP_LAYER = 10
    for layer in base_model.layers[:GAP_LAYER + 1]:
        layer.trainable = False
    for layer in base_model.layers[GAP_LAYER + 1:]:
        layer.trainable = True
    model.compile(optimizer=Adagrad(lr=0.005),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# Build the classifier: ImageNet-pretrained InceptionV3 trunk (top removed),
# then global average pooling, a 1024-unit ReLU layer and a 10-way softmax.
base_model = InceptionV3(weights='imagenet', include_top=False)
pooled = GlobalAveragePooling2D()(base_model.output)
hidden = Dense(1024, activation='relu')(pooled)
predictions = Dense(10, activation='softmax')(hidden)
model = Model(inputs=base_model.input, outputs=predictions)
setup_to_transfer_learning(model, base_model)
# NOTE(review): the splits look swapped here — the model is fit on the
# 200-image validation set while the 1400-image training set is passed as
# validation_data. Combined with the confidence filter just below, this may
# be a deliberate "train on the small clean set, then score the big set"
# data-cleaning pass — confirm the intent before "fixing" it.
model.fit(x=valid,
y=valid_dummies,
batch_size=16,
epochs=10,
validation_data=(train, train_dummies))
# Score the training images with the freshly trained model and keep the
# indices where the model is confident (max class probability >= 0.5).
# The stray '|' token that followed this block was a syntax error and has
# been removed.
pre = model.predict(train)
idx = [i for i in range(len(pre)) if pre[i].max() >= 0.5]
print(idx, len(idx), len(pre))