Springboot editor 程序栈 人工智能 ajax validation constructor vue框架 vue入门 pmp视频教程 mysql倒序 mysql统计数量 ajax里面可以嵌套ajax吗 bootstrap颜色 pcm接口 oracle重命名表名 matlab插值函数 java上传图片 python转java docker启动容器 centos定时任务 destoon模板 python教程 javalabel java开发者 javarandom java多线程教程 linux中grep 远程登录linux java电子书下载 系统集成项目管理工程师教程 jb51 源计划卡特 神龙kms maya2008 苹果手机添加邮箱 maven项目打包 mac版matlab pr动态字幕 茸好珠
当前位置: 首页 > 学习教程  > 编程语言

高光谱图像论文代码复现(四)

2020/11/24 10:49:10 文章标签: 测试文章如有侵权请发送至邮箱809451989@qq.com投诉后文章立即删除

Residual Spectral–Spatial Attention Network for Hyperspectral Image Classification 论文分析:链接: 高光谱图像分类论文(八). import keras from tensorflow.keras.layers import Conv2D, Conv3D, Flatten, Dense, MaxPooling2D,Reshape, BatchNormalizatio…

Residual Spectral–Spatial Attention Network for Hyperspectral Image Classification

论文分析:链接: 高光谱图像分类论文(八).

import keras
from tensorflow.keras.layers import Conv2D, Conv3D, Flatten, Dense, MaxPooling2D,Reshape, BatchNormalization,GlobalAveragePooling2D,GlobalAveragePooling3D,Conv1D,GlobalAveragePooling1D
from tensorflow.keras.layers import Dropout, Input,Lambda,Conv2DTranspose,Activation,GlobalMaxPooling2D
from tensorflow.keras.models import Model,load_model
from tensorflow.keras.optimizers import Adam,RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
import random
from operator import truediv
from plotly.offline import init_notebook_mode

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.io as sio
import os
import spectral
import glob
from sklearn.preprocessing import MinMaxScaler
from keras_contrib.layers.normalization import instancenormalization
import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling
from keras.callbacks import EarlyStopping
from keras.optimizers.schedules import ExponentialDecay
# He-normal weight initializer shared by every Conv2D/Dense layer built below.
initializer = tf.keras.initializers.HeNormal()
# Enable on-demand GPU memory growth so TF does not grab all VRAM up front.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def loadData(names):
    """Load a hyperspectral dataset cube and its ground-truth label map.

    Parameters
    ----------
    names : str
        Dataset key: 'IP' (Indian Pines), 'PU' (Pavia University) or
        'SA' (Salinas).

    Returns
    -------
    (data, labels) : tuple of np.ndarray
        ``data`` is the (H, W, Bands) image cube, ``labels`` the (H, W)
        ground-truth class map (0 = background).

    Raises
    ------
    ValueError
        If ``names`` is not one of the known keys.  The original fell
        through silently and crashed later with ``UnboundLocalError``.
    """
    data_path=os.path.join(r'D:\\Program Files (x86)\\Anaconda\\jupyter_path','dataset')
    if names=='IP':
        data=sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected'))['indian_pines_corrected']
        labels=sio.loadmat(os.path.join(data_path,'Indian_pines_gt.mat'))['indian_pines_gt']
    elif names=='PU':
        data=sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
        labels=sio.loadmat(os.path.join(data_path,'PaviaU_gt.mat'))['paviaU_gt']
    elif names == 'SA':
        data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
    else:
        raise ValueError(f"Unknown dataset name: {names!r} (expected 'IP', 'PU' or 'SA')")
    return data,labels
def pixel_select(X, Y):
    """Split the ground-truth map into disjoint train/test pixel masks.

    For each class k (1-based) a fixed number of pixels (``train_num[k-1]``,
    the per-class counts for the 16 Indian Pines classes) is drawn uniformly
    at random for training; every other labelled pixel goes to the test mask.

    Parameters
    ----------
    X : np.ndarray
        Image cube; unused, kept only for interface compatibility.
    Y : np.ndarray
        (H, W) integer label map, 0 = background.

    Returns
    -------
    (train_pixels, test_pixels) : tuple of np.ndarray
        Maps of the same shape as Y; each labelled pixel is nonzero in
        exactly one of the two maps, and ``train_pixels + test_pixels == Y``.
    """
    # Per-class training sample counts (Indian Pines, 16 classes) —
    # hoisted out of the loop; the original rebuilt this list every pass.
    train_num = [14, 419, 265, 69, 149, 219, 9, 144, 5, 288, 733, 175, 65, 376, 119, 31]
    test_pixels = Y.copy()
    kinds = np.unique(Y).shape[0] - 1   # number of labelled classes (0 is background)
    for k in range(kinds):
        num = np.sum(Y == (k + 1))                # population of class k+1
        rows, cols = np.where(Y == (k + 1))       # coordinates of that class
        chosen = random.sample(range(num), train_num[k])  # random training subset
        # Remove the chosen training pixels from the test mask in one shot.
        # (The original wrapped this assignment in a redundant inner loop
        # that re-applied the identical fancy-indexed write and shadowed
        # the outer loop variable `i`.)
        test_pixels[rows[chosen], cols[chosen]] = 0
    train_pixels = Y - test_pixels
    return train_pixels, test_pixels

# Extract one spatial patch ("cube") per labelled pixel.
def GetImageCubes(input_data, pixels_select, window_size=7):
    """Extract one (window_size, window_size, Bands) cube per labelled pixel.

    Parameters
    ----------
    input_data : np.ndarray
        (H, W, Bands) image cube.
    pixels_select : np.ndarray
        (H, W) integer label map (train_pixels or test_pixels); pixels
        labelled 0 are skipped.
    window_size : int, optional
        Spatial size of the extracted patch.  Defaults to 7, matching the
        original hard-coded behaviour.

    Returns
    -------
    (batch_out, batch_label) : tuple of np.ndarray
        ``batch_out`` has shape (N, window_size, window_size, Bands);
        ``batch_label`` is the (N, kinds) one-hot encoding, where map
        class k (1-based) maps to column k-1.
    """
    bands = input_data.shape[2]
    kinds = np.unique(pixels_select).shape[0] - 1   # labelled classes (0 = background)
    lo = window_size // 2            # rows/cols before the centre pixel
    hi = window_size - lo            # rows/cols from the centre onwards
    pad = window_size                # generous zero margin (>= lo and >= hi)
    paddingdata = np.pad(input_data, ((pad, pad), (pad, pad), (0, 0)), "constant")
    paddinglabel = np.pad(pixels_select, ((pad, pad), (pad, pad)), "constant")
    # Coordinates of labelled pixels, in padded-array space.
    rows, cols = np.where(paddinglabel != 0)
    num = np.sum(pixels_select != 0)                # number of cubes to extract
    batch_out = np.zeros([num, window_size, window_size, bands])
    batch_label = np.zeros([num, kinds])
    for i in range(num):
        r, c = rows[i], cols[i]
        batch_out[i, :, :, :] = paddingdata[r - lo:r + hi, c - lo:c + hi, :]
        # int() guards against float label maps, which would fail as an index.
        batch_label[i, int(paddinglabel[r, c]) - 1] = 1
    return batch_out, batch_label
# --- Notebook-style driver.  This post is a pasted Jupyter session: the bare
# --- tuple lines below are echoed cell outputs, not effective statements.
X,Y=loadData('IP')
X.shape,Y.shape
((145, 145, 200), (145, 145))
train_pixels,test_pixels=pixel_select(X,Y)
X_train,Y_train=GetImageCubes(X,train_pixels)
X_train.shape,Y_train.shape
((3080, 7, 7, 200), (3080, 16))
X_test,Y_test=GetImageCubes(X,test_pixels)
X_test.shape,Y_test.shape
((7169, 7, 7, 200), (7169, 16))


# NOTE(review): creatCube and splitTrainTest are not defined anywhere in this
# post — presumably helpers from an earlier article in the series; confirm
# before running.  This second split overwrites the X_train/X_test produced
# by pixel_select/GetImageCubes above.
X_,Y_=creatCube(X,Y,windowsize=7)
X_.shape,Y_.shape
((10249, 7, 7, 200), (10249,))
Y_=np_utils.to_categorical(Y_)
X_train,X_test,Y_train,Y_test=splitTrainTest(X_,Y_,Ratio=0.9)
X_train.shape,X_test.shape,Y_train.shape,Y_test.shape
((1024, 7, 7, 200), (9225, 7, 7, 200), (1024, 16), (9225, 16))
# Symbolic input for the model: a 7x7 patch with 200 spectral bands.
input_layer=Input((7,7,200))
def spectral_attention_input(input_layer):
    """Channel (spectral) attention on the raw 200-band input.

    Average- and max-pools the spatial dims, feeds both descriptors through
    a shared 200 -> 25 -> 200 MLP, sums the two gates and rescales the
    input channel-wise, followed by a ReLU.
    """
    pooled_avg = GlobalAveragePooling2D()(input_layer)
    pooled_max = GlobalMaxPooling2D()(input_layer)
    # Shared MLP: 25-unit bottleneck, sigmoid gate back to 200 channels.
    squeeze = Dense(units=25, activation='relu')
    excite = Dense(units=200, activation='sigmoid')
    gate_avg = excite(squeeze(pooled_avg))
    gate_max = excite(squeeze(pooled_max))
    gate = Reshape((1, 1, 200))(gate_max + gate_avg)
    return Activation('relu')(gate * input_layer)
def spatial_attention_input(input_layer):
    """Spatial attention over the 7x7 patch of the raw input.

    Builds per-pixel mean/max descriptors across bands, convolves the
    2-channel stack into a 200-channel sigmoid gate, batch-normalises it
    and rescales the input, followed by a ReLU.
    """
    mean_map = tf.reduce_mean(input_layer, axis=-1)
    max_map = tf.reduce_max(input_layer, axis=-1)
    stacked = tf.concat([Reshape((7, 7, 1))(mean_map),
                         Reshape((7, 7, 1))(max_map)], axis=-1)
    gate = Conv2D(filters=200, kernel_size=(3, 3), strides=(1, 1), padding='same',
                  activation='sigmoid', use_bias=False,
                  kernel_initializer=initializer)(stacked)
    gate = BatchNormalization(axis=-1)(gate)
    return Activation('relu')(gate * input_layer)
def spectral_attention(input_layer):
    """Channel attention for 32-channel feature maps inside RSSA blocks.

    Same shape as spectral_attention_input but with a 32 -> 4 -> 32
    shared MLP.
    """
    pooled_avg = GlobalAveragePooling2D()(input_layer)
    pooled_max = GlobalMaxPooling2D()(input_layer)
    squeeze = Dense(units=4, activation='relu')
    excite = Dense(units=32, activation='sigmoid')
    gate_avg = excite(squeeze(pooled_avg))
    gate_max = excite(squeeze(pooled_max))
    gate = Reshape((1, 1, 32))(gate_max + gate_avg)
    return Activation('relu')(gate * input_layer)
    
def spatial_attention(input_layer):
    """Spatial attention for 32-channel feature maps inside RSSA blocks."""
    mean_map = tf.reduce_mean(input_layer, axis=-1)
    max_map = tf.reduce_max(input_layer, axis=-1)
    stacked = tf.concat([Reshape((7, 7, 1))(mean_map),
                         Reshape((7, 7, 1))(max_map)], axis=-1)
    gate = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding='same',
                  activation='sigmoid', use_bias=False,
                  kernel_initializer=initializer)(stacked)
    gate = BatchNormalization(axis=-1)(gate)
    return Activation('relu')(gate * input_layer)
def ssa(input_layer):
    """Spectral-spatial attention for 32-channel features: spectral, then spatial."""
    return spatial_attention(spectral_attention(input_layer))
def ssa_input(input_layer):
    """Spectral-spatial attention for the 200-band input: spectral, then spatial."""
    return spatial_attention_input(spectral_attention_input(input_layer))
def RSSA(input_layer):
    """One residual spectral-spatial attention block (conv-BN x2, attention, skip)."""
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', strides=(1, 1),
               activation='relu', kernel_initializer=initializer,
               bias_initializer=initializer)(input_layer)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', strides=(1, 1),
               kernel_initializer=initializer, bias_initializer=initializer)(x)
    x = BatchNormalization(axis=-1)(x)
    # Modulate the features by their own attention map, then add the
    # identity shortcut before the final ReLU.
    attended = ssa(x)
    return Activation('relu')(x * attended + input_layer)
def net(input_layer):
    """Full RSSAN: input attention, a stem conv, three RSSA blocks, GAP, softmax."""
    x = ssa_input(input_layer)
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', strides=(1, 1),
               activation='relu', kernel_initializer=initializer,
               bias_initializer=initializer)(x)
    x = BatchNormalization(axis=-1)(x)
    for _ in range(3):
        x = RSSA(x)
    x = GlobalAveragePooling2D()(x)
    # 16 output classes (Indian Pines).
    return Dense(units=16, activation='softmax')(x)
# Assemble the functional model from the symbolic input defined above and
# print its layer-by-layer summary (pasted below in the post).
output_layer=net(input_layer)
model=Model(inputs=input_layer,outputs=output_layer)
model.summary()
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 7, 7, 200)]  0                                            
__________________________________________________________________________________________________
global_max_pooling2d (GlobalMax (None, 200)          0           input_1[0][0]                    
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 200)          0           input_1[0][0]                    
__________________________________________________________________________________________________
dense (Dense)                   (None, 25)           5025        global_average_pooling2d[0][0]   
                                                                 global_max_pooling2d[0][0]       
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 200)          5200        dense[0][0]                      
                                                                 dense[1][0]                      
__________________________________________________________________________________________________
tf_op_layer_AddV2 (TensorFlowOp [(None, 200)]        0           dense_1[1][0]                    
                                                                 dense_1[0][0]                    
__________________________________________________________________________________________________
reshape (Reshape)               (None, 1, 1, 200)    0           tf_op_layer_AddV2[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mul (TensorFlowOpLa [(None, 7, 7, 200)]  0           reshape[0][0]                    
                                                                 input_1[0][0]                    
__________________________________________________________________________________________________
activation (Activation)         (None, 7, 7, 200)    0           tf_op_layer_Mul[0][0]            
__________________________________________________________________________________________________
tf_op_layer_Mean (TensorFlowOpL [(None, 7, 7)]       0           activation[0][0]                 
__________________________________________________________________________________________________
tf_op_layer_Max (TensorFlowOpLa [(None, 7, 7)]       0           activation[0][0]                 
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Mean[0][0]           
__________________________________________________________________________________________________
reshape_2 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Max[0][0]            
__________________________________________________________________________________________________
tf_op_layer_concat (TensorFlowO [(None, 7, 7, 2)]    0           reshape_1[0][0]                  
                                                                 reshape_2[0][0]                  
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 7, 7, 200)    3600        tf_op_layer_concat[0][0]         
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 7, 7, 200)    800         conv2d[0][0]                     
__________________________________________________________________________________________________
tf_op_layer_Mul_1 (TensorFlowOp [(None, 7, 7, 200)]  0           batch_normalization[0][0]        
                                                                 activation[0][0]                 
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 7, 7, 200)    0           tf_op_layer_Mul_1[0][0]          
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 7, 7, 32)     57632       activation_1[0][0]               
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 7, 7, 32)     128         conv2d_1[0][0]                   
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 7, 7, 32)     9248        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 7, 7, 32)     128         conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 7, 7, 32)     9248        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 7, 7, 32)     128         conv2d_3[0][0]                   
__________________________________________________________________________________________________
global_max_pooling2d_1 (GlobalM (None, 32)           0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 32)           0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 4)            132         global_average_pooling2d_1[0][0] 
                                                                 global_max_pooling2d_1[0][0]     
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 32)           160         dense_2[0][0]                    
                                                                 dense_2[1][0]                    
__________________________________________________________________________________________________
tf_op_layer_AddV2_1 (TensorFlow [(None, 32)]         0           dense_3[1][0]                    
                                                                 dense_3[0][0]                    
__________________________________________________________________________________________________
reshape_3 (Reshape)             (None, 1, 1, 32)     0           tf_op_layer_AddV2_1[0][0]        
__________________________________________________________________________________________________
tf_op_layer_Mul_2 (TensorFlowOp [(None, 7, 7, 32)]   0           reshape_3[0][0]                  
                                                                 batch_normalization_3[0][0]      
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_2[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mean_1 (TensorFlowO [(None, 7, 7)]       0           activation_2[0][0]               
__________________________________________________________________________________________________
tf_op_layer_Max_1 (TensorFlowOp [(None, 7, 7)]       0           activation_2[0][0]               
__________________________________________________________________________________________________
reshape_4 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Mean_1[0][0]         
__________________________________________________________________________________________________
reshape_5 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Max_1[0][0]          
__________________________________________________________________________________________________
tf_op_layer_concat_1 (TensorFlo [(None, 7, 7, 2)]    0           reshape_4[0][0]                  
                                                                 reshape_5[0][0]                  
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 7, 7, 32)     576         tf_op_layer_concat_1[0][0]       
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 7, 7, 32)     128         conv2d_4[0][0]                   
__________________________________________________________________________________________________
tf_op_layer_Mul_3 (TensorFlowOp [(None, 7, 7, 32)]   0           batch_normalization_4[0][0]      
                                                                 activation_2[0][0]               
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_3[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mul_4 (TensorFlowOp [(None, 7, 7, 32)]   0           batch_normalization_3[0][0]      
                                                                 activation_3[0][0]               
__________________________________________________________________________________________________
tf_op_layer_AddV2_2 (TensorFlow [(None, 7, 7, 32)]   0           tf_op_layer_Mul_4[0][0]          
                                                                 batch_normalization_1[0][0]      
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_AddV2_2[0][0]        
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 7, 7, 32)     9248        activation_4[0][0]               
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 7, 7, 32)     128         conv2d_5[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 7, 7, 32)     9248        batch_normalization_5[0][0]      
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 7, 7, 32)     128         conv2d_6[0][0]                   
__________________________________________________________________________________________________
global_max_pooling2d_2 (GlobalM (None, 32)           0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
global_average_pooling2d_2 (Glo (None, 32)           0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
dense_4 (Dense)                 (None, 4)            132         global_average_pooling2d_2[0][0] 
                                                                 global_max_pooling2d_2[0][0]     
__________________________________________________________________________________________________
dense_5 (Dense)                 (None, 32)           160         dense_4[0][0]                    
                                                                 dense_4[1][0]                    
__________________________________________________________________________________________________
tf_op_layer_AddV2_3 (TensorFlow [(None, 32)]         0           dense_5[1][0]                    
                                                                 dense_5[0][0]                    
__________________________________________________________________________________________________
reshape_6 (Reshape)             (None, 1, 1, 32)     0           tf_op_layer_AddV2_3[0][0]        
__________________________________________________________________________________________________
tf_op_layer_Mul_5 (TensorFlowOp [(None, 7, 7, 32)]   0           reshape_6[0][0]                  
                                                                 batch_normalization_6[0][0]      
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_5[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mean_2 (TensorFlowO [(None, 7, 7)]       0           activation_5[0][0]               
__________________________________________________________________________________________________
tf_op_layer_Max_2 (TensorFlowOp [(None, 7, 7)]       0           activation_5[0][0]               
__________________________________________________________________________________________________
reshape_7 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Mean_2[0][0]         
__________________________________________________________________________________________________
reshape_8 (Reshape)             (None, 7, 7, 1)      0           tf_op_layer_Max_2[0][0]          
__________________________________________________________________________________________________
tf_op_layer_concat_2 (TensorFlo [(None, 7, 7, 2)]    0           reshape_7[0][0]                  
                                                                 reshape_8[0][0]                  
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 7, 7, 32)     576         tf_op_layer_concat_2[0][0]       
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 7, 7, 32)     128         conv2d_7[0][0]                   
__________________________________________________________________________________________________
tf_op_layer_Mul_6 (TensorFlowOp [(None, 7, 7, 32)]   0           batch_normalization_7[0][0]      
                                                                 activation_5[0][0]               
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_6[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mul_7 (TensorFlowOp [(None, 7, 7, 32)]   0           batch_normalization_6[0][0]      
                                                                 activation_6[0][0]               
__________________________________________________________________________________________________
tf_op_layer_AddV2_4 (TensorFlow [(None, 7, 7, 32)]   0           tf_op_layer_Mul_7[0][0]          
                                                                 activation_4[0][0]               
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_AddV2_4[0][0]        
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 7, 7, 32)     9248        activation_7[0][0]               
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 7, 7, 32)     128         conv2d_8[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 7, 7, 32)     9248        batch_normalization_8[0][0]      
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 7, 7, 32)     128         conv2d_9[0][0]                   
__________________________________________________________________________________________________
global_max_pooling2d_3 (GlobalM (None, 32)           0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
global_average_pooling2d_3 (Glo (None, 32)           0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dense_6 (Dense)                 (None, 4)            132         global_average_pooling2d_3[0][0] 
                                                                 global_max_pooling2d_3[0][0]     
__________________________________________________________________________________________________
dense_7 (Dense)                 (None, 32)           160         dense_6[0][0]                    
                                                                 dense_6[1][0]                    
__________________________________________________________________________________________________
tf_op_layer_AddV2_5 (TensorFlow [(None, 32)]         0           dense_7[1][0]                    
                                                                 dense_7[0][0]                    
__________________________________________________________________________________________________
reshape_9 (Reshape)             (None, 1, 1, 32)     0           tf_op_layer_AddV2_5[0][0]        
__________________________________________________________________________________________________
tf_op_layer_Mul_8 (TensorFlowOp [(None, 7, 7, 32)]   0           reshape_9[0][0]                  
                                                                 batch_normalization_9[0][0]      
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_8[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mean_3 (TensorFlowO [(None, 7, 7)]       0           activation_8[0][0]               
__________________________________________________________________________________________________
tf_op_layer_Max_3 (TensorFlowOp [(None, 7, 7)]       0           activation_8[0][0]               
__________________________________________________________________________________________________
reshape_10 (Reshape)            (None, 7, 7, 1)      0           tf_op_layer_Mean_3[0][0]         
__________________________________________________________________________________________________
reshape_11 (Reshape)            (None, 7, 7, 1)      0           tf_op_layer_Max_3[0][0]          
__________________________________________________________________________________________________
tf_op_layer_concat_3 (TensorFlo [(None, 7, 7, 2)]    0           reshape_10[0][0]                 
                                                                 reshape_11[0][0]                 
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 7, 7, 32)     576         tf_op_layer_concat_3[0][0]       
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 7, 7, 32)     128         conv2d_10[0][0]                  
__________________________________________________________________________________________________
tf_op_layer_Mul_9 (TensorFlowOp [(None, 7, 7, 32)]   0           batch_normalization_10[0][0]     
                                                                 activation_8[0][0]               
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 7, 7, 32)     0           tf_op_layer_Mul_9[0][0]          
__________________________________________________________________________________________________
tf_op_layer_Mul_10 (TensorFlowO [(None, 7, 7, 32)]   0           batch_normalization_9[0][0]      
                                                                 activation_9[0][0]               
__________________________________________________________________________________________________
tf_op_layer_AddV2_6 (TensorFlow [(None, 7, 7, 32)]   0           tf_op_layer_Mul_10[0][0]         
                                                                 activation_7[0][0]               
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 7, 7, 32)     0           tf_op_layer_AddV2_6[0][0]        
__________________________________________________________________________________________________
global_average_pooling2d_4 (Glo (None, 32)           0           activation_10[0][0]              
__________________________________________________________________________________________________
dense_8 (Dense)                 (None, 16)           528         global_average_pooling2d_4[0][0] 
==================================================================================================
Total params: 132,157
Trainable params: 131,117
Non-trainable params: 1,040
__________________________________________________________________________________________________
# `lr` is the deprecated alias in tf.keras optimizers; `learning_rate` is
# the supported argument name.
adam = Adam(learning_rate=0.003)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
filepath = 'RSSAN.h5'
# NOTE(review): both callbacks monitor the *training* loss; the log below
# shows val_loss diverging while train loss is tiny — monitoring 'val_loss'
# would likely be the better choice.  Left unchanged to preserve behaviour.
callback_earlystopping = EarlyStopping(monitor='loss', patience=80, verbose=1, mode='auto')
# Keep only the weights with the lowest training loss seen so far.
checkpoint = ModelCheckpoint(filepath,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             )
callback_list = [callback_earlystopping, checkpoint]
history = model.fit(X_train,
                    Y_train,
                    epochs=300,
                    validation_split=0.3,
                    batch_size=16,
                    callbacks=callback_list
                    )
Epoch 1/300
135/135 [==============================] - ETA: 0s - loss: 0.0161 - accuracy: 0.9954
Epoch 00001: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0161 - accuracy: 0.9954 - val_loss: 10.5291 - val_accuracy: 0.1158
Epoch 2/300
134/135 [============================>.] - ETA: 0s - loss: 0.0185 - accuracy: 0.9939
Epoch 00002: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0187 - accuracy: 0.9940 - val_loss: 14.9365 - val_accuracy: 0.3842
Epoch 3/300
133/135 [============================>.] - ETA: 0s - loss: 0.0391 - accuracy: 0.9878
Epoch 00003: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0387 - accuracy: 0.9879 - val_loss: 31240.8535 - val_accuracy: 0.3831
Epoch 4/300
133/135 [============================>.] - ETA: 0s - loss: 0.0488 - accuracy: 0.9840
Epoch 00004: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0483 - accuracy: 0.9842 - val_loss: 8.9375 - val_accuracy: 0.4167
Epoch 5/300
134/135 [============================>.] - ETA: 0s - loss: 0.0293 - accuracy: 0.9935
Epoch 00005: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0292 - accuracy: 0.9935 - val_loss: 12.5848 - val_accuracy: 0.0963
Epoch 6/300
135/135 [==============================] - ETA: 0s - loss: 0.0166 - accuracy: 0.9958
Epoch 00006: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0166 - accuracy: 0.9958 - val_loss: 35360268615680.0000 - val_accuracy: 0.4004
Epoch 7/300
131/135 [============================>.] - ETA: 0s - loss: 0.0257 - accuracy: 0.9933
Epoch 00007: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0252 - accuracy: 0.9935 - val_loss: 1556.4509 - val_accuracy: 0.3344
Epoch 8/300
131/135 [============================>.] - ETA: 0s - loss: 0.0466 - accuracy: 0.9843
Epoch 00008: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0466 - accuracy: 0.9842 - val_loss: 6.0337 - val_accuracy: 0.4848
Epoch 9/300
133/135 [============================>.] - ETA: 0s - loss: 0.0251 - accuracy: 0.9915
Epoch 00009: loss did not improve from 0.00681
135/135 [==============================] - 1s 10ms/step - loss: 0.0249 - accuracy: 0.9917 - val_loss: 5.3472 - val_accuracy: 0.5552
Epoch 10/300
133/135 [============================>.] - ETA: 0s - loss: 0.0197 - accuracy: 0.9930
Epoch 00010: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0198 - accuracy: 0.9926 - val_loss: 7.0390 - val_accuracy: 0.6288
Epoch 11/300
135/135 [==============================] - ETA: 0s - loss: 0.0157 - accuracy: 0.9949
Epoch 00011: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0157 - accuracy: 0.9949 - val_loss: 1998694055862512304259072.0000 - val_accuracy: 0.1591
Epoch 12/300
132/135 [============================>.] - ETA: 0s - loss: 0.0163 - accuracy: 0.9943
Epoch 00012: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0160 - accuracy: 0.9944 - val_loss: 838174582367524092903424.0000 - val_accuracy: 0.1115
Epoch 13/300
133/135 [============================>.] - ETA: 0s - loss: 0.0143 - accuracy: 0.9967
Epoch 00013: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0144 - accuracy: 0.9968 - val_loss: 309692118046359257350144.0000 - val_accuracy: 0.4719
Epoch 14/300
135/135 [==============================] - ETA: 0s - loss: 0.0362 - accuracy: 0.9884
Epoch 00014: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0362 - accuracy: 0.9884 - val_loss: 2210576640.0000 - val_accuracy: 0.4351
Epoch 15/300
133/135 [============================>.] - ETA: 0s - loss: 0.0172 - accuracy: 0.9934
Epoch 00015: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0171 - accuracy: 0.9935 - val_loss: 37.2551 - val_accuracy: 0.5357
Epoch 16/300
134/135 [============================>.] - ETA: 0s - loss: 0.0201 - accuracy: 0.9935
Epoch 00016: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0203 - accuracy: 0.9935 - val_loss: 9.5309 - val_accuracy: 0.3344
Epoch 17/300
133/135 [============================>.] - ETA: 0s - loss: 0.0486 - accuracy: 0.9854
Epoch 00017: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0492 - accuracy: 0.9852 - val_loss: 17.8245 - val_accuracy: 0.0963
Epoch 18/300
135/135 [==============================] - ETA: 0s - loss: 0.0545 - accuracy: 0.9814
Epoch 00018: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0545 - accuracy: 0.9814 - val_loss: 9.3761 - val_accuracy: 0.3615
Epoch 19/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0267 - accuracy: 0.9893
Epoch 00019: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0268 - accuracy: 0.9889 - val_loss: 7.8320 - val_accuracy: 0.4221
Epoch 20/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0275 - accuracy: 0.9918
Epoch 00020: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0297 - accuracy: 0.9903 - val_loss: 1561.6342 - val_accuracy: 0.1050
Epoch 21/300
131/135 [============================>.] - ETA: 0s - loss: 0.0284 - accuracy: 0.9914
Epoch 00021: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0285 - accuracy: 0.9912 - val_loss: 17.7445 - val_accuracy: 0.3344
Epoch 22/300
133/135 [============================>.] - ETA: 0s - loss: 0.0416 - accuracy: 0.9873
Epoch 00022: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0414 - accuracy: 0.9875 - val_loss: 19.5770 - val_accuracy: 0.2067
Epoch 23/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0273 - accuracy: 0.9908
Epoch 00023: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0267 - accuracy: 0.9907 - val_loss: 8.1226 - val_accuracy: 0.3593
Epoch 24/300
131/135 [============================>.] - ETA: 0s - loss: 0.0085 - accuracy: 0.9971
Epoch 00024: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0084 - accuracy: 0.9972 - val_loss: 7.5742 - val_accuracy: 0.4221
Epoch 25/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0173 - accuracy: 0.9937
Epoch 00025: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0182 - accuracy: 0.9935 - val_loss: 13.0863 - val_accuracy: 0.3041
Epoch 26/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0327 - accuracy: 0.9909
Epoch 00026: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0317 - accuracy: 0.9912 - val_loss: 9.8838 - val_accuracy: 0.6483
Epoch 27/300
135/135 [==============================] - ETA: 0s - loss: 0.0088 - accuracy: 0.9972
Epoch 00027: loss did not improve from 0.00681
135/135 [==============================] - 1s 9ms/step - loss: 0.0088 - accuracy: 0.9972 - val_loss: 5.3829 - val_accuracy: 0.5519
Epoch 28/300
131/135 [============================>.] - ETA: 0s - loss: 0.0029 - accuracy: 0.9990
Epoch 00028: loss improved from 0.00681 to 0.00287, saving model to RSSAN.h5
135/135 [==============================] - 1s 10ms/step - loss: 0.0029 - accuracy: 0.9991 - val_loss: 6.5493 - val_accuracy: 0.4946
Epoch 29/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0052 - accuracy: 0.9981
Epoch 00029: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0050 - accuracy: 0.9981 - val_loss: 8.9408 - val_accuracy: 0.3615
Epoch 30/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0319 - accuracy: 0.9889
Epoch 00030: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0310 - accuracy: 0.9893 - val_loss: 12.8497 - val_accuracy: 0.3344
Epoch 31/300
131/135 [============================>.] - ETA: 0s - loss: 0.0311 - accuracy: 0.9885
Epoch 00031: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0304 - accuracy: 0.9889 - val_loss: 8.5154 - val_accuracy: 0.4978
Epoch 32/300
134/135 [============================>.] - ETA: 0s - loss: 0.0185 - accuracy: 0.9958
Epoch 00032: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0184 - accuracy: 0.9958 - val_loss: 6.0549 - val_accuracy: 0.4794
Epoch 33/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0153 - accuracy: 0.9937
Epoch 00033: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0149 - accuracy: 0.9940 - val_loss: 6.6574 - val_accuracy: 0.5541
Epoch 34/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0328 - accuracy: 0.9889
Epoch 00034: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0319 - accuracy: 0.9893 - val_loss: 10.1170 - val_accuracy: 0.3387
Epoch 35/300
132/135 [============================>.] - ETA: 0s - loss: 0.0455 - accuracy: 0.9848
Epoch 00035: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0452 - accuracy: 0.9847 - val_loss: 21.8273 - val_accuracy: 0.0574
Epoch 36/300
135/135 [==============================] - ETA: 0s - loss: 0.0242 - accuracy: 0.9898
Epoch 00036: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0242 - accuracy: 0.9898 - val_loss: 6.5499 - val_accuracy: 0.4545
Epoch 37/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0106 - accuracy: 0.9962
Epoch 00037: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0123 - accuracy: 0.9954 - val_loss: 15.3080 - val_accuracy: 0.2803
Epoch 38/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0207 - accuracy: 0.9937
Epoch 00038: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0201 - accuracy: 0.9940 - val_loss: 8.4923 - val_accuracy: 0.4535
Epoch 39/300
132/135 [============================>.] - ETA: 0s - loss: 0.0153 - accuracy: 0.9957
Epoch 00039: loss did not improve from 0.00287
135/135 [==============================] - 1s 10ms/step - loss: 0.0155 - accuracy: 0.9954 - val_loss: 14.6043 - val_accuracy: 0.2998
Epoch 40/300
133/135 [============================>.] - ETA: 0s - loss: 0.0290 - accuracy: 0.9911
Epoch 00040: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0286 - accuracy: 0.9912 - val_loss: 8.5050 - val_accuracy: 0.3474
Epoch 41/300
131/135 [============================>.] - ETA: 0s - loss: 0.0170 - accuracy: 0.9957
Epoch 00041: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0183 - accuracy: 0.9954 - val_loss: 11.5179 - val_accuracy: 0.3387
Epoch 42/300
135/135 [==============================] - ETA: 0s - loss: 0.0276 - accuracy: 0.9912
Epoch 00042: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0276 - accuracy: 0.9912 - val_loss: 27.6208 - val_accuracy: 0.0887
Epoch 43/300
135/135 [==============================] - ETA: 0s - loss: 0.0259 - accuracy: 0.9903
Epoch 00043: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0259 - accuracy: 0.9903 - val_loss: 5.3654 - val_accuracy: 0.4686
Epoch 44/300
135/135 [==============================] - ETA: 0s - loss: 0.0559 - accuracy: 0.9847
Epoch 00044: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0559 - accuracy: 0.9847 - val_loss: 26.1419 - val_accuracy: 0.4989
Epoch 45/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0414 - accuracy: 0.9835
Epoch 00045: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0442 - accuracy: 0.9833 - val_loss: 7.8355 - val_accuracy: 0.3290
Epoch 46/300
131/135 [============================>.] - ETA: 0s - loss: 0.0349 - accuracy: 0.9909
Epoch 00046: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0340 - accuracy: 0.9912 - val_loss: 5.8898 - val_accuracy: 0.5812
Epoch 47/300
132/135 [============================>.] - ETA: 0s - loss: 0.0132 - accuracy: 0.9957
Epoch 00047: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0147 - accuracy: 0.9954 - val_loss: 11.3592 - val_accuracy: 0.2652
Epoch 48/300
133/135 [============================>.] - ETA: 0s - loss: 0.0143 - accuracy: 0.9953
Epoch 00048: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0142 - accuracy: 0.9954 - val_loss: 7.6408 - val_accuracy: 0.3333
Epoch 49/300
134/135 [============================>.] - ETA: 0s - loss: 0.0176 - accuracy: 0.9930
Epoch 00049: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0175 - accuracy: 0.9930 - val_loss: 14.8760 - val_accuracy: 0.4848
Epoch 50/300
135/135 [==============================] - ETA: 0s - loss: 0.0284 - accuracy: 0.9903
Epoch 00050: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0284 - accuracy: 0.9903 - val_loss: 6.3561 - val_accuracy: 0.5574
Epoch 51/300
132/135 [============================>.] - ETA: 0s - loss: 0.0065 - accuracy: 0.9967
Epoch 00051: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0064 - accuracy: 0.9968 - val_loss: 5.0950 - val_accuracy: 0.7197
Epoch 52/300
135/135 [==============================] - ETA: 0s - loss: 0.0249 - accuracy: 0.9912
Epoch 00052: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0249 - accuracy: 0.9912 - val_loss: 5.3984 - val_accuracy: 0.5920
Epoch 53/300
132/135 [============================>.] - ETA: 0s - loss: 0.0230 - accuracy: 0.9929
Epoch 00053: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0226 - accuracy: 0.9930 - val_loss: 10.8704 - val_accuracy: 0.3084
Epoch 54/300
133/135 [============================>.] - ETA: 0s - loss: 0.0217 - accuracy: 0.9958
Epoch 00054: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0223 - accuracy: 0.9954 - val_loss: 14.2964 - val_accuracy: 0.3939
Epoch 55/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0186 - accuracy: 0.9942
Epoch 00055: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0179 - accuracy: 0.9944 - val_loss: 10.0088 - val_accuracy: 0.3972
Epoch 56/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0176 - accuracy: 0.9942
Epoch 00056: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0187 - accuracy: 0.9935 - val_loss: 5.7711 - val_accuracy: 0.5152
Epoch 57/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0280 - accuracy: 0.9909
Epoch 00057: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0272 - accuracy: 0.9912 - val_loss: 5.5399 - val_accuracy: 0.4805
Epoch 58/300
133/135 [============================>.] - ETA: 0s - loss: 0.0050 - accuracy: 0.9986
Epoch 00058: loss did not improve from 0.00287
135/135 [==============================] - 1s 9ms/step - loss: 0.0050 - accuracy: 0.9986 - val_loss: 4.7518 - val_accuracy: 0.5530
Epoch 59/300
133/135 [============================>.] - ETA: 0s - loss: 0.0019 - accuracy: 0.9995     ETA: 0s -
Epoch 00059: loss improved from 0.00287 to 0.00195, saving model to RSSAN.h5
135/135 [==============================] - 1s 10ms/step - loss: 0.0019 - accuracy: 0.9995 - val_loss: 7.6957 - val_accuracy: 0.3734
Epoch 60/300
135/135 [==============================] - ETA: 0s - loss: 0.0063 - accuracy: 0.9977
Epoch 00060: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0063 - accuracy: 0.9977 - val_loss: 13.2992 - val_accuracy: 0.3117
Epoch 61/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0062 - accuracy: 0.9971
Epoch 00061: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0060 - accuracy: 0.9972 - val_loss: 14.2350 - val_accuracy: 0.4838
Epoch 62/300
134/135 [============================>.] - ETA: 0s - loss: 0.0608 - accuracy: 0.9804
Epoch 00062: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0613 - accuracy: 0.9801 - val_loss: 66.0996 - val_accuracy: 0.4361
Epoch 63/300
134/135 [============================>.] - ETA: 0s - loss: 0.0320 - accuracy: 0.9897
Epoch 00063: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0319 - accuracy: 0.9898 - val_loss: 12.6089 - val_accuracy: 0.5400
Epoch 64/300
133/135 [============================>.] - ETA: 0s - loss: 0.0587 - accuracy: 0.9812
Epoch 00064: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0607 - accuracy: 0.9810 - val_loss: 3.9850 - val_accuracy: 0.6494
Epoch 65/300
131/135 [============================>.] - ETA: 0s - loss: 0.0219 - accuracy: 0.9924
Epoch 00065: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0290 - accuracy: 0.9907 - val_loss: 11.5488 - val_accuracy: 0.3052
Epoch 66/300
133/135 [============================>.] - ETA: 0s - loss: 0.0341 - accuracy: 0.9873
Epoch 00066: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0337 - accuracy: 0.9875 - val_loss: 25.1842 - val_accuracy: 0.4686
Epoch 67/300
131/135 [============================>.] - ETA: 0s - loss: 0.0144 - accuracy: 0.9962
Epoch 00067: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0140 - accuracy: 0.9963 - val_loss: 6.8119 - val_accuracy: 0.5271
Epoch 68/300
132/135 [============================>.] - ETA: 0s - loss: 0.0151 - accuracy: 0.9948
Epoch 00068: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0149 - accuracy: 0.9949 - val_loss: 13.3199 - val_accuracy: 0.4383
Epoch 69/300
135/135 [==============================] - ETA: 0s - loss: 0.0181 - accuracy: 0.9963
Epoch 00069: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0181 - accuracy: 0.9963 - val_loss: 5.1807 - val_accuracy: 0.5942
Epoch 70/300
132/135 [============================>.] - ETA: 0s - loss: 0.0308 - accuracy: 0.9920
Epoch 00070: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0321 - accuracy: 0.9912 - val_loss: 16.9292 - val_accuracy: 0.3918
Epoch 71/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0324 - accuracy: 0.9861
Epoch 00071: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0322 - accuracy: 0.9861 - val_loss: 9.5167 - val_accuracy: 0.2143
Epoch 72/300
131/135 [============================>.] - ETA: 0s - loss: 0.0243 - accuracy: 0.9919
Epoch 00072: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0236 - accuracy: 0.9921 - val_loss: 6.4489 - val_accuracy: 0.6320
Epoch 73/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0070 - accuracy: 0.9981
Epoch 00073: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0067 - accuracy: 0.9981 - val_loss: 5.6409 - val_accuracy: 0.6472
Epoch 74/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0075 - accuracy: 0.9981
Epoch 00074: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0075 - accuracy: 0.9981 - val_loss: 21.8621 - val_accuracy: 0.2933
Epoch 75/300
135/135 [==============================] - ETA: 0s - loss: 0.0107 - accuracy: 0.9963
Epoch 00075: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0107 - accuracy: 0.9963 - val_loss: 19.9151 - val_accuracy: 0.3799
Epoch 76/300
131/135 [============================>.] - ETA: 0s - loss: 0.0091 - accuracy: 0.9971
Epoch 00076: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0089 - accuracy: 0.9972 - val_loss: 16.0419 - val_accuracy: 0.5000
Epoch 77/300
134/135 [============================>.] - ETA: 0s - loss: 0.0075 - accuracy: 0.9977
Epoch 00077: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0074 - accuracy: 0.9977 - val_loss: 6.6468 - val_accuracy: 0.4751
Epoch 78/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0021 - accuracy: 0.9995
Epoch 00078: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0020 - accuracy: 0.9995 - val_loss: 10.0955 - val_accuracy: 0.3452
Epoch 79/300
135/135 [==============================] - ETA: 0s - loss: 0.0183 - accuracy: 0.9944
Epoch 00079: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0183 - accuracy: 0.9944 - val_loss: 17.0688 - val_accuracy: 0.3604
Epoch 80/300
135/135 [==============================] - ETA: 0s - loss: 0.0404 - accuracy: 0.9865
Epoch 00080: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0404 - accuracy: 0.9865 - val_loss: 11.1925 - val_accuracy: 0.5141
Epoch 81/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0227 - accuracy: 0.9942
Epoch 00081: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0220 - accuracy: 0.9944 - val_loss: 9.5887 - val_accuracy: 0.3409
Epoch 82/300
131/135 [============================>.] - ETA: 0s - loss: 0.0299 - accuracy: 0.9900
Epoch 00082: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0291 - accuracy: 0.9903 - val_loss: 6.5519 - val_accuracy: 0.7370
Epoch 83/300
132/135 [============================>.] - ETA: 0s - loss: 0.0310 - accuracy: 0.9896
Epoch 00083: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0306 - accuracy: 0.9898 - val_loss: 26.2844 - val_accuracy: 0.0357
Epoch 84/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0042 - accuracy: 0.9995
Epoch 00084: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0044 - accuracy: 0.9995 - val_loss: 7.4921 - val_accuracy: 0.4253
Epoch 85/300
134/135 [============================>.] - ETA: 0s - loss: 0.0193 - accuracy: 0.9944
Epoch 00085: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0192 - accuracy: 0.9944 - val_loss: 26.4408 - val_accuracy: 0.3377
Epoch 86/300
134/135 [============================>.] - ETA: 0s - loss: 0.0274 - accuracy: 0.9902
Epoch 00086: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0273 - accuracy: 0.9903 - val_loss: 8.1893 - val_accuracy: 0.4091
Epoch 87/300
135/135 [==============================] - ETA: 0s - loss: 0.0188 - accuracy: 0.9935
Epoch 00087: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0188 - accuracy: 0.9935 - val_loss: 10.5901 - val_accuracy: 0.4708
Epoch 88/300
131/135 [============================>.] - ETA: 0s - loss: 0.0034 - accuracy: 0.9990
Epoch 00088: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0034 - accuracy: 0.9991 - val_loss: 10.6637 - val_accuracy: 0.4177
Epoch 89/300
131/135 [============================>.] - ETA: 0s - loss: 0.0520 - accuracy: 0.9862
Epoch 00089: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0571 - accuracy: 0.9852 - val_loss: 23.1436 - val_accuracy: 0.1526
Epoch 90/300
134/135 [============================>.] - ETA: 0s - loss: 0.0566 - accuracy: 0.9832
Epoch 00090: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0563 - accuracy: 0.9833 - val_loss: 70.8975 - val_accuracy: 0.2013
Epoch 91/300
133/135 [============================>.] - ETA: 0s - loss: 0.0158 - accuracy: 0.9939
Epoch 00091: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0211 - accuracy: 0.9930 - val_loss: 28.2742 - val_accuracy: 0.0325
Epoch 92/300
131/135 [============================>.] - ETA: 0s - loss: 0.0148 - accuracy: 0.9948
Epoch 00092: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0157 - accuracy: 0.9944 - val_loss: 23.2904 - val_accuracy: 0.0411
Epoch 93/300
133/135 [============================>.] - ETA: 0s - loss: 0.0284 - accuracy: 0.9906
Epoch 00093: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0281 - accuracy: 0.9907 - val_loss: 11.1939 - val_accuracy: 0.4177
Epoch 94/300
133/135 [============================>.] - ETA: 0s - loss: 0.0161 - accuracy: 0.9939
Epoch 00094: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0159 - accuracy: 0.9940 - val_loss: 5.4395 - val_accuracy: 0.6483
Epoch 95/300
134/135 [============================>.] - ETA: 0s - loss: 0.0442 - accuracy: 0.9851
Epoch 00095: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0443 - accuracy: 0.9847 - val_loss: 85.0251 - val_accuracy: 0.5725
Epoch 96/300
134/135 [============================>.] - ETA: 0s - loss: 0.0166 - accuracy: 0.9963
Epoch 00096: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0165 - accuracy: 0.9963 - val_loss: 11.5599 - val_accuracy: 0.4416
Epoch 97/300
131/135 [============================>.] - ETA: 0s - loss: 0.0245 - accuracy: 0.9914
Epoch 00097: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0243 - accuracy: 0.9912 - val_loss: 33.7490 - val_accuracy: 0.0390
Epoch 98/300
131/135 [============================>.] - ETA: 0s - loss: 0.0297 - accuracy: 0.9895
Epoch 00098: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0289 - accuracy: 0.9898 - val_loss: 5.8028 - val_accuracy: 0.5022
Epoch 99/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0294 - accuracy: 0.9913
Epoch 00099: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0289 - accuracy: 0.9912 - val_loss: 8.9715 - val_accuracy: 0.4903
Epoch 100/300
132/135 [============================>.] - ETA: 0s - loss: 0.0084 - accuracy: 0.9972
Epoch 00100: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0083 - accuracy: 0.9972 - val_loss: 13.6409 - val_accuracy: 0.3983
Epoch 101/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0039 - accuracy: 0.9981
Epoch 00101: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0038 - accuracy: 0.9981 - val_loss: 8.6826 - val_accuracy: 0.3647
Epoch 102/300
132/135 [============================>.] - ETA: 0s - loss: 0.0113 - accuracy: 0.9957
Epoch 00102: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0112 - accuracy: 0.9958 - val_loss: 6.2320 - val_accuracy: 0.4275
Epoch 103/300
135/135 [==============================] - ETA: 0s - loss: 0.0034 - accuracy: 0.9991
Epoch 00103: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0034 - accuracy: 0.9991 - val_loss: 7.9505 - val_accuracy: 0.5130
Epoch 104/300
131/135 [============================>.] - ETA: 0s - loss: 0.0060 - accuracy: 0.9976
Epoch 00104: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0058 - accuracy: 0.9977 - val_loss: 10.2875 - val_accuracy: 0.4773
Epoch 105/300
132/135 [============================>.] - ETA: 0s - loss: 0.0033 - accuracy: 0.9991
Epoch 00105: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0034 - accuracy: 0.9991 - val_loss: 7.5893 - val_accuracy: 0.5747
Epoch 106/300
133/135 [============================>.] - ETA: 0s - loss: 0.0182 - accuracy: 0.9944
Epoch 00106: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0181 - accuracy: 0.9944 - val_loss: 17.3409 - val_accuracy: 0.2565
Epoch 107/300
135/135 [==============================] - ETA: 0s - loss: 0.0521 - accuracy: 0.9861
Epoch 00107: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0521 - accuracy: 0.9861 - val_loss: 60.2675 - val_accuracy: 0.2229
Epoch 108/300
134/135 [============================>.] - ETA: 0s - loss: 0.0173 - accuracy: 0.9935
Epoch 00108: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0183 - accuracy: 0.9926 - val_loss: 9.2909 - val_accuracy: 0.3528
Epoch 109/300
131/135 [============================>.] - ETA: 0s - loss: 0.0098 - accuracy: 0.9962
Epoch 00109: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0097 - accuracy: 0.9963 - val_loss: 5.0479 - val_accuracy: 0.5887
Epoch 110/300
133/135 [============================>.] - ETA: 0s - loss: 0.0061 - accuracy: 0.9972
Epoch 00110: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0063 - accuracy: 0.9972 - val_loss: 9.0598 - val_accuracy: 0.4329
Epoch 111/300
135/135 [==============================] - ETA: 0s - loss: 0.0038 - accuracy: 0.9991
Epoch 00111: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0038 - accuracy: 0.9991 - val_loss: 6.1405 - val_accuracy: 0.6494
Epoch 112/300
134/135 [============================>.] - ETA: 0s - loss: 0.0480 - accuracy: 0.9841
Epoch 00112: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0482 - accuracy: 0.9842 - val_loss: 24.8903 - val_accuracy: 0.0530
Epoch 113/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0451 - accuracy: 0.9846
Epoch 00113: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0439 - accuracy: 0.9852 - val_loss: 28.0227 - val_accuracy: 0.0779
Epoch 114/300
131/135 [============================>.] - ETA: 0s - loss: 0.0346 - accuracy: 0.9871
Epoch 00114: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0337 - accuracy: 0.9875 - val_loss: 9.0939 - val_accuracy: 0.4545
Epoch 115/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0145 - accuracy: 0.9966
Epoch 00115: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0139 - accuracy: 0.9968 - val_loss: 6.3841 - val_accuracy: 0.4232
Epoch 116/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0180 - accuracy: 0.9957
Epoch 00116: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0200 - accuracy: 0.9944 - val_loss: 7.5673 - val_accuracy: 0.3355
Epoch 117/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0256 - accuracy: 0.9933
Epoch 00117: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0247 - accuracy: 0.9935 - val_loss: 30.3369 - val_accuracy: 0.3225
Epoch 118/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0356 - accuracy: 0.9875
Epoch 00118: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0346 - accuracy: 0.9879 - val_loss: 23.0873 - val_accuracy: 0.2608
Epoch 119/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0220 - accuracy: 0.9918
Epoch 00119: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0214 - accuracy: 0.9921 - val_loss: 7.1689 - val_accuracy: 0.6569
Epoch 120/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0065 - accuracy: 0.9971
Epoch 00120: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0076 - accuracy: 0.9968 - val_loss: 15.8146 - val_accuracy: 0.5660
Epoch 121/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0199 - accuracy: 0.9937
Epoch 00121: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0203 - accuracy: 0.9935 - val_loss: 49.6036 - val_accuracy: 0.0790
Epoch 122/300
134/135 [============================>.] - ETA: 0s - loss: 0.0245 - accuracy: 0.9916
Epoch 00122: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0252 - accuracy: 0.9912 - val_loss: 28.2116 - val_accuracy: 0.5119
Epoch 123/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0108 - accuracy: 0.9952
Epoch 00123: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0105 - accuracy: 0.9954 - val_loss: 24.5964 - val_accuracy: 0.4513
Epoch 124/300
129/135 [===========================>..] - ETA: 0s - loss: 0.0125 - accuracy: 0.9956 ETA: 0s -
Epoch 00124: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0132 - accuracy: 0.9954 - val_loss: 15.0384 - val_accuracy: 0.3117
Epoch 125/300
135/135 [==============================] - ETA: 0s - loss: 0.0176 - accuracy: 0.9954
Epoch 00125: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0176 - accuracy: 0.9954 - val_loss: 11.7673 - val_accuracy: 0.6829
Epoch 126/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0081 - accuracy: 0.9971
Epoch 00126: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0078 - accuracy: 0.9972 - val_loss: 61.0289 - val_accuracy: 0.6591
Epoch 127/300
134/135 [============================>.] - ETA: 0s - loss: 0.0124 - accuracy: 0.9972
Epoch 00127: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0123 - accuracy: 0.9972 - val_loss: 22.5319 - val_accuracy: 0.3734
Epoch 128/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0060 - accuracy: 0.9986
Epoch 00128: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0066 - accuracy: 0.9981 - val_loss: 8.3238 - val_accuracy: 0.6591
Epoch 129/300
130/135 [===========================>..] - ETA: 0s - loss: 0.0056 - accuracy: 0.9986
Epoch 00129: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0054 - accuracy: 0.9986 - val_loss: 10.0794 - val_accuracy: 0.3788
Epoch 130/300
134/135 [============================>.] - ETA: 0s - loss: 0.0306 - accuracy: 0.9902
Epoch 00130: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0308 - accuracy: 0.9898 - val_loss: 36.9072 - val_accuracy: 0.1894
Epoch 131/300
131/135 [============================>.] - ETA: 0s - loss: 0.0575 - accuracy: 0.9833
Epoch 00131: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0561 - accuracy: 0.9838 - val_loss: 45.1773 - val_accuracy: 0.1028
Epoch 132/300
134/135 [============================>.] - ETA: 0s - loss: 0.0153 - accuracy: 0.9949
Epoch 00132: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0152 - accuracy: 0.9949 - val_loss: 18.8779 - val_accuracy: 0.2305
Epoch 133/300
132/135 [============================>.] - ETA: 0s - loss: 0.0199 - accuracy: 0.9934
Epoch 00133: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0197 - accuracy: 0.9935 - val_loss: 16.9598 - val_accuracy: 0.5097
Epoch 134/300
131/135 [============================>.] - ETA: 0s - loss: 0.0281 - accuracy: 0.9933
Epoch 00134: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0274 - accuracy: 0.9935 - val_loss: 14.9126 - val_accuracy: 0.2305
Epoch 135/300
133/135 [============================>.] - ETA: 0s - loss: 0.0115 - accuracy: 0.9958
Epoch 00135: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0113 - accuracy: 0.9958 - val_loss: 18.2324 - val_accuracy: 0.4621
Epoch 136/300
135/135 [==============================] - ETA: 0s - loss: 0.0177 - accuracy: 0.9949
Epoch 00136: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0177 - accuracy: 0.9949 - val_loss: 797.7786 - val_accuracy: 0.3810
Epoch 137/300
135/135 [==============================] - ETA: 0s - loss: 0.0371 - accuracy: 0.9875
Epoch 00137: loss did not improve from 0.00195
135/135 [==============================] - 1s 9ms/step - loss: 0.0371 - accuracy: 0.9875 - val_loss: 26.4572 - val_accuracy: 0.2684
Epoch 138/300
135/135 [==============================] - ETA: 0s - loss: 0.0362 - accuracy: 0.9898
Epoch 00138: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0362 - accuracy: 0.9898 - val_loss: 35.0094 - val_accuracy: 0.4470
Epoch 139/300
134/135 [============================>.] - ETA: 0s - loss: 0.0141 - accuracy: 0.9967
Epoch 00139: loss did not improve from 0.00195
135/135 [==============================] - 1s 10ms/step - loss: 0.0140 - accuracy: 0.9968 - val_loss: 304.8084 - val_accuracy: 0.4794
Epoch 00139: early stopping
# Plot the training curves recorded by model.fit().
# Fix: the original created a figure with figsize=(20,50) and then immediately
# created a second figure with figsize=(10,10); the first figure was never used
# and leaked an empty canvas. A single figure is created here instead.
plt.figure(figsize=(10, 10))

# Left panel: training loss per epoch.
plt.subplot(2, 2, 1)
plt.plot(history.history['loss'])
plt.title('Training loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')

# Right panel: training accuracy per epoch.
plt.subplot(2, 2, 2)
plt.plot(history.history['accuracy'])
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')

plt.show()

（此处为训练曲线图：左图为训练损失 loss，右图为训练准确率 accuracy，图片略）

不知道网络结构是不是还有问题，在测试集上的结果很不稳定：较好情况可以达到 98%，差的话 60%、70% 都出现过。从上面的训练日志也能看出，训练集准确率已接近 99%，而验证集 val_loss 多次剧烈震荡（甚至出现 797 这样的异常值）、val_accuracy 在 0.05~0.68 之间大幅波动，说明模型很可能存在过拟合或学习率设置不当的问题。可能网络中还存在一些问题，欢迎大家批评指正。


本文链接: http://www.dtmao.cc/news_show_400355.shtml

附件下载

相关教程

    暂无相关的数据...

共有条评论 网友评论

验证码: 看不清楚?