-
글쓴이글
-
2022년 3월 31일 19:31 #30777
김동범참가자안녕하세요!
다름이 아니라 CNN으로 predict를 했는데, output이 전부 똑같은 값으로 나옵니다...
fitting 이나 evaluate 시에 콘솔에 뜨는 accuracy나 loss는 잘 나오는것 같은데요ㅠㅠㅠ
구글링을 해서 나온 방법을 전부 해보았지만 진전이 없네요.. 어떻게 해야 할지 감이 안잡힙니다...
혹시 몰라 소스 코드도 같이 보낼께요.
우선 파일구조는 다음과 같습니다.
**train용 데이터셋
dataset/train/class1
dataset/train/class2
dataset/val/class1
dataset/val/class2
**predict용 데이터셋
predict_dataset/class1
predict_dataset/class2
-------------------------------------------------------------
------------------------학습 소스--------------------------
-------------------------------------------------------------
############################################################
######################## Preference ########################
######################## Prefix Setting ####################
############################################################
fpath_workspace = './PWD_model/'
fpath_dataset = 'dataset/preprocessing/split/'
fpath_models = 'models/'
model_name = 'pwd-cnn-vgg16'
model_version = '_v1.14'##################
## Load Dataset ##
##################
print("============")
print("Load Dataset")
print("============")
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20, ##회전
zoom_range=0.15, ##확대
width_shift_range=0.2, ##이동
height_shift_range=0.2,
shear_range=0.15, ##굴절
horizontal_flip=True, ##가로반전
fill_mode="nearest"
)
train_dataset = train_datagen.flow_from_directory(
fpath_workspace+fpath_dataset+'train/',
target_size=(64, 64),
batch_size=32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_dataset = validation_datagen.flow_from_directory(
fpath_workspace+fpath_dataset+'val/',
target_size=(64, 64),
batch_size=32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)################################
## Image Classification Model ##
## CNN - VGG16 #################
################################
print("==========================")
print("Image Classification Model")
print("==========================")
model = tf.keras.Sequential([## Convolution layer(VGG-16)
tf.keras.layers.Conv2D(64, (3,3), padding="same", activation='relu', strides=(1,1) , input_shape=(64, 64, 3)),
tf.keras.layers.Conv2D(64, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(128, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(128, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),## Flatten
tf.keras.layers.Flatten(),## Fully-Connected layer
tf.keras.layers.Dense(2*2*512, activation='relu'),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),## Output
tf.keras.layers.Dense(1, activation='sigmoid'),])
## Model Figure Save
plot_model(model,
to_file=
fpath_workspace
+fpath_models
+model_name
+model_version
+'.png',
show_shapes=True
)###################
## Model complie ##
###################
## Complie option
opt = tf.keras.optimizers.Adam(learning_rate=0.00002)
print("=============")
print("model complie")
print("=============")
model.compile(
loss="binary_crossentropy",
optimizer=opt,
metrics=['accuracy']
)##################
## Model Option ##
##################
tensorboard = TensorBoard(log_dir=
fpath_workspace
+fpath_models
+model_name
+model_version
)
es = EarlyStopping(monitor='val_accuracy', patience=30, mode='max')
def scheduler(epoch):
if epoch < 10:
return 0.00002
else:
return 0.00002 * tf.math.exp(0.1 * (10 - epoch))
lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler)
###################
## Model fitting ##
###################
print("=============")
print("Model Fitting")
print("=============")
model.fit(
train_dataset,
validation_data=validation_dataset,
callbacks=[tensorboard, lr_schedule, es],
epochs=250
)################
## Model Save ##
################
model.save(
fpath_workspace
+fpath_models
+model_name
+model_version
+'.h5'
)
-------------------------------------------------------------
-------------------------예측 소스--------------------------
-------------------------------------------------------------
############################################################
######################## Preference ########################
######################## Prefix Setting ####################
############################################################
fpath_workspace = './PWD_model/'
fpath_dataset = 'dataset/predict_dataset/tiff2jpg/'
fpath_models = 'models/'
model_name = 'pwd-cnn-vgg16'
model_version = '_v1.14'################
## Load Model ##
################
print("==============")
print("Load Model....")
print("==============")
model = tf.keras.models.load_model(fpath_workspace+fpath_models+model_name+model_version+'.h5')##################
## Load Dataset ##
##################
print("============")
print("Load Dataset")
print("============")
fpath_dataset2 = 'dataset/preprocessing/split/'
validation_datagen = ImageDataGenerator(
rescale=1./255
)
val_ds = validation_datagen.flow_from_directory(
fpath_workspace+fpath_dataset2+'val/',
target_size=(64, 64),
batch_size=32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)
##################
## Predict ##
##################
print("============")
print("Predict")
print("============")
pred = model.predict(predict_dataset)
print(pred)
print(" pred ndim : {}".format(pred.ndim))
print(" pred shape : {}".format(pred.shape))
---------------------------------------------------------------------------
---------------------------------------------------------------------------
이상입니다..
여기까지 해서
pred, pred.ndim, pred.shape 를 print해보면
[[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
...
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]
[0.3976089]]
pred ndim : 2
pred shape : (283, 1)위와 같이 나오구요,
fitting 할 때 정확도는
val_loss: 0.6218 / val_accuracy: 0.94 입니다....
model 이 잘못되었나 싶어 바꿔보기도 하고
샘플수가 부족했나싶어서 추가해보기도 했는데
도통 감이 안잡히네요.....
살려주세요,,,,
2022년 3월 31일 22:55 #30795
codingapple키 마스터predict_dataset 변수같은거 정의하는 부분이 없는거같은데 변수 어떻게 만들고있는지 확인해봐야할듯요
2022년 3월 31일 23:25 #30800
김동범참가자아! 복붙하다가 오타가 났나봐요ㅠㅠ
마지막 부분에 model.predict에 넣은 predict_dataset은 다음과 같습니다 !
predict_datagen = ImageDataGenerator(
rescale=1./255
)predict_dataset = predict_datagen.flow_from_directory(
'파일경로',
target_size=(64, 64),
batch_size=32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)2022년 4월 1일 10:05 #30821
codingapple키 마스터사진데이터를 늘려보거나
batch size를 32말고 더 줄여보거나
early stopping 하지 말고 epoch 수를 늘려보거나
dropout 레이어에서 0.5는 너무 많으니 좀 줄여봅시다
2022년 4월 1일 20:25 #30869
김동범참가자안녕하세요..
말씀하신대로 시도해본 것들 입니다.
--------------------------
learning_rate 0.00002 에서 0.01 로 줄이고 (이건 아래 항목들 해보고 추가로 해본 것 입니다.)
early stopping 없애고
epoch 250에서 1000으로 늘림 (이것도 계속 바꿔봤습니다. 그런데 epoch가 일정 부분(epoch 100번때부터) 에서 부터 accuracy랑 loss 변화가 없는데 의미가 있나 모르겠습니다 ㅠㅠ)
dropout은 0.5씩 2개 layer에 있던 것을 0.2와 0.1로 여러번 줄여봤습니다.
train/val batch사이즈 36 에서 8로 줄이고,
predict batch사이즈로 36 에서 8로 줄여봤습니다 !아 추가로 training dataset도 추가로 어떻게 구해서 10배정도 늘려서 해봤습니다,,,,
-----------------------------
위의 항목들을 각각 오늘 하루종일 해보았는데,,
결과가 똑같습니다...
도대체 어디가 문제일까요,,,
추가로 model.predict(데이터셋) 에 있어서
데이터셋에 대한 문법(배열 혹은 기초 tensorflow dimension) 잘못되었나? 싶어서
단일 이미지 tensor로 각각 몇 개 해보았는데도 output이 동일하게 나오더라구요,,,,
말씀해주신 것 이외에 아래 댓글에서 나온 것들 모두 해봤는데도 똑같은 결과가 나와서 너무 어지럽네요ㅠㅠㅠ
https://github.com/keras-team/keras/issues/6447
혹시 제가 tensorflow 혹은 python에서 문법적으로 틀린 것이 있을까 걱정이 되어서 아래의 최종적으로 돌린 원본 소스를 첨부합니다...
파일 트리는 저번과 똑같습니다.
----------------------------------------------------------
----------------------훈련 소스------------------------
----------------------------------------------------------
##############################################################
## Image Classification for Pine Wilt Disease(PWD) Training ##
## By Dongbeom Kim ###########################################
##################################################################################
## Import Library ##
####################
print("==============")
print("Import Library")
print("==============")
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import plot_model
import os
from tensorflow.keras.callbacks import EarlyStopping
from keras.backend import tensorflow_backend as K
from keras.preprocessing.image import ImageDataGenerator#################
## GPU Setting ##
#################
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
session = tf.compat.v1.Session(config=config)
session.close()############################################################
######################## Preference ########################
######################## Prefix Setting ####################
############################################################
fpath_workspace = './PWD_model/'
fpath_dataset = 'dataset/preprocessing/split/' ##real train/val/test dataset
# fpath_dataset = 'dataset/predict_dataset/split_1_15/' #real predict dataset
fpath_models = 'models/'
model_name = 'pwd-cnn-vgg16'
model_version = '_v1.15'##################
## Load Dataset ##
##################
print("============")
print("Load Dataset")
print("============")
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20, ##회전
zoom_range=0.15, ##확대
width_shift_range=0.2, ##이동
height_shift_range=0.2,
shear_range=0.15, ##굴절
horizontal_flip=True, ##가로반전
fill_mode="nearest"
)
train_dataset = train_datagen.flow_from_directory(
fpath_workspace+fpath_dataset+'train/',
target_size=(64, 64),
batch_size=8, #32, ##한번에 넣는 사진 수
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)
print("===================")
print(train_dataset)
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_dataset = validation_datagen.flow_from_directory(
fpath_workspace+fpath_dataset+'val/',
target_size=(64, 64),
batch_size=8, #32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)
print("===================")
print(train_dataset)
print(validation_dataset)################################
## Image Classification Model ##
## CNN - VGG16 #################
################################
print("==========================")
print("Image Classification Model")
print("==========================")
model = tf.keras.Sequential([## Convolution layer(VGG-16)
tf.keras.layers.Conv2D(64, (3,3), padding="same", activation='relu', strides=(1,1) , input_shape=(64, 64, 3)),
tf.keras.layers.Conv2D(64, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(128, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(128, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(256, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.Conv2D(512, (3,3), padding="same", activation='relu', strides=(1,1)),
tf.keras.layers.MaxPool2D((2,2), strides=(2,2), padding="same"),## Flatten
tf.keras.layers.Flatten(),## Fully-Connected layer
tf.keras.layers.Dense(2*2*512, activation='relu'),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2), #(0.5)
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2), #(0.5)
## Output
tf.keras.layers.Dense(1, activation='sigmoid'),])
## Model Figure Save
plot_model(model,
to_file=
fpath_workspace
+fpath_models
+model_name
+model_version
+'.png',
show_shapes=True
)###################
## Model complie ##
###################
## Complie option
# opt = tf.keras.optimizers.Adam(learning_rate=0.00002)
opt = tf.keras.optimizers.Adam(learning_rate=0.01)
print("=============")
print("model complie")
print("=============")
model.compile(
loss="binary_crossentropy",
optimizer=opt,
metrics=['accuracy']
)##################
## Model Option ##
##################
tensorboard = TensorBoard(log_dir=
fpath_workspace
+fpath_models
+model_name
+model_version
)
def scheduler(epoch):
if epoch < 10:
return 0.00002
else:
return 0.00002 * tf.math.exp(0.1 * (10 - epoch))
lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler)
###################
## Model fitting ##
###################
print("=============")
print("Model Fitting")
print("=============")
model.fit(
train_dataset,
validation_data=validation_dataset,
callbacks=[tensorboard, lr_schedule],
epochs=1000 #250
)################
## Model Save ##
################
model.save(
fpath_workspace
+fpath_models
+model_name
+model_version
+'.h5'
)
-------------------------------------------------------------
-------------------------예측 소스--------------------------
---------------------------------------------------------------
#############################################################
## Image Classification for Pine Wilt Disease(PWD) Predict ##
## By Dongbeom Kim ##########################################
#################################################################################
## Import Library ##
####################
print("==============")
print("Import Library")
print("==============")
from cgi import test
from unittest import result
from matplotlib import testing
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import plot_model
import os
from tensorflow.keras.callbacks import EarlyStopping
from keras.backend import tensorflow_backend as K
import matplotlib.pyplot as plt
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator#################
## GPU Setting ##
#################
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
session = tf.compat.v1.Session(config=config)
session.close()############################################################
######################## Preference ########################
######################## Prefix Setting ####################
############################################################
fpath_workspace = './PWD_model/'
fpath_dataset = 'dataset/predict_dataset/tiff2jpg/'
# fpath_dataset = 'dataset/preprocessing/not_split/' ##real train/val/test dataset
# fpath_dataset_name = '0/nowpwd_0_1_1045_2085_0.tiff.jpg' #=> [[0.15129317]]
# fpath_dataset_name = '1/pwd_1_72_1177_2458_1.tiff.jpg' #=> [[0.15129317]]
fpath_models = 'models/'
model_name = 'pwd-cnn-vgg16'
model_version = '_v1.15'################
## Load Model ##
################
print("==============")
print("Load Model....")
print("==============")
model = tf.keras.models.load_model(
fpath_workspace
+fpath_models
+model_name
+model_version
+'.h5'
)##################
## Load Dataset ##
##################
print("============")
print("Load Dataset")
print("============")#### Single Img Test...source.....start
# # testing_dataset = tf.keras.preprocessing.image_dataset_from_directory(
# # fpath_workspace+fpath_dataset,
# # image_size=(64,64),
# # batch_size=32,
# # # validation_split=0.2,
# # # seed=1234
# # )
# img = tf.keras.preprocessing.image.load_img(
# fpath_workspace + fpath_dataset + fpath_dataset_name
# , target_size=(64,64)
# )
# img_tensor = tf.keras.preprocessing.image.img_to_array(img)
# img_tensor = tf.expand_dims(img_tensor, axis=0)
# img_tensor = img_tensor / 255.0
# print(img_tensor)
#### Single Img Test...source.....end## Predict Dataset...
predict_datagen = ImageDataGenerator(
rescale=1./255
)
predict_dataset = predict_datagen.flow_from_directory(
fpath_workspace+fpath_dataset,
# fpath_workspace+fpath_dataset2+'val/',
target_size=(64, 64),
batch_size=8, #32,
class_mode='binary',
seed=123,
color_mode='rgb',
shuffle=False)
print("==============")
print(predict_dataset)
print("==============")
##############
## Predict ###
##############
print("=======")
print("Predict")
print("=======")## Single Img Test....
# pred = model.predict(img_tensor)
# print("===================")## Predict Dataset...
predict_result = model.predict(predict_dataset)
print("===================")
print(predict_result)
print(" result ndim : {}".format(predict_result.ndim))
print(" result shape : {}".format(predict_result.shape))
print("===================")
predict_result_df = pd.DataFrame(predict_result)
print("===================")
print(predict_result_df)
predict_result_df.to_csv(
fpath_workspace+"test_cnn"+model_name+model_version+".xlsx"
, sep=','
, na_rep='NaN'
)
--------------------------------------------------------------------
--------------------------------------------------------------------
--------------------------------------------------------------------
이상입니다......
2022년 4월 2일 10:44 #30906
codingapple키 마스터- 데이터를 2개 카테고리로 분류하는 문제가 맞는지
- 레이어를 좀 간단하게 구성하거나
- 모델저장이랑 불러오기가 잘되는게 맞는지 확인합시다 모델 트레이닝이 끝나고 저장하지말고 바로 model.predict 해보면 될듯요
2022년 4월 4일 11:20 #31065
김동범참가자안녕하세요.. 항상 감사합니다..
결론부터 말씀드리자면 결과는 똑같았습니다 ㅠㅠㅠ
1. 데이터를 2개 카테고리로 분류하는 문제가 맞는지
- 네. 맞습니다. 기존의 논문을 보면서 재생산한것이고, 제가 생각했을 때도, 맞습니다,,
사진으로 개와 고양이를 분류하는 문제랑 똑같은데, 혹시 그렇다고 했을 때 소스나 dataset의 분류가 잘못된것일까요 ?
소스에서는 마지막 dense 레이어에 output이 1이고 activation이 sigmoid가 맞지 않나요,,
로스함수도 binary_crossentropy이구요,,,
아 최초에 데이터 불러올때도 flow_from_directory의 class_mode 옵션이 binary이구요,,
2. 레이어를 좀 간단하게 구성하거나
- 이것도 반영하여 흔히 널린 cnn기본구조 예시로 해보았습니다만 똑같았습니다...
3. 모델저장이랑 불러오기가 잘되는게 맞는지 확인합시다 모델 트레이닝이 끝나고 저장하지말고 바로 model.predict 해보면 될듯요
- model.save를 하지않고 fitting한 모델을 바로 밑에서 predict를 해보았습니다.
그런데도 결과가 똑같네요,,,
모델을 저장하고 불러오는 쪽에서 잘못된 느낌이라 주말간 계속 해보았는데도 똑같아서 답답하네요 ㅠㅠ
그리고 모델 훈련시에 잘못된 것이라면 애초에 fitting할때와 evaluate할 때, accuracy가 터미널 콘솔창에 뜨는데 이것도 이해가 가지 않네요,,
혹시나 conda 세팅에서 tensorflow 세팅을 처음부터 잘못했나싶어서 완전히 다시 세팅을 시도해보는 중이긴합니다만 ㅠㅠㅠ
감이 안잡히네요,,,
2023년 2월 13일 16:33 #68187
조대근참가자저는 DNN 을 코딩하던 중인데.. 저도 같은 문제가 발생합니다ㅠㅜ 혹시 category 2개로 한다는 의미가 뭔지 알 수 있을까요 ? ㅠㅠㅜ 제 코드는 이렇습니다..ㅜㅠ
class Classifiers(): def __init__(self, X, Y): self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(X,Y, test_size=0.2, random_state=0) def do_DNN(self):
# datafram 은 .index[] 형태로 호출 dataset = tf.data.Dataset.from_tensor_slices((self.x_train.values,self.y_train.values))
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(shape=(168,))) # Input tensor model.add(tf.keras.layers.Dense(units=128, activation='relu')) # hidden layer 1 model.add(tf.keras.layers.Dense(units=1024, activation='relu')) #hidden layer 2 model.add(tf.keras.layers.Dropout(0.3)) model.add(tf.keras.layers.Dense(units=1024, activation='relu')) #hidden layer 3 model.add(tf.keras.layers.Dropout(0.3)) model.add(tf.keras.layers.Dense(units=128, activation='relu')) #hidden layer 4 model.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) #output layer
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
train_dataset=dataset.shuffle(len(self.x_train)).batch(100)
model.fit(train_dataset,epochs=50)
test_loss, test_accuracy = model.evaluate(self.x_test,self.y_test) print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
print("\n\n predict!!!\n") y_pred=model.predict(self.x_test) #y_pred = y_pred.flatten() # 차원 펴주기 #y_pred = np.where(y_pred > 0.5, 1 , 0) #0.5보다크면 1, 작으면 0 print("\n DNN Models :") #print("accuracy_score : ",accuracy_score(self.y_test, y_pred)) for i in range(6): print("test num %d real y : %d"%(i, self.y_test.iloc[i])) print("test num %d pred y : %f"%(i, y_pred[i])) print("") 결과 :
test num 0 real y : 1 test num 0 pred y : 0.950100
test num 1 real y : 1 test num 1 pred y : 0.950100
test num 2 real y : 1 test num 2 pred y : 0.950100
test num 3 real y : 1 test num 3 pred y : 0.950100
test num 4 real y : 1 test num 4 pred y : 0.950100
test num 5 real y : 1 test num 5 pred y : 0.950100
test num 6 real y : 0 test num 6 pred y : 0.950100
-
글쓴이글
- 답변은 로그인 후 가능합니다.