Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
Dear 365 Team,
When I run the code below, it displays the above error. Kindly assist.
import io
import itertools
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import matplotlib.pyplot as plt
data_train = np.load("Primary categories - Train.npz")
data_val = np.load("Primary categories - Validation.npz")
data_test = np.load("Primary categories - Test.npz")
images_train = data_train['images']
labels_train = data_train['labels']
images_val = data_val['images']
labels_val = data_val['labels']
images_test = data_test['images']
labels_test = data_test['labels']
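# Scale the pixel values to the [0, 1] range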
images_train = images_train/255.0
images_val = images_val/255.0
images_test = images_test/255.0
EPOCHS = 15
BATCH_SIZE = 64
HP_FILTER_SIZE = hp.HParam('filter_size', hp.Discrete([3,5,7]))
HP_FILTER_NUM = hp.HParam('filter_number', hp.Discrete([32,64,96,128]))
METRIC_ACCURACY = 'accuracy'
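# Register the hyperparameters and the metric with the HParams plugin so TensorBoard can display them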
with tf.summary.create_file_writer('Logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_FILTER_SIZE, HP_FILTER_NUM],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
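# Build, compile, train, and evaluate a single model for one combination of hyperparameters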
def train_test_model(hparams, session_num):
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(hparams[HP_FILTER_NUM], hparams[HP_FILTER_SIZE], activation='relu', input_shape=(120,90,3)),
        tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
        tf.keras.layers.Conv2D(hparams[HP_FILTER_NUM], 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(3)
    ])

    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])

    log_dir = "Logs\\Model 1\\fit\\" + "run-{}".format(session_num)
    def plot_confusion_matrix(cm, class_names):
        figure = plt.figure(figsize=(12,12))
        plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title("Confusion matrix")
        plt.colorbar()
        tick_marks = np.arange(len(class_names))
        plt.xticks(tick_marks, class_names, rotation=45)
        plt.yticks(tick_marks, class_names)
        # Normalize the confusion matrix
        cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
        # Use white text if squares are dark; otherwise black
        threshold = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            color = 'white' if cm[i,j] > threshold else "black"
            plt.text(j, i, cm[i,j], horizontalalignment='center', color=color)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        return figure
    def plot_to_image(figure):
        """Converts the matplotlib plot specified by 'figure' to a PNG image and returns it.
        The supplied figure is closed and inaccessible after this call."""
        # Save the plot to a PNG in memory
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        # Closing the figure prevents it from being displayed directly inside the notebook
        plt.close(figure)
        buf.seek(0)
        # Convert PNG buffer to TF image
        image = tf.image.decode_png(buf.getvalue(), channels=4)
        # Add the batch dimension
        image = tf.expand_dims(image, 0)
        return image
    # Define a file writer for logging the confusion matrix for this run
    file_writer_cm = tf.summary.create_file_writer(log_dir + '/cm')
    def log_confusion_matrix(epoch, logs):
        # Use the model to predict the values from the validation dataset
        test_pred_raw = model.predict(images_val)
        test_pred = np.argmax(test_pred_raw, axis=1)
        # Calculate the confusion matrix
        cm = sklearn.metrics.confusion_matrix(labels_val, test_pred)
        figure = plot_confusion_matrix(cm, class_names=['Glasses/Sunglasses', 'Trousers/Jeans', 'Shoes'])
        cm_image = plot_to_image(figure)
        # Log the confusion matrix as an image summary
        with file_writer_cm.as_default():
            tf.summary.image("Confusion Matrix", cm_image, step=epoch)
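    # Callbacks: log a confusion-matrix image each epoch, write TensorBoard summaries, and stop early when val_loss stops improving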
    cm_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=0)
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        mode='auto',
        min_delta=0,
        patience=2,
        verbose=0,
        restore_best_weights=True
    )
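    # Train on the training set; the validation set drives early stopping and the confusion-matrix callback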
    model.fit(
        images_train,
        labels_train,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        callbacks=[tensorboard_callback, cm_callback, early_stopping],
        validation_data=(images_val, labels_val),
        verbose=2
    )

    _, accuracy = model.evaluate(images_val, labels_val)
    model.save("saved_models\\Model 1\\Run-{}".format(session_num))
    return accuracy
def run(log_dir, hparams, session_num):
    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams(hparams)  # Record the values used in this trial
        accuracy = train_test_model(hparams, session_num)
        tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
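# Grid search over every filter size / filter number combination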
session_num = 1
for filter_size in HP_FILTER_SIZE.domain.values:
    for filter_num in HP_FILTER_NUM.domain.values:
        hparams = {
            HP_FILTER_SIZE: filter_size,
            HP_FILTER_NUM: filter_num
        }
        run_name = 'run-%d' % session_num
        print('--- Starting trial: %s' % run_name)
        print({h.name: hparams[h] for h in hparams})
        run('Logs/Model 1/hparam_tuning/' + run_name, hparams, session_num)
        session_num += 1