我正在尝试使用LFW数据集运行一个深度学习算法。当我试图运行代码时,我将收到下面列出的路径名的错误消息:
ValueError: Could not find a backend to open `C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/LFW-DeepF-ADT/LFW-DeepF-ADT-Training\Abdullah_Gul\desktop.ini` with iomode `ri`。我检查了图像,但没有发现有什么问题。要使代码成功运行,还需要检查或添加什么?它们是 .jpg 图像,来自 LFW deep-funneled(深度漏斗对齐)数据集。
代码张贴如下:
# Import the packages
from matplotlib import pyplot
import matplotlib.pyplot as plt
from pipeline.nn.conv import mini_lenet
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, \
f1_score
from pipeline.callbacks import train
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from image_dataset_loader import load
import numpy as np
import argparse
from sklearn.model_selection import train_test_split
import os
import tensorflow as tf
# Command-line interface:
#   -m/--model  -> path where the trained model will be written
#   -o/--output -> directory that receives logs and plots
parser = argparse.ArgumentParser()
for short_flag, long_flag, help_text in (
        ("-m", "--model", "path to output model"),
        ("-o", "--output", "path to output directory (logs, plots, etc.)")):
    parser.add_argument(short_flag, long_flag, required=True, help=help_text)
args = vars(parser.parse_args())
# ---- Load the LFW (deep-funneled) dataset ----
# (Alternate FERET / Plan-1 path sets from earlier experiments removed; swap
# the three paths below to point at a different dataset.)
print("[INFO] loading LFW Dataset...")
mainPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/LFW-DeepF-ADT"
trainPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/LFW-DeepF-ADT/LFW-DeepF-ADT-Training"
testPath = "C:/Users/JoshG/PycharmProjects/Local-Binary-Patterns/Images_LFW-CARE/LFW-DeepF-ADT/LFW-DeepF-ADT-Testing"


def _purge_non_images(root, extensions=(".jpg", ".jpeg", ".png", ".bmp")):
    """Delete files under *root* whose extension is not a known image type.

    Windows silently drops hidden ``desktop.ini`` files into folders, and the
    dataset loader tries to open every file it finds, raising
    ``ValueError: Could not find a backend to open ... desktop.ini``.
    Removing non-image files up front prevents that crash.

    Returns the list of removed file paths.
    """
    removed = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for fname in filenames:
            if not fname.lower().endswith(extensions):
                path = os.path.join(dirpath, fname)
                os.remove(path)
                removed.append(path)
    return removed


# Sweep both splits before loading so stray system files cannot crash `load`.
for _root in (trainPath, testPath):
    for _gone in _purge_non_images(_root):
        print("[INFO] removed non-image file:", _gone)

(xtrain, ytrain), (xtest, ytest) = load(mainPath, [trainPath, testPath])
# Preview the first nine test images in a 3x3 grid to sanity-check loading.
for idx, img in enumerate(xtest[:9]):
    pyplot.subplot(3, 3, idx + 1)
    pyplot.imshow(img)
# Show the figure
pyplot.show()
# Baseline LeNet-style CNN for 227x227 RGB inputs with a 2-way softmax head.
# NOTE(review): this model is later replaced by mini_lenet.build(...) further
# down the script, so only its summary printout is actually used here.
LeNet = Sequential([
    Conv2D(6, (5, 5), strides=(1, 1), padding='valid', activation='relu',
           input_shape=(227, 227, 3)),
    MaxPool2D((2, 2), (2, 2)),
    Conv2D(16, (5, 5), (1, 1), padding='valid', activation='relu'),
    MaxPool2D((2, 2), (2, 2)),
    Conv2D(120, (5, 5), (1, 1), padding='valid', activation='relu'),
    Flatten(),
    Dense(84, activation='relu'),
    Dense(2, activation='softmax'),
])
LeNet.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=["accuracy"])
LeNet.summary()
# NOTE: the command-line arguments were already parsed once at the top of the
# script; a second, identical parser here re-parsed sys.argv into the same
# `args` dict. The duplicate has been removed — `args` from the first parse
# is reused below.
# Report the dimensions of the freshly loaded LFW arrays.
print('----- Dimension of LFW Dataset -----')
print("The shape of the training image:", xtrain.shape)
print("The shape of testing image: ", xtest.shape)
print("Shape of a single image: ", xtest[0].shape)
print('-----------------------------------------')
# Training schedule hyper-parameters: total epoch count and the initial
# learning rate consumed by the polynomial decay schedule defined below.
num_EPOCHS = 75
learn_rate = 5e-3
def decay_poly(epoch, *, max_epochs=None, base_rate=None, power=1.0):
    """Polynomial learning-rate decay schedule for LearningRateScheduler.

    Parameters
    ----------
    epoch : int
        Zero-based epoch index supplied by the Keras callback.
    max_epochs : int, optional (keyword-only)
        Total number of epochs; defaults to the module-level ``num_EPOCHS``.
        Keyword-only so Keras's ``schedule(epoch, lr)`` probe cannot bind the
        current learning rate here by accident.
    base_rate : float, optional (keyword-only)
        Initial learning rate; defaults to the module-level ``learn_rate``.
    power : float, optional (keyword-only)
        Decay exponent; 1.0 (the default) gives linear decay to 0.

    Returns
    -------
    float
        The learning rate to use for this epoch.
    """
    epoch_max = num_EPOCHS if max_epochs is None else max_epochs
    rate_base = learn_rate if base_rate is None else base_rate
    # Anneal from rate_base at epoch 0 down to 0 at epoch_max.
    return rate_base * (1 - (epoch / float(epoch_max))) ** power
# Cast pixel arrays to float32 so the mean subtraction below runs in floating
# point instead of integer arithmetic.
xtrain = xtrain.astype("float32")
xtest = xtest.astype("float32")
# apply mean subtraction to the data
# The per-pixel mean is computed from the training images only and subtracted
# from both splits — test data is normalized with training statistics.
mean = np.mean(xtrain, axis=0)
xtrain -= mean
xtest -= mean
# Hold out 12% of the (already mean-centred) training data for validation.
# NOTE(review): no random_state is set, so the split differs between runs.
xtrain, x_val, ytrain, y_val = train_test_split(xtrain, ytrain, test_size=.12)
print('')
# Report the dimensions of the three splits before one-hot encoding.
print('Dimension of LFW Dataset with validation')
print((xtrain.shape, ytrain.shape))
print((x_val.shape, y_val.shape))
print((xtest.shape, ytest.shape))
# One-hot encode the integer class labels: shape[1] of ytrain/y_val/ytest
# becomes the number of distinct classes present in the label arrays.
ytrain = to_categorical(ytrain)
y_val = to_categorical(y_val)
ytest = to_categorical(ytest)
print('')
print('Verifying the dimension after one hot encoding')
print((xtrain.shape, ytrain.shape))
print((x_val.shape, y_val.shape))
print((xtest.shape, ytest.shape))
# Image Data Augmentation
# NOTE(review): random rotation/flip/zoom is normally applied to training data
# only; augmenting validation/test batches (val_generator, test_generator) is
# unusual — confirm this is intended.
train_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
val_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
test_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
# ImageDataGenerator.fit only computes dataset statistics, which are needed
# for featurewise options — none of which are enabled above — so these calls
# are effectively no-ops for the configured transforms.
train_generator.fit(xtrain)
val_generator.fit(x_val)
test_generator.fit(xtest)
# `aug` is the generator actually passed to model.fit further below; the
# three generators above are not used for training in the visible code.
aug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
horizontal_flip=True, fill_mode="nearest")
# Construct the set of callbacks.
# Plot/JSON log filenames are keyed by the current process id so parallel
# runs do not overwrite each other's outputs.
figPath = os.path.sep.join([args["output"], "{}.jpg".format(os.getpid())])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
# `train` is a project-local callback (pipeline.callbacks) — presumably a
# training monitor that writes the figure/JSON above; verify in that module.
# LearningRateScheduler applies the polynomial decay defined in decay_poly.
callbacks = [train(figPath, jsonPath=jsonPath),
LearningRateScheduler(decay_poly)]
print('')
# ---- Compile and train the final model ----
print("[INFO] compiling model...")
# Single batch-size constant used both for the augmentation generator and for
# deriving steps_per_epoch (the original batched by 25 but divided by 64).
BATCH_SIZE = 25
# `learning_rate` is the current Keras keyword; the old `lr` alias is
# deprecated and removed in recent releases.
opt = SGD(learning_rate=learn_rate, momentum=0.9)
# NOTE(review): classes=100 must match ytrain.shape[1] after one-hot
# encoding — confirm the LFW subset really contains 100 identities.
LeNet = mini_lenet.build(width=227, height=227, depth=3, classes=100)  # LFW
LeNet.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Baseline score of the untrained network (weights are still random here).
LeNet.evaluate(xtest, ytest)
# Train the network
print("[INFO] training network...")
# Fix: validate on the held-out split (x_val, y_val) — previously the TEST
# set was used for validation, which leaks test data into model selection
# and left the validation split entirely unused.
LeNet.fit(aug.flow(xtrain, ytrain, batch_size=BATCH_SIZE),
          validation_data=(x_val, y_val),
          steps_per_epoch=len(xtrain) // BATCH_SIZE,
          epochs=num_EPOCHS, callbacks=callbacks, verbose=1)
# ---- Learning curves ----
# One figure per metric: first the loss curves, then the accuracy curves,
# each with training in blue and validation in red.
history = LeNet.history.history
for train_key, val_key, label in (("loss", "val_loss", "Loss"),
                                  ("accuracy", "val_accuracy", "Accuracy")):
    fig, axis = plt.subplots(1, 1)
    axis.plot(history[train_key], color='b', label='Training ' + label)
    axis.plot(history[val_key], color='r', label='Validation ' + label)
    axis.set_title('Training & Validation ' + label + ' Graph')
    plt.legend()
    plt.show()
# Defining function for confusion matrix plot
# (indentation reconstructed — the original paste had lost all structure)
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Plot a confusion matrix as an annotated heat map.

    Parameters
    ----------
    y_true, y_pred : array-like of int
        Ground-truth and predicted class indices.
    classes : sequence
        Tick labels for both matrix axes.
    normalize : bool
        If True, divide each row by its sum (per-class fractions).
    title : str, optional
        Figure title; a default is chosen based on ``normalize``.
    cmap : matplotlib colormap
        Colour map for the heat map.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the matrix was drawn on.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize so each entry is the fraction of that true class.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # Draw the matrix as a heat map with a colour bar.
    fig, ax = plt.subplots(figsize=(5, 5))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # Show a tick (and class label) for every row/column.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each cell with its value, switching text colour for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center",
                    color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    return ax
# ---- Test-set predictions ----
# Integer labels: predictions via row-wise argmax over the softmax output,
# ground truth by undoing the one-hot encoding of ytest. (The original also
# computed `xtest_arg`, an unused duplicate of y_true; it has been removed.)
y_pred = np.argmax(LeNet.predict(xtest), axis=1)
y_true = np.argmax(ytest, axis=1)
print(y_pred)
print(y_pred.shape)
发布于 2022-06-27 03:41:02
我找到了解决办法。
在训练文件夹中存在名为 "desktop.ini" 的隐藏文件。将这些文件删除后再运行代码,程序就成功运行了。
https://stackoverflow.com/questions/72765555
复制相似问题