#---------------- Prepare the command line -----------------------------
import argparse

def get_input_args():
    # Create a parser using ArgumentParser
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='saved_models/checkpoint.pth',
                        help='Specify a file that contains the model state. (Default = saved_models/checkpoint.pth)')
    parser.add_argument('--image_path', type=str, default='../Face Mask Dataset/Test/WithMask/1175.png',
                        help='Specify an image file to be used in the prediction process. (Default = ../Face Mask Dataset/Test/WithMask/1175.png)')
    parser.add_argument('--top_k', type=int, default=2,
                        help='Specify the number of top K most likely classes to be displayed. (Default = 2)')
    parser.add_argument('--gpu', action='store_true',
                        help='Run inference on a GPU.')
    return parser.parse_args()
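# Example invocation (a sketch; it assumes the default checkpoint and test image exist on disk):
#   python predict.py --model_path saved_models/checkpoint.pth \
#       --image_path "../Face Mask Dataset/Test/WithMask/1175.png" --top_k 2 --gpu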
#-----------------------------------------------------------------------------------------------------
import torch
from torch import optim
from torchvision import models
from PIL import Image
import numpy as np
#---------------- Load Function ------------------------------------------
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath, map_location=torch.device('cpu'))
    # Download the pretrained model
    model = models.densenet121(pretrained=True)
    # Freeze the feature extractor
    for param in model.parameters():
        param.requires_grad = False
    # Configure the model from the checkpoint
    model.class_to_idx = checkpoint['class_to_idx']
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['model_state'])
    # Rebuild the optimizer and restore its state
    # (assumes the checkpoint stored the result of optimizer.state_dict())
    optimizer = optim.Adam(model.classifier.parameters(), lr=0.003)
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    return model, optimizer
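# Note: load_checkpoint assumes the checkpoint dict was written by the training script with the
# keys 'class_to_idx', 'classifier', 'model_state', and 'optimizer_state', e.g. (illustrative only,
# the training-side names are hypothetical):
#   torch.save({'class_to_idx': train_data.class_to_idx, 'classifier': model.classifier,
#               'model_state': model.state_dict(), 'optimizer_state': optimizer.state_dict()},
#              'saved_models/checkpoint.pth')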
#---------------- Image Preprocessing Function ------------------------------------------
def process_image(image):
    ''' Scales and normalizes a PIL image for a PyTorch model,
        returns a NumPy array (channels first).
    '''
    # Open the image and force 3 RGB channels (test PNGs may carry an alpha channel)
    image = Image.open(image).convert('RGB')
    # Resize to 224x224 to match the [1, 3, 224, 224] input shape expected by the model
    image = image.resize((224, 224))
    # Convert the pixel values to floats between 0 and 1
    image = np.array(image) / 255
    # Normalize the colour channels with the ImageNet mean and std
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = (image - mean) / std
    # Reorder the dimensions from HWC to CHW, as PyTorch expects
    image = image.transpose((2, 0, 1))
    return image
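# For reference: process_image returns a float NumPy array of shape (3, 224, 224) (channels first),
# which predict() below wraps with torch.from_numpy() and unsqueeze(0) into a [1, 3, 224, 224] batch.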
#---------------- Predict Function ------------------------------------------
def predict(image_path, model, topk=2, gpu=False):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
        Returns the top K probabilities and the corresponding class names.
    '''
    # Process the input image
    image = process_image(image_path)       # Processed image (NumPy array)
    image_tensor = torch.from_numpy(image)  # Image as a tensor
    image_feed = image_tensor.unsqueeze(0)  # Add a batch dimension -> [1, 3, 224, 224]
    # Switch to evaluation mode
    model.eval()
    # Use the GPU only if it was requested and is actually available
    selected_device = "cuda" if gpu else "cpu"
    print(f"\nThe chosen device for predicting is {selected_device}.")
    if selected_device == 'cpu':
        available_device = 'cpu'
        print("\nStart predicting on the CPU.")
    else:
        available_device = "cuda" if torch.cuda.is_available() else "cpu"
        if available_device == 'cpu':
            print("\nSorry, no GPU was found. Start predicting on the CPU.")
        else:
            print("\nStart predicting on the GPU.")
    with torch.no_grad():
        # Move the model and the input to the selected device
        model.to(available_device)
        image_feed = image_feed.float().to(available_device)
        logits = model(image_feed)
        # The model outputs log-probabilities, so exponentiate to recover probabilities
        ps = torch.exp(logits)
        top_p, top_class = ps.topk(topk, dim=1)
        top_p, top_class = top_p.cpu(), top_class.cpu()
        # Invert class_to_idx to get a mapping from index to class name
        idx_to_class = {val: key for key, val in model.class_to_idx.items()}
        top_classes = [idx_to_class[each] for each in top_class.numpy()[0]]
    return top_p.numpy()[0].tolist(), top_classes
#----------------------------------------------------------------------------------------------------
args = get_input_args()
model, optimizer = load_checkpoint(args.model_path)
probs, classes = predict(args.image_path, model, args.top_k, gpu=args.gpu)
print(probs)
print(classes)
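# A small follow-up sketch: print each predicted class next to its probability. It assumes the
# `probs` and `classes` lists returned by predict() above stay parallel (same ordering).
for cls, p in zip(classes, probs):
    print(f"{cls}: {p:.4f}")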