train.py
import time

import torch
import torch.nn as nn
import torchvision.datasets as datasets
import matplotlib.pyplot as plt

from model import *  # expected to provide CNN, transform_train, transform_val

# Load data
train_set = datasets.ImageFolder(root='./data/train', transform=transform_train)
val_set = datasets.ImageFolder(root='./data/val', transform=transform_val)
batch_size = 32
train_load = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
val_load = torch.utils.data.DataLoader(dataset=val_set, batch_size=batch_size, shuffle=True)
train_loss = []
val_loss = []
train_acc = []
val_acc = []
# Training / validation loop
def Training_Model(model, epochs, parameters):
    # CrossEntropyLoss with an SGD optimizer
    loss_f = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(parameters, lr=0.07, weight_decay=0.00001)
    model = model.cuda()
    for epoch in range(epochs):
        start = time.time()
        correct = 0
        iterations = 0
        iter_loss = 0.0
        model.train()  # set training mode
        for inputs, labels in train_load:
            inputs = inputs.cuda()
            labels = labels.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_f(outputs, labels)
            iter_loss += loss.item()
            loss.backward()
            optimizer.step()
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            iterations += 1
        train_loss.append(iter_loss / iterations)
        train_acc.append(100 * correct / len(train_set))

        # Evaluate on the validation set
        iter_loss = 0.0
        correct = 0
        iterations = 0
        model.eval()  # set evaluation mode
        with torch.no_grad():  # no gradients needed for validation
            for inputs, labels in val_load:
                inputs = inputs.cuda()
                labels = labels.cuda()
                outputs = model(inputs)
                loss = loss_f(outputs, labels)
                iter_loss += loss.item()
                _, predicted = torch.max(outputs, 1)
                correct += (predicted == labels).sum().item()
                iterations += 1
        val_loss.append(iter_loss / iterations)
        val_acc.append(100 * correct / len(val_set))

        stop = time.time()
        print('Epoch {}/{}, Training Loss: {:.3f}, Training Accuracy: {:.3f}, Val Loss: {:.3f}, Val Accuracy: {:.3f}, Time: {:.1f}s'
              .format(epoch + 1, epochs, train_loss[-1], train_acc[-1], val_loss[-1], val_acc[-1], stop - start))
# Train the model
model = CNN()
model = model.cuda()
# model.load_state_dict(torch.load('Emotion-Detection.pth'))
epochs = 32
Training_Model(model=model, epochs=epochs, parameters=model.parameters())
# torch.save(model.state_dict(), 'Emotion-Detection.pth')
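
# Optional: plot the collected metric histories with matplotlib (already imported
# above). This is a minimal sketch, not part of the original script; the output
# filenames are illustrative only. Uncomment to use.
# plt.figure()
# plt.plot(train_loss, label='train loss')
# plt.plot(val_loss, label='val loss')
# plt.xlabel('epoch')
# plt.ylabel('loss')
# plt.legend()
# plt.savefig('loss_curve.png')
#
# plt.figure()
# plt.plot(train_acc, label='train accuracy')
# plt.plot(val_acc, label='val accuracy')
# plt.xlabel('epoch')
# plt.ylabel('accuracy (%)')
# plt.legend()
# plt.savefig('accuracy_curve.png')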