[PyTorch] Building a Thermal Face Detection Model
Now that thermal imaging has become commonplace, I'm going to build a thermal face classifier with a VGG model (vgg19 with batch normalization), referring to the torchvision code below.
I first wrote it in TensorFlow, then rebuilt it in PyTorch, which is easier to tune.
github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
Here is a condensed version.
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
import time
import os
import copy
from tensorboardX import SummaryWriter
import datetime
from torch.hub import load_state_dict_from_url
from typing import Union, List, Dict, Any, cast
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),  # simple augmentation for training
        transforms.ToTensor(),
    ]),
    'val': transforms.Compose([
        transforms.ToTensor(),
    ]),
}
data_dir = '/home/Documents/train_val'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
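For reference, datasets.ImageFolder derives the class labels from the subdirectory names, so train_val is assumed to be laid out roughly like this (the class folder names here are hypothetical):

/home/Documents/train_val/
├── train/
│   ├── face/
│   └── no_face/
└── val/
    ├── face/
    └── no_face/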
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # training mode (batchnorm/dropout active)
            else:
                model.eval()   # evaluation mode for validation

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

            # tag scalars per phase so train and val curves stay separate
            writer.add_scalar('Loss/' + phase, epoch_loss, epoch)
            writer.add_scalar('Accuracy/' + phase, epoch_acc, epoch)

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
__all__ = [
    'vgg19_bn',
]

model_urls = {
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
    def __init__(
        self,
        features: nn.Module,
        num_classes: int = 2,
        init_weights: bool = True
    ) -> None:
        super(VGG, self).__init__()
        self.features = features
        # pool to 1x1 so the classifier input is 512 * 1 * 1
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 1 * 1, 1024),
            nn.Linear(1024, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)  # flatten to (batch, 512) before the classifier
        x = self.classifier(x)
        return x

    def _initialize_weights(self) -> None:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    layers: List[nn.Module] = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            v = cast(int, v)
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)

# VGG19 configuration: numbers are conv output channels, 'M' is max-pooling
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
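As a quick shape sanity check (a sketch, assuming the VGG class and make_layers above): a 224x224 input is halved by five max-pools down to 7x7, the adaptive pool reduces that to 1x1, and the classifier maps 512 features to 2 classes.

# Shape check: assumes the VGG class, make_layers, and cfgs defined above
m = VGG(make_layers(cfgs['A'], batch_norm=True))
out = m(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 2])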
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if pretrained:
        # note: the torchvision checkpoint has a 1000-class classifier,
        # which does not match the 2-class head defined above
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model

def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    return _vgg('vgg19_bn', 'A', True, pretrained, progress, **kwargs)
# tensorboard: timestamp to tag this training run
save_time = f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}'

model_ft = vgg19_bn()
model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)
# Decay the learning rate by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

# view with: tensorboard --logdir=./
writer = SummaryWriter(f'thermalface_vgg19_bn_{save_time}')

model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=128)

save_path = "thermalface_vgg19_bn.pth"
torch.save(model_ft.state_dict(), save_path)
writer.close()
'''
Epoch 0/127
----------
train Loss: 0.1183 Acc: 0.9557
val Loss: 0.1464 Acc: 0.9767
Epoch 1/127
----------
train Loss: 0.1013 Acc: 0.9677
val Loss: 0.0160 Acc: 0.9939
Epoch 2/127
----------
train Loss: 0.0638 Acc: 0.9792
val Loss: 0.0131 Acc: 0.9965
'''
The loss and accuracy were so good from the very first epoch that I was suspicious, but fortunately the model turned out to work well.
I plan to polish it a bit more and update the code.
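For completeness, here is a minimal inference sketch against the saved weights; the image path is hypothetical, and class_names comes from the ImageFolder setup above.

from PIL import Image

# Rebuild the architecture and load the trained weights
model = vgg19_bn()
model.load_state_dict(torch.load("thermalface_vgg19_bn.pth", map_location=device))
model = model.to(device).eval()

# 'sample_thermal.jpg' is a hypothetical test image
img = Image.open("sample_thermal.jpg").convert("RGB")
x = data_transforms['val'](img).unsqueeze(0).to(device)  # add a batch dimension

with torch.no_grad():
    pred = model(x).argmax(dim=1)
print(class_names[pred.item()])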