yolo_model.py
from transformers import AutoModelForObjectDetection
import torch
import pytorch_lightning as pl


# We wrap the model in a PyTorch Lightning module for training.
class YoloS(pl.LightningModule):
    def __init__(self, lr, weight_decay, num_labels, train_dataloader=None, val_dataloader=None, test_dataloader=None):
        super().__init__()
        # Replace the COCO classification head with a custom head sized for num_labels.
        self.model = AutoModelForObjectDetection.from_pretrained(
            "hustvl/yolos-tiny",
            num_labels=num_labels,
            ignore_mismatched_sizes=True,
        )
        # see https://github.com/PyTorchLightning/pytorch-lightning/pull/1896
        self.lr = lr
        self.weight_decay = weight_decay
        # Save only the scalar hyperparameters (these are logged to W&B as well);
        # the dataloaders are not serializable and are excluded.
        self.save_hyperparameters("lr", "weight_decay", "num_labels")
        # Store the dataloaders under private names so they do not shadow the
        # train_dataloader/val_dataloader/test_dataloader hook methods below.
        self._train_dataloader = train_dataloader
        self._val_dataloader = val_dataloader
        self._test_dataloader = test_dataloader

    def forward(self, pixel_values):
        outputs = self.model(pixel_values=pixel_values)
        return outputs

    def common_step(self, batch, batch_idx):
        pixel_values = batch["pixel_values"]
        labels = [{k: v.to(self.device) for k, v in t.items()} for t in batch["labels"]]
        outputs = self.model(pixel_values=pixel_values, labels=labels)
        loss = outputs.loss
        loss_dict = outputs.loss_dict
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        # Logs metrics for each training_step and the average across the epoch.
        # Logging with a "train/" or "validation/" prefix splits the metrics
        # into two separate sections in the W&B workspace.
        loss, loss_dict = self.common_step(batch, batch_idx)
        self.log("train/loss", loss)
        for k, v in loss_dict.items():
            self.log("train/" + k, v.item())
        return loss

    def validation_step(self, batch, batch_idx):
        loss, loss_dict = self.common_step(batch, batch_idx)
        self.log("validation/loss", loss)
        for k, v in loss_dict.items():
            self.log("validation/" + k, v.item())
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr,
                                      weight_decay=self.weight_decay)
        return optimizer

    def train_dataloader(self):
        return self._train_dataloader

    def val_dataloader(self):
        return self._val_dataloader

    def test_dataloader(self):
        return self._test_dataloader
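

# For context, a minimal self-contained sketch of how this LightningModule
# might be trained and queried. Everything beyond the YoloS class itself is an
# assumption for illustration: the ToyDetectionDataset with random 512x512
# images, num_labels=5, and the two-step Trainer run. A real pipeline would
# feed a COCO-style dataset and attach a WandbLogger so the train/ and
# validation/ metrics land in W&B.
from torch.utils.data import DataLoader, Dataset
from transformers import AutoImageProcessor


class ToyDetectionDataset(Dataset):
    """Random images with one normalized (cx, cy, w, h) box each;
    stands in for a real COCO-style detection dataset."""

    def __len__(self):
        return 8

    def __getitem__(self, idx):
        pixel_values = torch.rand(3, 512, 512)
        target = {
            "class_labels": torch.tensor([0]),
            "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2]]),
        }
        return pixel_values, target


def collate_fn(batch):
    # Keep labels as a list of per-image dicts, the format common_step expects.
    return {
        "pixel_values": torch.stack([pv for pv, _ in batch]),
        "labels": [target for _, target in batch],
    }


train_dl = DataLoader(ToyDetectionDataset(), batch_size=2, collate_fn=collate_fn)
model = YoloS(lr=2.5e-5, weight_decay=1e-4, num_labels=5,
              train_dataloader=train_dl, val_dataloader=train_dl)

# For real runs, pass logger=WandbLogger(project=...) to send metrics to W&B.
trainer = pl.Trainer(max_steps=2, logger=False, enable_checkpointing=False)
trainer.fit(model)

# Inference: the image processor turns raw logits/boxes into thresholded detections.
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny")
model.eval()
with torch.no_grad():
    outputs = model(pixel_values=torch.rand(1, 3, 512, 512))
results = processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[(512, 512)]
)[0]
print(results["boxes"], results["labels"], results["scores"])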