
Commit

configs | incorporated in main code
mustafa1728 committed Jun 1, 2021
1 parent 728c740 commit ab10546
Showing 4 changed files with 237 additions and 184 deletions.
109 changes: 74 additions & 35 deletions config.py
@@ -3,7 +3,6 @@
"""

import os
from pickle import NONE

from yacs.config import CfgNode as CN

@@ -13,53 +12,56 @@

_C = CN()

_C.TO_VALIDATE = False # choices = [True, False]
_C.TO_VALIDATE = True # choices = [True, False]


# -----------------------------------------------------------------------------
# Dataset
# Paths
# -----------------------------------------------------------------------------

# dataset paths
_C.DATASET.PATH_DATA_ROOT = "data/" # directory where the feature pickles are stored. Depends on users
_C.DATASET.PATH_LABELS_ROOT = "data/" # directory where the annotations are stored. Depends on users
_C.DATASET.PATH_EXP_ROOT="model/action-model/" # directory where the checkpoints are to be stored. Depends on users
_C.PATHS = CN()
_C.PATHS.PATH_DATA_ROOT = "data/" # directory where the feature pickles are stored. Depends on users
_C.PATHS.PATH_LABELS_ROOT = "annotations/" # directory where the annotations are stored. Depends on users
_C.PATHS.PATH_EXP_ROOT="model/action-model/" # directory where the checkpoints are to be stored. Depends on users


_C.DATASET.DATASET_SOURCE="source_train" # depends on users
_C.DATASET.DATASET_TARGET="target_train" # depends on users
_C.PATHS.DATASET_SOURCE="source_train" # depends on users
_C.PATHS.DATASET_TARGET="target_train" # depends on users
if _C.TO_VALIDATE:
    _C.DATASET.VAL_DATASET_SOURCE="source_val" # depends on users
    _C.DATASET.VAL_DATASET_TARGET="target_val" # depends on users
    _C.PATHS.VAL_DATASET_SOURCE="source_val" # depends on users
    _C.PATHS.VAL_DATASET_TARGET="target_val" # depends on users
else:
    _C.DATASET.VAL_DATASET_SOURCE= None
    _C.DATASET.VAL_DATASET_TARGET= None
_C.DATASET.NUM_SOURCE= 16115 # number of training data (source)
_C.DATASET.NUM_TARGET= 26115 # number of training data (target)
    _C.PATHS.VAL_DATASET_SOURCE= None
    _C.PATHS.VAL_DATASET_TARGET= None
_C.PATHS.NUM_SOURCE= 16115 # number of training data (source)
_C.PATHS.NUM_TARGET= 26115 # number of training data (target)

_C.DATASET.PATH_DATA_SOURCE=os.path.join(_C.DATASET.PATH_DATA_ROOT, _C.DATASET.DATASET_SOURCE)
_C.DATASET.PATH_DATA_TARGET=os.path.join(_C.DATASET.PATH_DATA_ROOT, _C.DATASET.DATASET_TARGET)
_C.PATHS.PATH_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_SOURCE)
_C.PATHS.PATH_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_TARGET)
if _C.TO_VALIDATE:
    _C.DATASET.PATH_VAL_DATA_SOURCE=os.path.join(_C.DATASET.PATH_DATA_ROOT, _C.DATASET.VAL_DATASET_SOURCE)
    _C.DATASET.PATH_VAL_DATA_TARGET=os.path.join(_C.DATASET.PATH_DATA_ROOT, _C.DATASET.VAL_DATASET_TARGET)
    _C.PATHS.PATH_VAL_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_SOURCE)
    _C.PATHS.PATH_VAL_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_TARGET)
else:
    _C.DATASET.PATH_VAL_DATA_SOURCE= None
    _C.DATASET.PATH_VAL_DATA_SOURCE= None
    _C.PATHS.PATH_VAL_DATA_SOURCE= None
    _C.PATHS.PATH_VAL_DATA_TARGET= None

_C.DATASET.TRAIN_SOURCE_LIST=os.path.join(_C.DATASET.PATH_LABELS_ROOT, 'EPIC_100_uda_source_train.pkl') # '/domain_adaptation_source_train_pre-release_v3.pkl'
_C.DATASET.TRAIN_TARGET_LIST=os.path.join(_C.DATASET.PATH_LABELS_ROOT, 'EPIC_100_uda_target_train_timestamps.pkl') # '/domain_adaptation_target_train_pre-release_v6.pkl'
_C.PATHS.TRAIN_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_source_train.pkl') # '/domain_adaptation_source_train_pre-release_v3.pkl'
_C.PATHS.TRAIN_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_target_train_timestamps.pkl') # '/domain_adaptation_target_train_pre-release_v6.pkl'
if _C.TO_VALIDATE:
    _C.DATASET.VAL_SOURCE_LIST=os.path.join(_C.DATASET.PATH_LABELS_ROOT, "EPIC_100_uda_source_val.pkl")
    _C.DATASET.VAL_TARGET_LIST=os.path.join(_C.DATASET.PATH_LABELS_ROOT, "EPIC_100_uda_target_val.pkl")
    _C.PATHS.VAL_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_source_val.pkl")
    _C.PATHS.VAL_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_val.pkl")
else:
    _C.DATASET.VAL_SOURCE_LIST= None
    _C.DATASET.VAL_TARGET_LIST= None
_C.DATASET.VAL_LIST=os.path.join(_C.DATASET.PATH_LABELS_ROOT, "EPIC_100_uda_target_test_timestamps.pkl")
_C.DATASET.PATH_EXP=os.path.join(_C.DATASET.PATH_EXP_ROOT, "Testexp")
    _C.PATHS.VAL_SOURCE_LIST= None
    _C.PATHS.VAL_TARGET_LIST= None
_C.PATHS.VAL_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_test_timestamps.pkl")
_C.PATHS.PATH_EXP=os.path.join(_C.PATHS.PATH_EXP_ROOT, "Testexp")

# dataset parameters

# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.DATASET = "epic" # dataset choices = [hmdb_ucf, hmdb_ucf_small, ucf_olympic]
_C.DATASET.NUM_CLASSES = 97300
_C.DATASET.NUM_CLASSES = "97,300"
_C.DATASET.MODALITY = "ALL" # choices = [RGB ]
_C.DATASET.FRAME_TYPE = "feature" # choices = [frame]
_C.DATASET.NUM_SEGMENTS = 5 # sample frame # of each video for training
@@ -80,6 +82,10 @@
_C.MODEL.WEIGHTED_CLASS_LOSS_DA = "N" # choices = [Y, N]
_C.MODEL.WEIGHTED_CLASS_LOSS = "N" # choices = [Y, N]

_C.MODEL.DROPOUT_I = 0.8
_C.MODEL.DROPOUT_V = 0.8
_C.MODEL.NO_PARTIALBN = True


# DA configs
if _C.MODEL.USE_TARGET == "none":
@@ -96,14 +102,17 @@
_C.MODEL.USE_ATTN = "TransAttn" # choices = [None, TransAttn, general]
_C.MODEL.USE_ATTN_FRAME = None # choices = [None, TransAttn, general]
_C.MODEL.USE_BN = None # choices = [None, AdaBN, AutoDIAL]
_C.MODEL.N_ATTN = 1
_C.MODEL.PLACE_DIS = ["Y", "Y", "N"]
_C.MODEL.PLACE_ADV = ["Y", "Y", "Y"]


# ---------------------------------------------------------------------------- #
# Hyperparameters
# ---------------------------------------------------------------------------- #
_C.HYPERPARAMETERS = CN()
_C.HYPERPARAMETERS.ALPHA = 0
_C.HYPERPARAMETERS.BETA = [0.75, 0.75, 0.5]
_C.HYPERPARAMETERS.N_ATTN = 1
_C.HYPERPARAMETERS.GAMMA = 0.003 # U->H: 0.003 | H->U: 0.3
_C.HYPERPARAMETERS.MU = 0

@@ -117,21 +126,51 @@
_C.TRAINER.ARCH = "TBN" # choices = [resnet50]
_C.TRAINER.USE_TARGET = "uSv" # choices = [uSv, Sv, none]
_C.TRAINER.SHARE_PARAMS = "Y" # choices = [Y, N]
_C.TRAINER.PRETRAIN_SOURCE = False
_C.TRAINER.VERBOSE = True

# Learning configs
_C.TRAINER.LOSS_TYPE = 'nll'
_C.TRAINER.LR = 0.003
_C.TRAINER.LR_DECAY = 10
_C.TRAINER.LR_ADAPTIVE = None # choices = [None, loss, dann]
_C.TRAINER.LR_STEPS = [10, 20]
_C.TRAINER.MOMENTUM = 0.9
_C.TRAINER.WEIGHT_DECAY = 0.0001
_C.TRAINER.BATCH_SIZE = [128, int(128*(_C.PATHS.NUM_TARGET/_C.PATHS.NUM_SOURCE)), 128] # target batch scaled by the target/source dataset-size ratio (about 207 with the defaults above)
_C.TRAINER.OPTIMIZER_NAME = "SGD" # choices = [SGD, Adam]
_C.TRAINER.GD = 20
_C.TRAINER.CLIP_GRADIENT = 20

_C.TRAINER.PRETRAINED = None
_C.TRAINER.RESUME = ""
_C.TRAINER.RESUME_HP = ""

_C.TRAINER.MIN_EPOCHS = 25
_C.TRAINER.MAX_EPOCHS = 30

_C.TRAINER.ACCELERATOR = "ddp"



_C.PATHS.EXP_PATH = os.path.join(_C.PATHS.PATH_EXP + '_' + _C.TRAINER.OPTIMIZER_NAME + '-share_params_' + _C.MODEL.SHARE_PARAMS + '-lr_' + str(_C.TRAINER.LR) + '-bS_' + str(_C.TRAINER.BATCH_SIZE[0]), _C.DATASET.DATASET + '-'+ str(_C.DATASET.NUM_SEGMENTS) + '-seg-disDA_' + _C.MODEL.DIS_DA + '-alpha_' + str(_C.HYPERPARAMETERS.ALPHA) + '-advDA_' + _C.MODEL.ADV_DA + '-beta_' + str(_C.HYPERPARAMETERS.BETA[0])+ '_'+ str(_C.HYPERPARAMETERS.BETA[1])+'_'+ str(_C.HYPERPARAMETERS.BETA[2])+"_gamma_" + str(_C.HYPERPARAMETERS.GAMMA) + "_mu_" + str(_C.HYPERPARAMETERS.MU))


# ---------------------------------------------------------------------------- #
# Tester
# ---------------------------------------------------------------------------- #
_C.TESTER = CN()

_C.TESTER.TEST_TARGET_DATA = os.path.join(_C.PATHS.PATH_DATA_ROOT, "target_test")

_C.TESTER.WEIGHTS = os.path.join(_C.PATHS.EXP_PATH, "checkpoint.pth.tar")
_C.TESTER.NOUN_WEIGHTS = None
_C.TESTER.RESULT_JSON = "test.json"
_C.TESTER.TEST_SEGMENTS = 5 # sample frame # of each video for testing
_C.TESTER.SAVE_SCORES = os.path.join(_C.PATHS.EXP_PATH, "scores")
_C.TESTER.SAVE_CONFUSION = os.path.join(_C.PATHS.EXP_PATH, "confusion_matrix")

_C.TESTER.VERBOSE = True

# ---------------------------------------------------------------------------- #
# Miscellaneous configs
# ---------------------------------------------------------------------------- #
@@ -146,10 +185,10 @@
_C.TRAINER.PF = 50
_C.TRAINER.SF = 50
_C.TRAINER.COPY_LIST = ["N", "N"]
_C.TRAINER.SAVE_MODEL = True



_C.DATASET.EXP_PATH = os.path.join(_C.DATASET.PATH_EXP + '_' + _C.TRAINER.OPTIMIZER_NAME + '-share_params_' + _C.MODEL.SHARE_PARAMS + '-lr_' + str(_C.TRAINER.LR) + '-bS_' + str(_C.TRAINER.BATCH_SIZE[0]), _C.DATASET.DATASET + '-'+ str(_C.DATASET.NUM_SEGMENTS) + '-seg-disDA_' + _C.MODEL.DIS_DA + '-alpha_' + str(_C.HYPERPARAMETERS.ALPHA) + '-advDA_' + _C.MODEL.ADV_DA + '-beta_' + str(_C.HYPERPARAMETERS.BETA[0])+ '_'+ str(_C.HYPERPARAMETERS.BETA[1])+'_'+ str(_C.HYPERPARAMETERS.BETA[2])+"_gamma_" + str(_C.HYPERPARAMETERS.GAMMA) + "_mu_" + str(_C.HYPERPARAMETERS.MU))
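
For reference, a minimal sketch of how the refactored yacs config is consumed downstream (the YAML file name and the override values are illustrative, not part of this commit):

from config import get_cfg_defaults

cfg = get_cfg_defaults()                      # CfgNode holding the defaults defined above
cfg.merge_from_file("configs/default.yaml")   # hypothetical YAML override using the same keys
cfg.merge_from_list(["TRAINER.LR", 0.01])     # inline override of a single key
cfg.freeze()                                  # lock the node before handing it to the trainer

print(cfg.PATHS.PATH_DATA_SOURCE)             # "data/source_train" with the defaults above

Note that derived entries such as PATHS.PATH_DATA_SOURCE and PATHS.EXP_PATH are joined once at import time from the default values, so overriding PATHS.PATH_DATA_ROOT in a YAML file changes the root but not the already-composed paths.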



64 changes: 37 additions & 27 deletions main_lightning.py
@@ -1,21 +1,19 @@
import os
import numpy as np
import time
import argparse

import torch
import torch.nn.parallel
import torch.optim

from colorama import init
from colorama import Fore, Back, Style


from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from tensorboardX import SummaryWriter

from utils.loss import *
from utils.opts import parser
from utils.model_init import initialise_trainer
from config import get_cfg_defaults
from utils.data_loaders import get_train_data_loaders, get_val_data_loaders
from utils.logging import *

@@ -25,47 +23,53 @@
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)

init(autoreset=True)

best_prec1 = 0
gpu_count = torch.cuda.device_count()
log_info( "Number of GPUS available: " + str(gpu_count))

def arg_parse():
"""Parsing arguments"""
parser = argparse.ArgumentParser(description="TA3N Domain Adaptation")
parser.add_argument("--cfg", required=True, help="path to config file", type=str)
parser.add_argument("--gpus", default="0", help="gpu id(s) to use", type=str)
parser.add_argument("--resume", default="", type=str)
args = parser.parse_args()
return args

def main():
    args = parser.parse_args()
    args = arg_parse()
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.cfg)
    cfg.freeze()

    path_exp = args.exp_path + args.modality + '/'
    # log_info(str(cfg))

    path_exp = os.path.join(cfg.PATHS.EXP_PATH, cfg.DATASET.MODALITY)

    #========== model init ========#

    log_info('Initialising model......')
    model = initialise_trainer(args)
    model = initialise_trainer(cfg)

    #========== log files init ========#

    open_log_files(args)
    open_log_files(cfg)

    #========== Data loading ========#

    log_info('Loading data......')

    if args.use_opencv:
        log_debug("use opencv functions")

    source_loader, target_loader = get_train_data_loaders(args)
    source_loader, target_loader = get_train_data_loaders(cfg)

    to_validate = args.val_source_data != "none" and args.val_target_data != "none"
    if(to_validate):
    if(cfg.TO_VALIDATE):
        log_info('Loading validation data......')
        source_loader_val, target_loader_val = get_val_data_loaders(args)
        source_loader_val, target_loader_val = get_val_data_loaders(cfg)

    #========== Callbacks and checkpoints ========#

    if args.train_metric == "all":
    if cfg.TRAINER.TRAIN_METRIC == "all":
        monitor = "Prec@1 Action"
    elif args.train_metric == "noun":
    elif cfg.TRAINER.TRAIN_METRIC == "noun":
        monitor = "Prec@1 Noun"
    elif args.train_metric == "verb":
    elif cfg.TRAINER.TRAIN_METRIC == "verb":
        monitor = "Prec@1 Verb"
    else:
        log_error("invalid metric to train")
@@ -81,12 +85,18 @@ def main():

    #========== Actual Training ========#

    trainer = Trainer(min_epochs=20, max_epochs=30, callbacks=[checkpoint_callback], gpus = gpu_count, accelerator='ddp')
    trainer = Trainer(
        min_epochs=cfg.TRAINER.MIN_EPOCHS,
        max_epochs=cfg.TRAINER.MAX_EPOCHS,
        callbacks=[checkpoint_callback],
        gpus = args.gpus,
        accelerator=cfg.TRAINER.ACCELERATOR
    )

    log_info('Starting training......')
    start_train = time.time()

    if(to_validate):
    if(cfg.TO_VALIDATE):
        trainer.fit(model, (source_loader, target_loader), (source_loader_val, target_loader_val))
    else:
        trainer.fit(model, (source_loader, target_loader))
@@ -95,14 +105,14 @@ def main():

    #========== Logging ========#

    write_log_files('total time: {:.3f} '.format(end_train - start_train), best_prec1)
    write_log_files('total time: {:.3f} '.format(end_train - start_train), model.best_prec1)
    model.writer_train.close()
    model.writer_val.close()

    log_info('Training complete')
    log_info('Total training time:' + str(end_train - start_train))

    if(to_validate):
    if(cfg.TO_VALIDATE):
        log_info('Validation scores:\n | Prec@1 Verb: ' + str(model.prec1_verb_val) + "\n | Prec@1 Noun: " + str(model.prec1_noun_val)+ "\n | Prec@1 Action: " + str(model.prec1_val) + "\n | Prec@5 Verb: " + str(model.prec5_verb_val) + "\n | Prec@5 Noun: " + str(model.prec5_noun_val) + "\n | Prec@5 Action: " + str(model.prec5_val) + "\n | Loss total: " + str(model.losses_val))
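
With the switch from utils.opts to an explicit arg_parse() plus a yacs config, a typical launch becomes (the YAML path is illustrative):

    python main_lightning.py --cfg configs/default.yaml --gpus 0,1

The checkpoint callback that main() hands to the Trainer is created in a hunk not shown above; the sketch below shows how the selected monitor string would typically feed it. Every ModelCheckpoint argument here is an assumption, not the committed code.

from pytorch_lightning.callbacks import ModelCheckpoint

monitor = "Prec@1 Action"              # value chosen from cfg.TRAINER.TRAIN_METRIC above
checkpoint_callback = ModelCheckpoint(
    monitor=monitor,                   # metric logged by the LightningModule (assumption)
    mode="max",                        # higher precision is better (assumption)
    save_top_k=1,                      # keep only the best epoch (assumption)
    filename="checkpoint",             # illustrative file name
)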


41 changes: 27 additions & 14 deletions test_models_lightning.py
@@ -1,4 +1,4 @@
import numpy as np
import argparse
import json
from json import encoder

@@ -9,37 +9,50 @@

from pytorch_lightning import Trainer

from colorama import init
from colorama import Fore, Back, Style

from utils.opts_test import parser
from utils.model_init import initialise_tester
from utils.data_loaders import get_test_data_loaders

from config import get_cfg_defaults
from utils.logging import *

encoder.FLOAT_REPR = lambda o: format(o, '.3f')
init(autoreset=True)

def main():
def arg_parse():
"""Parsing arguments"""
parser = argparse.ArgumentParser(description="TA3N Domain Adaptation Testing")
parser.add_argument("--cfg", required=True, help="path to config file", type=str)
parser.add_argument("--gpus", default="0", help="gpu id(s) to use", type=str)
parser.add_argument("--ckpt", default=None, help="pre-trained parameters for the model (ckpt files)", type=str)
args = parser.parse_args()
return args

def main():
    args = arg_parse()
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.cfg)
    cfg.freeze()

    #========== model init ========#

    log_info('Preparing the model......')
    verb_net, noun_net = initialise_tester(args)
    verb_net, noun_net = initialise_tester(cfg)

    #========== Data loading ========#

    log_info('Loading data......')
    data_loader = get_test_data_loaders(args)
    log_info('Data loaded from: ' + args.test_target_data+".pkl")
    data_loader = get_test_data_loaders(cfg)
    log_info('Data loaded from: ' + cfg.TESTER.TEST_TARGET_DATA+".pkl")

    #========== Actual Testing ========#
    log_info('starting validation......')
    trainer = Trainer(gpus = torch.cuda.device_count())

    trainer.test(model = verb_net, test_dataloaders=data_loader, ckpt_path=args.weights, verbose = True)
    trainer = Trainer(gpus = args.gpus)

    if args.ckpt is None:
        ckpt_path = cfg.TESTER.WEIGHTS
    else:
        ckpt_path = args.ckpt
    trainer.test(model = verb_net, test_dataloaders=data_loader, ckpt_path=ckpt_path, verbose = cfg.TESTER.VERBOSE)
    if noun_net is not None:
        trainer.test(model = noun_net, test_dataloaders=data_loader, ckpt_path=ckpt_path, verbose = cfg.TESTER.VERBOSE)

    log_info('validation complete')
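
A hedged usage sketch for the refactored test entry point (the config and checkpoint paths are illustrative, not from this commit):

    python test_models_lightning.py --cfg configs/default.yaml --gpus 0
    python test_models_lightning.py --cfg configs/default.yaml --ckpt path/to/checkpoint.ckpt

When --ckpt is omitted, testing falls back to cfg.TESTER.WEIGHTS, i.e. the checkpoint.pth.tar inside the experiment directory derived in config.py; noun_net is evaluated with the same checkpoint path only when initialise_tester returns one.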
