diff --git a/demo/predict.py b/demo/predict.py
index ca666db2..f600612b 100644
--- a/demo/predict.py
+++ b/demo/predict.py
@@ -10,7 +10,7 @@ from datetime import datetime
 
 import mindspore as ms
-from mindspore import Tensor, context, nn
+from mindspore import Tensor, nn
 
 from mindyolo.data import COCO80_TO_COCO91_CLASS
 from mindyolo.models import create_model
 
@@ -52,11 +52,13 @@ def get_parser_infer(parents=None):
 
 def set_default_infer(args):
     # Set Context
-    context.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    ms.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    if args.ms_mode == 0:
+        ms.set_context(jit_config={"jit_level": "O2"})
     if args.device_target == "Ascend":
-        context.set_context(device_id=int(os.getenv("DEVICE_ID", 0)))
+        ms.set_context(device_id=int(os.getenv("DEVICE_ID", 0)))
     elif args.device_target == "GPU" and args.ms_enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True)
+        ms.set_context(enable_graph_kernel=True)
     args.rank, args.rank_size = 0, 1
     # Set Data
     args.data.nc = 1 if args.single_cls else int(args.data.nc)  # number of classes
diff --git a/mindyolo/utils/utils.py b/mindyolo/utils/utils.py
index b03d707e..41b85dcb 100644
--- a/mindyolo/utils/utils.py
+++ b/mindyolo/utils/utils.py
@@ -6,9 +6,9 @@ import numpy as np
 
 import mindspore as ms
-from mindspore import context, ops, Tensor, nn
+from mindspore import ops, Tensor, nn
 from mindspore.communication.management import get_group_size, get_rank, init
-from mindspore.context import ParallelMode
+from mindspore import ParallelMode
 
 from mindyolo.utils import logger
 
 
@@ -21,22 +21,24 @@ def set_seed(seed=2):
 
 def set_default(args):
     # Set Context
-    context.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    ms.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    if args.ms_mode == 0:
+        ms.set_context(jit_config={"jit_level": "O2"})
     if args.device_target == "Ascend":
         device_id = int(os.getenv("DEVICE_ID", 0))
-        context.set_context(device_id=device_id)
+        ms.set_context(device_id=device_id)
     elif args.device_target == "GPU" and args.ms_enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True)
+        ms.set_context(enable_graph_kernel=True)
     # Set Parallel
     if args.is_parallel:
         init()
         args.rank, args.rank_size, parallel_mode = get_rank(), get_group_size(), ParallelMode.DATA_PARALLEL
-        context.set_auto_parallel_context(device_num=args.rank_size, parallel_mode=parallel_mode, gradients_mean=True)
+        ms.set_auto_parallel_context(device_num=args.rank_size, parallel_mode=parallel_mode, gradients_mean=True)
     else:
        args.rank, args.rank_size = 0, 1
     # Set Default
     args.total_batch_size = args.per_batch_size * args.rank_size
-    args.sync_bn = args.sync_bn and context.get_context("device_target") == "Ascend" and args.rank_size > 1
+    args.sync_bn = args.sync_bn and ms.get_context("device_target") == "Ascend" and args.rank_size > 1
     args.accumulate = max(1, np.round(args.nbs / args.total_batch_size)) if args.auto_accumulate else args.accumulate
     # optimizer
     args.optimizer.warmup_epochs = args.optimizer.get("warmup_epochs", 0)
diff --git a/test.py b/test.py
index c81f11f9..3a6f0860 100644
--- a/test.py
+++ b/test.py
@@ -13,7 +13,7 @@ from pycocotools.mask import encode
 
 import mindspore as ms
-from mindspore import Tensor, context, nn, ParallelMode
+from mindspore import Tensor, nn, ParallelMode
 from mindspore.communication import init, get_rank, get_group_size
 
 from mindyolo.data import COCO80_TO_COCO91_CLASS, COCODataset, create_loader
 
@@ -70,16 +70,18 @@ def get_parser_test(parents=None):
 
 def set_default_test(args):
     # Set Context
-    context.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    ms.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
+    if args.ms_mode == 0:
+        ms.set_context(jit_config={"jit_level": "O2"})
     if args.device_target == "Ascend":
-        context.set_context(device_id=int(os.getenv("DEVICE_ID", 0)))
+        ms.set_context(device_id=int(os.getenv("DEVICE_ID", 0)))
     elif args.device_target == "GPU" and args.ms_enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True)
+        ms.set_context(enable_graph_kernel=True)
     # Set Parallel
     if args.is_parallel:
         init()
         args.rank, args.rank_size, parallel_mode = get_rank(), get_group_size(), ParallelMode.DATA_PARALLEL
-        context.set_auto_parallel_context(device_num=args.rank_size, parallel_mode=parallel_mode)
+        ms.set_auto_parallel_context(device_num=args.rank_size, parallel_mode=parallel_mode)
     else:
         args.rank, args.rank_size = 0, 1
     # Set Data