diff --git a/vllm_ascend/worker.py b/vllm_ascend/worker.py
index 98f53aca..a3b4f19d 100644
--- a/vllm_ascend/worker.py
+++ b/vllm_ascend/worker.py
@@ -85,9 +85,6 @@ def __init__(
         self.distributed_init_method = distributed_init_method
         self.is_driver_worker = is_driver_worker

-        if is_driver_worker:
-            assert rank % self.parallel_config.tensor_parallel_size == 0, \
-                "Driver worker should be rank 0 of tensor parallel group."
         if self.model_config.trust_remote_code:
             # note: lazy import to avoid importing torch before initializing
             from vllm.utils import init_cached_hf_modules