[CI] upgrade to newest pta
Co-authored-by: angazenn <zengyanjia@huawei.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
wangxiyuan and angazenn committed Feb 27, 2025
1 parent fd18ae6 commit 5d5acd9
Showing 4 changed files with 27 additions and 56 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/vllm_ascend_test.yaml
@@ -108,9 +108,9 @@ jobs:
run: |
mkdir pta
cd pta
-wget https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250218.4/pytorch_v2.5.1_py310.tar.gz
+wget https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250226.4/pytorch_v2.5.1_py310.tar.gz
tar -xvf pytorch_v2.5.1_py310.tar.gz
-pip install ./torch_npu-2.5.1.dev20250218-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
+pip install ./torch_npu-2.5.1.dev20250226-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
cd ..
rm -rf pta
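If the bump works as intended, a quick check along these lines (a hedged sketch, not part of the workflow; it assumes the `packaging` package is available and that `torch_npu.__version__` reports the wheel's dev date) confirms the runner picked up the 20250226 build rather than a cached one:

```python
# Hypothetical post-install check for the step above; not taken from the repo.
from packaging.version import Version  # assumes `packaging` is installed

import torch
import torch_npu  # importing torch_npu registers the Ascend "npu" device with torch

print(f"torch {torch.__version__}, torch_npu {torch_npu.__version__}")
# The wheel installed above is torch_npu-2.5.1.dev20250226; anything older
# suggests a stale package was picked up.
assert Version(torch_npu.__version__) >= Version("2.5.1.dev20250226")
```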
10 changes: 5 additions & 5 deletions docs/source/installation.md
@@ -12,7 +12,7 @@ This document describes how to install vllm-ascend manually.
| Software | Supported version | Note |
| ------------ | ----------------- | ---- |
| CANN | >= 8.0.0 | Required for vllm-ascend and torch-npu |
-| torch-npu | >= 2.5.1.dev20250218 | Required for vllm-ascend |
+| torch-npu | >= 2.5.1.dev20250226 | Required for vllm-ascend |
| torch | >= 2.5.1 | Required for torch-npu and vllm |

You have 2 way to install:
@@ -134,15 +134,15 @@ pip install vllm-ascend==|pip_vllm_ascend_version| --extra-index https://downloa
#
# Here we take python 3.10 on aarch64 as an example. Feel free to install the correct version for your environment. See:
#
-# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250218.4/pytorch_v2.5.1_py39.tar.gz
-# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250218.4/pytorch_v2.5.1_py310.tar.gz
-# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250218.4/pytorch_v2.5.1_py311.tar.gz
+# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250226.4/pytorch_v2.5.1_py39.tar.gz
+# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250226.4/pytorch_v2.5.1_py310.tar.gz
+# https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250226.4/pytorch_v2.5.1_py311.tar.gz
#
mkdir pta
cd pta
wget https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250218.4/pytorch_v2.5.1_py310.tar.gz
tar -xvf pytorch_v2.5.1_py310.tar.gz
-pip install ./torch_npu-2.5.1.dev20250218-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
+pip install ./torch_npu-2.5.1.dev20250226-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
```

or build from **source code**:
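Whichever route is used, a short smoke test such as the one below (a sketch, assuming CANN is installed and an Ascend NPU is visible to the process) verifies that torch and torch-npu ended up wired together:

```python
# Illustrative smoke test, not part of the installation docs; it assumes a
# working CANN environment and at least one Ascend NPU device.
import torch
import torch_npu  # the import patches torch with the "npu" device backend

print("torch:", torch.__version__)
print("torch_npu:", torch_npu.__version__)
print("NPU available:", torch.npu.is_available())

if torch.npu.is_available():
    x = torch.ones(4, device="npu")
    print((x + x).cpu())  # expect tensor([2., 2., 2., 2.]) if the device works
```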
67 changes: 19 additions & 48 deletions vllm_ascend/attention.py
@@ -577,13 +577,11 @@ def forward(
self.num_kv_heads,
self.head_size)
slots = attn_metadata.slot_mapping
-torch_npu.npu_reshapecache(key=key,
-value=value,
-keyCache=key_cache,
-valueCache=value_cache,
-slotMapping=slots,
-compressType=0,
-kvCacheCfg=0)
+torch_npu._npu_reshape_and_cache(key=key,
+value=value,
+key_cache=key_cache,
+value_cache=value_cache,
+slot_indices=slots)

if attn_metadata.num_prefills > 0:
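For orientation, here is a self-contained sketch of the new cache-write call in isolation. Only the operator name and keyword names come from the diff; the shapes, dtypes, and the `[num_blocks, block_size, num_kv_heads, head_size]` cache layout are illustrative assumptions, and the snippet needs an Ascend NPU to run:

```python
# Hedged sketch of torch_npu._npu_reshape_and_cache; shapes and layout are assumed.
import torch
import torch_npu

num_tokens, num_kv_heads, head_size = 8, 2, 128
num_blocks, block_size = 16, 128

key = torch.randn(num_tokens, num_kv_heads, head_size, dtype=torch.float16, device="npu")
value = torch.randn_like(key)
key_cache = torch.zeros(num_blocks, block_size, num_kv_heads, head_size,
                        dtype=torch.float16, device="npu")
value_cache = torch.zeros_like(key_cache)
slots = torch.arange(num_tokens, dtype=torch.int32, device="npu")  # flat slot per token (assumed int32)

# Scatter each token's key/value into the block-structured cache at its slot.
torch_npu._npu_reshape_and_cache(key=key,
                                 value=value,
                                 key_cache=key_cache,
                                 value_cache=value_cache,
                                 slot_indices=slots)
```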

@@ -596,32 +594,15 @@ def forward(
np.array(
attn_metadata.prefill_metadata.seq_lens).astype(
np.int32))
-torch_npu.npu_selfattention(
+torch_npu._npu_flash_attention(
query=query,
key=key,
value=value,
mask=mask,
-maskType=1,
-isTriuMask=0,
-seqLen=self.seq_lens_tensor_cpu,
-scale=self.scale,
-qScale=1,
-headNum=self.num_heads,
-kvHeadNum=self.num_kv_heads,
-mlaVHeadSize=0,
-calcType=3,
-kernelType=0,
-clampType=0,
-scaleType=0,
-quantType=0,
-cacheType=0,
-batchRunStatusEnable=False,
-kvcacheCfg=0,
-clampMin=0,
-clampMax=0,
-inputLayout=0,
-windowSize=0,
-outDataType=0,
+seq_len=self.seq_lens_tensor_cpu,
+scale_value=self.scale,
+num_heads=self.num_heads,
+num_kv_heads=self.num_kv_heads,
out=output)
else:
# TODO: Will support prefix cache and chunked prefill soon.
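A comparable sketch of the prefill call: the keyword names mirror the updated call above, while the tensor shapes, dtypes, and the causal-mask construction are assumptions made for illustration (the backend builds its own mask), and an Ascend NPU is required:

```python
# Hedged prefill-attention sketch; only the keyword names come from the diff.
import torch
import torch_npu

num_heads = num_kv_heads = 8
head_size = 128
seq_lens = [5, 3]                       # two prefill sequences in one batch
num_tokens = sum(seq_lens)

query = torch.randn(num_tokens, num_heads, head_size, dtype=torch.float16, device="npu")
key = torch.randn(num_tokens, num_kv_heads, head_size, dtype=torch.float16, device="npu")
value = torch.randn_like(key)
output = torch.empty_like(query)

# Assumed causal mask; the real backend derives the mask layout/dtype itself.
max_len = max(seq_lens)
mask = torch.tril(torch.ones(max_len, max_len, dtype=torch.float16, device="npu"))

torch_npu._npu_flash_attention(
    query=query,
    key=key,
    value=value,
    mask=mask,
    seq_len=torch.tensor(seq_lens, dtype=torch.int32),  # CPU int32, as in the diff
    scale_value=head_size ** -0.5,
    num_heads=num_heads,
    num_kv_heads=num_kv_heads,
    out=output)
```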
@@ -634,26 +615,16 @@ def forward(
np.array(attn_metadata.decode_metadata.seq_lens).astype(
np.int32))
block_tables = attn_metadata.decode_metadata.block_tables
-torch_npu.npu_pagedattention(
+torch_npu._npu_paged_attention(
query=query,
-keyCache=key_cache,
-valueCache=value_cache,
-contextLens=self.seq_lens_tensor_cpu,
-maskType=0,
-kvHeadNum=self.num_kv_heads,
-headNum=self.num_heads,
-mlaVHeadSize=0,
-qkScale=self.scale,
-scaleType=0,
-blockTables=block_tables,
-batchRunStatusEnable=False,
-hasQuantOffset=False,
-calcType=3,
-quantType=0,
-compressType=0,
-inputLayout=0,
-outDataType=0,
-attnOut=output)
+key_cache=key_cache,
+value_cache=value_cache,
+num_kv_heads=self.num_kv_heads,
+num_heads=self.num_heads,
+scale_value=self.scale,
+block_table=block_tables,
+context_lens=self.seq_lens_tensor_cpu,
+out=output)

return output.view(num_tokens, self.hidden_size)
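And a matching sketch of the decode-time paged-attention call; as above, only the operator and keyword names come from the diff, while the cache layout, block-table shape, and dtypes are illustrative assumptions that require an Ascend NPU:

```python
# Hedged decode (paged attention) sketch; shapes and layouts are assumed.
import torch
import torch_npu

num_heads = num_kv_heads = 8
head_size = 128
num_blocks, block_size, batch = 64, 128, 2

query = torch.randn(batch, num_heads, head_size, dtype=torch.float16, device="npu")  # one token per sequence
key_cache = torch.randn(num_blocks, block_size, num_kv_heads, head_size,
                        dtype=torch.float16, device="npu")
value_cache = torch.randn_like(key_cache)
output = torch.empty_like(query)

block_table = torch.zeros(batch, 4, dtype=torch.int32, device="npu")  # assumed: up to 4 cache blocks per sequence
context_lens = torch.tensor([17, 9], dtype=torch.int32)               # CPU int32, as in the diff

torch_npu._npu_paged_attention(
    query=query,
    key_cache=key_cache,
    value_cache=value_cache,
    num_kv_heads=num_kv_heads,
    num_heads=num_heads,
    scale_value=head_size ** -0.5,
    block_table=block_table,
    context_lens=context_lens,
    out=output)
```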

2 changes: 1 addition & 1 deletion vllm_ascend/ops/rotary_embedding.py
@@ -42,7 +42,7 @@ def rope_forward_oot(
# TODO: Remove the contiguous in the future.
query = query.contiguous()
key = key.contiguous()
-torch_npu.npu_rope(
+torch_npu._npu_rotary_embedding(
positions,
query,
key,
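Since all of the renamed bindings in this commit only exist in newer torch_npu builds, a quick attribute check (a sketch; it simply assumes the ops are exposed as top-level `torch_npu` attributes, as the call sites above suggest) can confirm the installed wheel is recent enough:

```python
# Sanity check that the renamed kernels used by this commit are available in
# the installed torch_npu build (dev20250226 or newer is assumed).
import torch_npu

for op in ("_npu_reshape_and_cache", "_npu_flash_attention",
           "_npu_paged_attention", "_npu_rotary_embedding"):
    print(op, "->", hasattr(torch_npu, op))
```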
