add more models into daily testcases (InternLM#717)
Co-authored-by: zhulin1 <zhulin1@pjlab.org.cn>
zhulinJulia24 and zhulin1 authored Mar 6, 2024
1 parent bd57ff3 commit 43b7582
Showing 4 changed files with 60 additions and 9 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/daily_tests.yaml
@@ -27,7 +27,7 @@ jobs:
conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
source activate internlm-model-latest
pip install transformers==${{ matrix.transformers-version }}
-pip install sentencepiece auto-gptq
+pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
conda deactivate
- name: load_latest_hf_model
@@ -36,7 +36,7 @@
conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
source activate internlm-model-latest
pip install transformers
-pip install sentencepiece auto-gptq
+pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
conda deactivate
- name: remove_env
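
Both jobs now pin auto-gptq to 0.6.0 and install lmdeploy with all extras, since the updated test module below imports both packages. A quick sanity check of the freshly cloned environment might look like the following sketch (not part of the commit; it assumes both packages expose __version__, as recent releases do):

import auto_gptq
import lmdeploy

# Fail fast if the pinned GPTQ build did not resolve in the new env.
assert auto_gptq.__version__.startswith('0.6'), auto_gptq.__version__
print('auto-gptq', auto_gptq.__version__, '| lmdeploy', lmdeploy.__version__)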
Binary file modified tests/bamboo.jpeg
Binary file modified tests/panda.jpg
65 changes: 58 additions & 7 deletions tests/test_hf_model.py
@@ -1,5 +1,7 @@
 import pytest
 import torch
+from auto_gptq.modeling import BaseGPTQForCausalLM
+from lmdeploy import TurbomindEngineConfig, pipeline
 from PIL import Image
 from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

@@ -9,6 +11,8 @@
 def assert_model(response):
     assert len(response) != 0
     assert 'UNUSED_TOKEN' not in response
+    assert 'Mynameis' not in response
+    assert 'Iama' not in response
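
(The two new assertions presumably guard against degenerate generations that drop spaces, e.g. 'My name is' collapsing into 'Mynameis', so the daily run fails fast on broken decoding.)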


class TestChat:
@@ -18,6 +22,7 @@ class TestChat:
         'model_name',
         [
             'internlm/internlm2-chat-7b', 'internlm/internlm2-chat-7b-sft',
+            'internlm/internlm2-chat-20b', 'internlm/internlm2-chat-20b-sft',
             'internlm/internlm2-chat-1_8b', 'internlm/internlm2-chat-1_8b-sft'
         ],
     )
@@ -55,13 +60,31 @@ def test_demo_default(self, model_name, usefast):
         assert_model(response)


+class TestChatAwq:
+    """Test cases for the AWQ-quantized chat model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        ['internlm/internlm2-chat-20b-4bits'],
+    )
+    def test_demo_default(self, model_name):
+        engine_config = TurbomindEngineConfig(model_format='awq')
+        pipe = pipeline(model_name,
+                        backend_config=engine_config)
+        responses = pipe(['Hi, pls intro yourself', 'Shanghai is'])
+        print(responses)
+        for response in responses:
+            assert_model(response.text)
+
+
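For context, lmdeploy's pipeline returns one Response object per prompt, with the generated text in its .text field, which is why the loop above asserts on response.text rather than on the object itself. A minimal standalone sketch of the same call, assuming the AWQ checkpoint id used in the test:

from lmdeploy import TurbomindEngineConfig, pipeline

# model_format='awq' tells the TurboMind backend the weights are AWQ 4-bit.
engine_config = TurbomindEngineConfig(model_format='awq')
pipe = pipeline('internlm/internlm2-chat-20b-4bits', backend_config=engine_config)
print(pipe(['Hi, pls intro yourself'])[0].text)
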
 class TestBase:
     """Test cases for base model."""

     @pytest.mark.parametrize(
         'model_name',
         [
             'internlm/internlm2-7b', 'internlm/internlm2-base-7b',
+            'internlm/internlm2-20b', 'internlm/internlm2-base-20b',
             'internlm/internlm2-1_8b'
         ],
     )
@@ -140,6 +163,7 @@ class TestMMModel:
         'model_name',
         [
             'internlm/internlm-xcomposer2-7b',
+            'internlm/internlm-xcomposer2-7b-4bit'
         ],
     )
     def test_demo_default(self, model_name):
@@ -148,12 +172,16 @@ def test_demo_default(self, model_name):
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.

-        model = AutoModelForCausalLM.from_pretrained(
-            model_name, torch_dtype=torch.float32,
-            trust_remote_code=True).cuda()
+        if '4bit' in model_name:
+            model = InternLMXComposer2QForCausalLM.from_quantized(
+                model_name, trust_remote_code=True, device='cuda:0').eval()
+        else:
+            model = AutoModelForCausalLM.from_pretrained(
+                model_name, torch_dtype=torch.float32,
+                trust_remote_code=True).cuda()

         tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                   trust_remote_code=True)
+
         model = model.eval()
         img_path_list = [
             'tests/panda.jpg',
@@ -175,7 +203,7 @@
             do_sample=False)
         print(response)
         assert len(response) != 0
-        assert 'panda' in response
+        assert ' panda' in response

         query = '<ImageHere> <ImageHere>请根据图片写一篇作文:我最喜欢的小动物。' \
             + '要求:选准角度,确定立意,明确文体,自拟标题。'
@@ -197,6 +225,7 @@ class TestMMVlModel:
         'model_name',
         [
             'internlm/internlm-xcomposer2-vl-7b',
+            'internlm/internlm-xcomposer2-vl-7b-4bit'
         ],
     )
     def test_demo_default(self, model_name):
@@ -206,8 +235,13 @@
         torch.set_grad_enabled(False)

         # init model and tokenizer
-        model = AutoModel.from_pretrained(
-            model_name, trust_remote_code=True).cuda().eval()
+        if '4bit' in model_name:
+            model = InternLMXComposer2QForCausalLM.from_quantized(
+                model_name, trust_remote_code=True, device='cuda:0').eval()
+        else:
+            model = AutoModel.from_pretrained(
+                model_name, trust_remote_code=True).cuda().eval()
+
         tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                   trust_remote_code=True)

@@ -223,3 +257,20 @@
         assert len(response) != 0
         assert 'Oscar Wilde' in response
         assert 'Live life with no excuses, travel with no regret' in response
+
+
+class InternLMXComposer2QForCausalLM(BaseGPTQForCausalLM):
+    layers_block_name = 'model.layers'
+    outside_layer_modules = [
+        'vit',
+        'vision_proj',
+        'model.tok_embeddings',
+        'model.norm',
+        'output',
+    ]
+    inside_layer_modules = [
+        ['attention.wqkv.linear'],
+        ['attention.wo.linear'],
+        ['feed_forward.w1.linear', 'feed_forward.w3.linear'],
+        ['feed_forward.w2.linear'],
+    ]

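The subclass above supplies the module map auto-gptq needs to rebuild the InternLM-XComposer2 checkpoints: layers_block_name points at the stack of quantized transformer blocks, outside_layer_modules lists the parts kept in full precision (the ViT tower, vision projection, token embeddings, final norm and output head), and inside_layer_modules groups the quantized linear layers inside each block. A minimal loading sketch under those assumptions, mirroring the 4-bit branches in the tests above:

# Sketch: reassemble a 4-bit xcomposer2 checkpoint through auto-gptq.
model = InternLMXComposer2QForCausalLM.from_quantized(
    'internlm/internlm-xcomposer2-7b-4bit',
    trust_remote_code=True,
    device='cuda:0',
).eval()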