Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/main' into pufanyi/few_shot
Browse files Browse the repository at this point in the history
  • Loading branch information
pufanyi committed Apr 17, 2024
2 parents 7c03d17 + 1141765 commit 60e0ba9
Show file tree
Hide file tree
Showing 31 changed files with 1,614 additions and 46 deletions.
40 changes: 28 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -201,14 +201,21 @@ We also provide the raw data exported from Weights & Biases for the detailed res
- OKVQA Validation 2014 (ok_vqa_val2014)
- POPE (pope)
- RefCOCO (refcoco)
- refcoco_seg_test
- refcoco_seg_val
- refcoco_seg_testA
- refcoco_seg_testB
- refcoco_bbox_test
- refcoco_bbox_val
- refcoco_bbox_testA
- refcoco_bbox_testB
- refcoco_seg
- refcoco_seg_test
- refcoco_seg_val
- refcoco_seg_testA
- refcoco_seg_testB
- refcoco_bbox
- refcoco_bbox_test
- refcoco_bbox_val
- refcoco_bbox_testA
- refcoco_bbox_testB
- refcoco_bbox_rec
- refcoco_bbox_rec_test
- refcoco_bbox_rec_val
- refcoco_bbox_rec_testA
- refcoco_bbox_rec_testB
- RefCOCO+ (refcoco+)
- refcoco+_seg
- refcoco+_seg_val
Expand All @@ -218,11 +225,20 @@ We also provide the raw data exported from Weights & Biases for the detailed res
- refcoco+_bbox_val
- refcoco+_bbox_testA
- refcoco+_bbox_testB
- refcoco+_bbox_rec
- refcoco+_bbox_rec_val
- refcoco+_bbox_rec_testA
- refcoco+_bbox_rec_testB
- RefCOCOg (refcocog)
- refcocog_seg_test
- refcocog_seg_val
- refcocog_bbox_test
- refcocog_bbox_val
- refcocog_seg
- refcocog_seg_test
- refcocog_seg_val
- refcocog_bbox
- refcocog_bbox_test
- refcocog_bbox_val
- refcocog_bbox_rec
- refcocog_bbox_rec_test
- refcocog_bbox_rec_val
- ScienceQA (scienceqa_full)
- ScienceQA Full (scienceqa)
- ScienceQA IMG (scienceqa_img)
Expand Down
21 changes: 9 additions & 12 deletions lmms_eval/api/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -700,12 +700,15 @@ def download(self, dataset_kwargs=None) -> None:
download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
**dataset_kwargs if dataset_kwargs is not None else {},
)
self.dataset_no_image = datasets.load_dataset(
path=self.DATASET_PATH,
name=self.DATASET_NAME,
download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
**dataset_kwargs if dataset_kwargs is not None else {},
)
if self.config.process_docs is not None:
for split in self.dataset:
if split in [
self.config.training_split, self.config.validation_split, self.config.test_split, self.config.fewshot_split
]:
self.dataset[split] = self.config.process_docs(self.dataset[split])

# copy dataset, remove image features
self.dataset_no_image = self.dataset.copy()
for doc_name in self.dataset_no_image:
remove_cols = []
features = self.dataset_no_image[doc_name].features
Expand Down Expand Up @@ -738,20 +741,14 @@ def has_test_docs(self) -> bool:

def training_docs(self) -> datasets.Dataset:
    """Return the training split of the task dataset.

    If the task config supplies a `process_docs` hook, the split is passed
    through it before being returned. NOTE(review): when there is no
    training split, this implicitly returns None despite the annotated
    return type — callers appear to rely on that; confirm before changing.
    """
    if self.has_training_docs():
        # Optional per-task preprocessing hook, applied lazily on access.
        if self.config.process_docs is not None:
            return self.config.process_docs(self.dataset[self.config.training_split])
        return self.dataset[self.config.training_split]

def validation_docs(self) -> datasets.Dataset:
    """Return the validation split of the task dataset.

    Applies the task's `process_docs` hook when configured. NOTE(review):
    implicitly returns None when no validation split exists, despite the
    annotated return type — confirm callers handle that.
    """
    if self.has_validation_docs():
        # Optional per-task preprocessing hook, applied lazily on access.
        if self.config.process_docs is not None:
            return self.config.process_docs(self.dataset[self.config.validation_split])
        return self.dataset[self.config.validation_split]

def test_docs(self) -> datasets.Dataset:
    """Return the test split of the task dataset.

    Applies the task's `process_docs` hook when configured. NOTE(review):
    implicitly returns None when no test split exists, despite the
    annotated return type — confirm callers handle that.
    """
    if self.has_test_docs():
        # Optional per-task preprocessing hook, applied lazily on access.
        if self.config.process_docs is not None:
            return self.config.process_docs(self.dataset[self.config.test_split])
        return self.dataset[self.config.test_split]

def fewshot_docs(self):
Expand Down
1 change: 1 addition & 0 deletions lmms_eval/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

AVAILABLE_MODELS = {
"llava": "Llava",
"llava_hf": "LlavaHf",
"qwen_vl": "Qwen_VL",
"fuyu": "Fuyu",
"gpt4v": "GPT4V",
Expand Down
6 changes: 3 additions & 3 deletions lmms_eval/models/llava.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def __init__(
trust_remote_code: Optional[bool] = False,
revision=None,
use_flash_attention_2=True,
device_map="",
device_map="auto",
conv_template="vicuna_v1",
use_cache=True,
truncate_context=False, # whether to truncate the context in generation, set it False for LLaVA-1.6
Expand Down Expand Up @@ -204,7 +204,7 @@ def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
else:
image = None

prompts_input = contexts[0]
prompts_input = contexts[0] if isinstance(contexts, list) else contexts

if image is not None and len(image) != 0 and DEFAULT_IMAGE_TOKEN not in prompts_input:
"""
Expand All @@ -215,7 +215,7 @@ def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
"""
image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
image_tokens = " ".join(image_tokens)
prompts_input = image_tokens + "\n" + contexts[0]
prompts_input = image_tokens + "\n" + (contexts[0] if isinstance(contexts, list) else contexts)

conv = conv_templates[self.conv_template].copy()
conv.append_message(conv.roles[0], prompts_input)
Expand Down
Loading

0 comments on commit 60e0ba9

Please sign in to comment.