extract_clip_feature.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Extract CLIP (ViT-B/32) image embeddings for an ImageNet-style directory tree."""
import argparse
import glob
import os

import torch
import clip
import tqdm
from PIL import Image

# Load the CLIP model and its matching image preprocessing pipeline once, globally.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)


def extract(args, VAL):
    save = args.save_dir
    batch_size = args.batch
    if VAL:
        save += "_val"
    os.makedirs(save, exist_ok=True)

    def data_generator(root=args.source_dir, index=0):
        # Yield preprocessed images from the index-th class folder (folders named "n...",
        # as in ImageNet synset IDs), together with the folder name and the image filename.
        subfolder = "val" if VAL else "train"
        folders = [folder for folder in os.listdir(os.path.join(root, subfolder)) if folder[0] == "n"]
        folder = folders[index]
        path = os.path.join(root, subfolder, folder)
        imgs = glob.glob(os.path.join(path, "*.JPEG"))
        for img in imgs:
            file = img.split(os.sep)[-1][:-5]  # filename without the ".JPEG" extension
            img = preprocess(Image.open(img)).unsqueeze(0).to(device)
            yield img, folder, file

    with torch.no_grad():
        # One pass per class folder (1000 classes for ImageNet-1k); each class is saved
        # as a dict {image filename: embedding} in its own .pt file.
        for i in range(1000):
            x = []
            emb = []
            files = []
            for img, name, file in tqdm.tqdm(data_generator(index=i)):
                x.append(img)
                files.append(file)
                if len(x) == batch_size:
                    x = torch.cat(x)
                    emb.append(model.encode_image(x))
                    x = []
            # Encode the remaining images that did not fill a full batch.
            if len(x) != 0:
                x = torch.cat(x)
                emb.append(model.encode_image(x))
            emb = torch.cat(emb).to("cpu")
            emb = dict(zip(files, emb))
            torch.save(emb, os.path.join(save, name + ".pt"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch", default=128, type=int)
    parser.add_argument("--source_dir", default=None, type=str)
    parser.add_argument("--save_dir", default=None, type=str)
    args = parser.parse_args()
    extract(args, False)  # training split -> save_dir
    extract(args, True)   # validation split -> save_dir + "_val"
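

# Usage (a minimal sketch; the directory paths below are placeholders, not from this repo):
#   python extract_clip_feature.py --source_dir /path/to/imagenet --save_dir clip_features --batch 128
# Assuming an ImageNet-style layout (source_dir/train/n*/... and source_dir/val/n*/...),
# this writes one .pt file per class, e.g. clip_features/<class folder>.pt for the train
# split and clip_features_val/<class folder>.pt for the val split, each containing a dict
# that maps image filenames to their ViT-B/32 embeddings.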