Qnancy committed on
Commit bc68240 · verified · 1 Parent(s): 244b6e8

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. external/Grounded-Segment-Anything/.gitignore +135 -0
  2. external/Grounded-Segment-Anything/automatic_label_demo.py +323 -0
  3. external/Grounded-Segment-Anything/automatic_label_ram_demo.py +324 -0
  4. external/Grounded-Segment-Anything/automatic_label_tag2text_demo.py +352 -0
  5. external/Grounded-Segment-Anything/chatbot.py +1460 -0
  6. external/Grounded-Segment-Anything/cog.yaml +27 -0
  7. external/Grounded-Segment-Anything/gradio_app.py +400 -0
  8. external/Grounded-Segment-Anything/grounded_sam.ipynb +0 -0
  9. external/Grounded-Segment-Anything/grounded_sam_inpainting_demo.py +216 -0
  10. external/Grounded-Segment-Anything/grounded_sam_multi_gpu_demo.py +265 -0
  11. external/Grounded-Segment-Anything/grounded_sam_simple_demo.py +107 -0
  12. external/Grounded-Segment-Anything/grounded_sam_visam.py +265 -0
  13. external/Grounded-Segment-Anything/grounded_sam_whisper_demo.py +260 -0
  14. external/Grounded-Segment-Anything/grounded_sam_whisper_inpainting_demo.py +286 -0
  15. external/Grounded-Segment-Anything/playground/README.md +19 -0
  16. external/Grounded-Segment-Anything/recognize-anything/.gitignore +140 -0
  17. external/Grounded-Segment-Anything/recognize-anything/LICENSE +202 -0
  18. external/Grounded-Segment-Anything/recognize-anything/MANIFEST.in +3 -0
  19. external/Grounded-Segment-Anything/recognize-anything/NOTICE.txt +481 -0
  20. external/Grounded-Segment-Anything/recognize-anything/README.md +601 -0
  21. external/Grounded-Segment-Anything/recognize-anything/batch_inference.py +491 -0
  22. external/Grounded-Segment-Anything/recognize-anything/finetune.py +291 -0
  23. external/Grounded-Segment-Anything/recognize-anything/generate_tag_des_llm.py +68 -0
  24. external/Grounded-Segment-Anything/recognize-anything/gui_demo.ipynb +0 -0
  25. external/Grounded-Segment-Anything/recognize-anything/inference_ram.py +54 -0
  26. external/Grounded-Segment-Anything/recognize-anything/inference_ram_openset.py +68 -0
  27. external/Grounded-Segment-Anything/recognize-anything/inference_ram_plus.py +54 -0
  28. external/Grounded-Segment-Anything/recognize-anything/inference_ram_plus_openset.py +76 -0
  29. external/Grounded-Segment-Anything/recognize-anything/inference_tag2text.py +69 -0
  30. external/Grounded-Segment-Anything/recognize-anything/pretrain.py +303 -0
  31. external/Grounded-Segment-Anything/recognize-anything/recognize_anything_demo.ipynb +0 -0
  32. external/Grounded-Segment-Anything/recognize-anything/requirements.txt +9 -0
  33. external/Grounded-Segment-Anything/recognize-anything/setup.cfg +15 -0
  34. external/Grounded-Segment-Anything/recognize-anything/setup.py +2 -0
  35. external/Grounded-Segment-Anything/recognize-anything/utils.py +279 -0
  36. external/Grounded-Segment-Anything/voxelnext_3d_box/README.md +72 -0
  37. external/Grounded-Segment-Anything/voxelnext_3d_box/__init__.py +0 -0
  38. external/Grounded-Segment-Anything/voxelnext_3d_box/config.yaml +56 -0
  39. external/Grounded-Segment-Anything/voxelnext_3d_box/model.py +142 -0
  40. external/Grounded-Segment-Anything/voxelnext_3d_box/requirements.txt +10 -0
  41. external/PerspectiveFields/.gitattributes +6 -0
  42. external/PerspectiveFields/.gitignore +10 -0
  43. external/PerspectiveFields/LICENSE +15 -0
  44. external/PerspectiveFields/README.md +220 -0
  45. external/PerspectiveFields/demo/demo.py +165 -0
  46. external/PerspectiveFields/notebooks/camera2perspective.ipynb +0 -0
  47. external/PerspectiveFields/notebooks/predict_perspective_fields.ipynb +0 -0
  48. external/PerspectiveFields/perspective2d/__init__.py +2 -0
  49. external/PerspectiveFields/perspective2d/config/__init__.py +1 -0
  50. external/PerspectiveFields/perspective2d/config/config.py +137 -0
external/Grounded-Segment-Anything/.gitignore ADDED
@@ -0,0 +1,135 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # checkpoint
132
+ *.pth
133
+ outputs/
134
+
135
+ .idea/
external/Grounded-Segment-Anything/automatic_label_demo.py ADDED
@@ -0,0 +1,323 @@
1
+ import argparse
2
+ import os
3
+ import copy
4
+
5
+ import numpy as np
6
+ import json
7
+ import torch
8
+ import torchvision
9
+ from PIL import Image, ImageDraw, ImageFont
10
+ import nltk
11
+ import litellm
12
+
13
+ # Grounding DINO
14
+ import GroundingDINO.groundingdino.datasets.transforms as T
15
+ from GroundingDINO.groundingdino.models import build_model
16
+ from GroundingDINO.groundingdino.util import box_ops
17
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
18
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
19
+
20
+ # segment anything
21
+ from segment_anything import build_sam, SamPredictor
22
+ import cv2
23
+ import numpy as np
24
+ import matplotlib.pyplot as plt
25
+
26
+ # BLIP
27
+ from transformers import BlipProcessor, BlipForConditionalGeneration
28
+
29
+ # ChatGPT
30
+ import openai
31
+
32
+
33
+ def load_image(image_path):
34
+ # load image
35
+ image_pil = Image.open(image_path).convert("RGB") # load image
36
+
37
+ transform = T.Compose(
38
+ [
39
+ T.RandomResize([800], max_size=1333),
40
+ T.ToTensor(),
41
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
42
+ ]
43
+ )
44
+ image, _ = transform(image_pil, None) # 3, h, w
45
+ return image_pil, image
46
+
47
+
48
+ def generate_caption(raw_image, device):
49
+ # unconditional image captioning
50
+ if device == "cuda":
51
+ inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
52
+ else:
53
+ inputs = processor(raw_image, return_tensors="pt")
54
+ out = blip_model.generate(**inputs)
55
+ caption = processor.decode(out[0], skip_special_tokens=True)
56
+ return caption
57
+
58
+
59
+ def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo"):
60
+ lemma = nltk.wordnet.WordNetLemmatizer()
61
+ if openai_key:
62
+ prompt = [
63
+ {
64
+ 'role': 'system',
65
+ 'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
66
+ f'List the nouns in singular form. Split them by "{split} ". ' + \
67
+ f'Caption: {caption}.'
68
+ }
69
+ ]
70
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
71
+ reply = response['choices'][0]['message']['content']
72
+ # sometimes return with "noun: xxx, xxx, xxx"
73
+ tags = reply.split(':')[-1].strip()
74
+ else:
75
+ nltk.download(['punkt', 'averaged_perceptron_tagger', 'wordnet'])
76
+ tags_list = [word for (word, pos) in nltk.pos_tag(nltk.word_tokenize(caption)) if pos[0] == 'N']
77
+ tags_lemma = [lemma.lemmatize(w) for w in tags_list]
78
+ tags = ', '.join(map(str, tags_lemma))
79
+ return tags
80
+
81
+
82
+ def check_caption(caption, pred_phrases, max_tokens=100, model="gpt-3.5-turbo"):
83
+ object_list = [obj.split('(')[0] for obj in pred_phrases]
84
+ object_num = []
85
+ for obj in set(object_list):
86
+ object_num.append(f'{object_list.count(obj)} {obj}')
87
+ object_num = ', '.join(object_num)
88
+ print(f"Correct object number: {object_num}")
89
+
90
+ if openai_key:
91
+ prompt = [
92
+ {
93
+ 'role': 'system',
94
+ 'content': 'Revise the number in the caption if it is wrong. ' + \
95
+ f'Caption: {caption}. ' + \
96
+ f'True object number: {object_num}. ' + \
97
+ 'Only give the revised caption: '
98
+ }
99
+ ]
100
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
101
+ reply = response['choices'][0]['message']['content']
102
+ # sometimes return with "Caption: xxx, xxx, xxx"
103
+ caption = reply.split(':')[-1].strip()
104
+ return caption
105
+
106
+
107
+ def load_model(model_config_path, model_checkpoint_path, device):
108
+ args = SLConfig.fromfile(model_config_path)
109
+ args.device = device
110
+ model = build_model(args)
111
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
112
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
113
+ print(load_res)
114
+ _ = model.eval()
115
+ return model
116
+
117
+
118
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold,device="cpu"):
119
+ caption = caption.lower()
120
+ caption = caption.strip()
121
+ if not caption.endswith("."):
122
+ caption = caption + "."
123
+ model = model.to(device)
124
+ image = image.to(device)
125
+ with torch.no_grad():
126
+ outputs = model(image[None], captions=[caption])
127
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
128
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
129
+ logits.shape[0]
130
+
131
+ # filter output
132
+ logits_filt = logits.clone()
133
+ boxes_filt = boxes.clone()
134
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
135
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
136
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
137
+ logits_filt.shape[0]
138
+
139
+ # get phrase
140
+ tokenlizer = model.tokenizer
141
+ tokenized = tokenlizer(caption)
142
+ # build pred
143
+ pred_phrases = []
144
+ scores = []
145
+ for logit, box in zip(logits_filt, boxes_filt):
146
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
147
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
148
+ scores.append(logit.max().item())
149
+
150
+ return boxes_filt, torch.Tensor(scores), pred_phrases
151
+
152
+
153
+ def show_mask(mask, ax, random_color=False):
154
+ if random_color:
155
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
156
+ else:
157
+ color = np.array([30/255, 144/255, 255/255, 0.6])
158
+ h, w = mask.shape[-2:]
159
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
160
+ ax.imshow(mask_image)
161
+
162
+
163
+ def show_box(box, ax, label):
164
+ x0, y0 = box[0], box[1]
165
+ w, h = box[2] - box[0], box[3] - box[1]
166
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
167
+ ax.text(x0, y0, label)
168
+
169
+
170
+ def save_mask_data(output_dir, caption, mask_list, box_list, label_list):
171
+ value = 0 # 0 for background
172
+
173
+ mask_img = torch.zeros(mask_list.shape[-2:])
174
+ for idx, mask in enumerate(mask_list):
175
+ mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
176
+ plt.figure(figsize=(10, 10))
177
+ plt.imshow(mask_img.numpy())
178
+ plt.axis('off')
179
+ plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
180
+
181
+ json_data = {
182
+ 'caption': caption,
183
+ 'mask':[{
184
+ 'value': value,
185
+ 'label': 'background'
186
+ }]
187
+ }
188
+ for label, box in zip(label_list, box_list):
189
+ value += 1
190
+ name, logit = label.split('(')
191
+ logit = logit[:-1] # the last is ')'
192
+ json_data['mask'].append({
193
+ 'value': value,
194
+ 'label': name,
195
+ 'logit': float(logit),
196
+ 'box': box.numpy().tolist(),
197
+ })
198
+ with open(os.path.join(output_dir, 'label.json'), 'w') as f:
199
+ json.dump(json_data, f)
200
+
201
+
202
+ if __name__ == "__main__":
203
+
204
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
205
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
206
+ parser.add_argument(
207
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
208
+ )
209
+ parser.add_argument(
210
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
211
+ )
212
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
213
+ parser.add_argument("--split", default=",", type=str, help="split for text prompt")
214
+ parser.add_argument("--openai_key", type=str, help="key for chatgpt")
215
+ parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
216
+ parser.add_argument(
217
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
218
+ )
219
+
220
+ parser.add_argument("--box_threshold", type=float, default=0.25, help="box threshold")
221
+ parser.add_argument("--text_threshold", type=float, default=0.2, help="text threshold")
222
+ parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
223
+
224
+ parser.add_argument("--device", type=str, default="cpu", help="device to run on, e.g. 'cpu' or 'cuda' (default: cpu)")
225
+ args = parser.parse_args()
226
+
227
+ # cfg
228
+ config_file = args.config # change the path of the model config file
229
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
230
+ sam_checkpoint = args.sam_checkpoint
231
+ image_path = args.input_image
232
+ split = args.split
233
+ openai_key = args.openai_key
234
+ openai_proxy = args.openai_proxy
235
+ output_dir = args.output_dir
236
+ box_threshold = args.box_threshold
237
+ text_threshold = args.text_threshold
238
+ iou_threshold = args.iou_threshold
239
+ device = args.device
240
+
241
+ openai.api_key = openai_key
242
+ if openai_proxy:
243
+ openai.proxy = {"http": openai_proxy, "https": openai_proxy}
244
+
245
+ # make dir
246
+ os.makedirs(output_dir, exist_ok=True)
247
+ # load image
248
+ image_pil, image = load_image(image_path)
249
+ # load model
250
+ model = load_model(config_file, grounded_checkpoint, device=device)
251
+
252
+ # visualize raw image
253
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
254
+
255
+ # generate caption and tags
256
+ # using Tag2Text can generate better captions
257
+ # https://huggingface.co/spaces/xinyu1205/Tag2Text
258
+ # but there are some bugs...
259
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
260
+ if device == "cuda":
261
+ blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large", torch_dtype=torch.float16).to("cuda")
262
+ else:
263
+ blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
264
+ caption = generate_caption(image_pil, device=device)
265
+ # Currently ", " is better for detecting single tags
266
+ # while ". " is a little worse in some cases
267
+ text_prompt = generate_tags(caption, split=split)
268
+ print(f"Caption: {caption}")
269
+ print(f"Tags: {text_prompt}")
270
+
271
+ # run grounding dino model
272
+ boxes_filt, scores, pred_phrases = get_grounding_output(
273
+ model, image, text_prompt, box_threshold, text_threshold, device=device
274
+ )
275
+
276
+ # initialize SAM
277
+ predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
278
+ image = cv2.imread(image_path)
279
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
280
+ predictor.set_image(image)
281
+
282
+ size = image_pil.size
283
+ H, W = size[1], size[0]
284
+ for i in range(boxes_filt.size(0)):
285
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
286
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
287
+ boxes_filt[i][2:] += boxes_filt[i][:2]
288
+
289
+ boxes_filt = boxes_filt.cpu()
290
+ # use NMS to handle overlapped boxes
291
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
292
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
293
+ boxes_filt = boxes_filt[nms_idx]
294
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
295
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
296
+ caption = check_caption(caption, pred_phrases)
297
+ print(f"Revise caption with number: {caption}")
298
+
299
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
300
+
301
+ masks, _, _ = predictor.predict_torch(
302
+ point_coords = None,
303
+ point_labels = None,
304
+ boxes = transformed_boxes.to(device),
305
+ multimask_output = False,
306
+ )
307
+
308
+ # draw output image
309
+ plt.figure(figsize=(10, 10))
310
+ plt.imshow(image)
311
+ for mask in masks:
312
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
313
+ for box, label in zip(boxes_filt, pred_phrases):
314
+ show_box(box.numpy(), plt.gca(), label)
315
+
316
+ plt.title(caption)
317
+ plt.axis('off')
318
+ plt.savefig(
319
+ os.path.join(output_dir, "automatic_label_output.jpg"),
320
+ bbox_inches="tight", dpi=300, pad_inches=0.0
321
+ )
322
+
323
+ save_mask_data(output_dir, caption, masks, boxes_filt, pred_phrases)
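automatic_label_demo.py rescales GroundingDINO's normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1) pixel coordinates (the per-box loop above) before NMS and SAM prompting. A minimal standalone sketch of that conversion, with made-up tensor values for illustration only:

import torch

def cxcywh_norm_to_xyxy(boxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
    # boxes: (N, 4) normalized (cx, cy, w, h) -> absolute (x0, y0, x1, y1)
    scaled = boxes * torch.tensor([width, height, width, height], dtype=boxes.dtype)
    xyxy = scaled.clone()
    xyxy[:, :2] = scaled[:, :2] - scaled[:, 2:] / 2  # top-left corner
    xyxy[:, 2:] = scaled[:, :2] + scaled[:, 2:] / 2  # bottom-right corner
    return xyxy

# one hypothetical box centred on an 800x600 image
print(cxcywh_norm_to_xyxy(torch.tensor([[0.5, 0.5, 0.2, 0.4]]), 800, 600))
# tensor([[320., 180., 480., 420.]])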
external/Grounded-Segment-Anything/automatic_label_ram_demo.py ADDED
@@ -0,0 +1,324 @@
1
+ import argparse
2
+ import os
3
+
4
+ import numpy as np
5
+ import json
6
+ import torch
7
+ import torchvision
8
+ from PIL import Image
9
+ import litellm
10
+
11
+ # Grounding DINO
12
+ import GroundingDINO.groundingdino.datasets.transforms as T
13
+ from GroundingDINO.groundingdino.models import build_model
14
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
15
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
16
+
17
+ # segment anything
18
+ from segment_anything import (
19
+ build_sam,
20
+ build_sam_hq,
21
+ SamPredictor
22
+ )
23
+ import cv2
24
+ import numpy as np
25
+ import matplotlib.pyplot as plt
26
+
27
+ # Recognize Anything Model & Tag2Text
28
+ from ram.models import ram
29
+ from ram import inference_ram
30
+ import torchvision.transforms as TS
31
+
32
+ # ChatGPT or nltk is required when using tags_chineses
33
+ # import openai
34
+ # import nltk
35
+
36
+ def load_image(image_path):
37
+ # load image
38
+ image_pil = Image.open(image_path).convert("RGB") # load image
39
+
40
+ transform = T.Compose(
41
+ [
42
+ T.RandomResize([800], max_size=1333),
43
+ T.ToTensor(),
44
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
45
+ ]
46
+ )
47
+ image, _ = transform(image_pil, None) # 3, h, w
48
+ return image_pil, image
49
+
50
+
51
+ def check_tags_chinese(tags_chinese, pred_phrases, max_tokens=100, model="gpt-3.5-turbo"):
52
+ object_list = [obj.split('(')[0] for obj in pred_phrases]
53
+ object_num = []
54
+ for obj in set(object_list):
55
+ object_num.append(f'{object_list.count(obj)} {obj}')
56
+ object_num = ', '.join(object_num)
57
+ print(f"Correct object number: {object_num}")
58
+
59
+ if openai_key:
60
+ prompt = [
61
+ {
62
+ 'role': 'system',
63
+ 'content': 'Revise the number in the tags_chinese if it is wrong. ' + \
64
+ f'tags_chinese: {tags_chinese}. ' + \
65
+ f'True object number: {object_num}. ' + \
66
+ 'Only give the revised tags_chinese: '
67
+ }
68
+ ]
69
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
70
+ reply = response['choices'][0]['message']['content']
71
+ # sometimes return with "tags_chinese: xxx, xxx, xxx"
72
+ tags_chinese = reply.split(':')[-1].strip()
73
+ return tags_chinese
74
+
75
+
76
+ def load_model(model_config_path, model_checkpoint_path, device):
77
+ args = SLConfig.fromfile(model_config_path)
78
+ args.device = device
79
+ model = build_model(args)
80
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
81
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
82
+ print(load_res)
83
+ _ = model.eval()
84
+ return model
85
+
86
+
87
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold,device="cpu"):
88
+ caption = caption.lower()
89
+ caption = caption.strip()
90
+ if not caption.endswith("."):
91
+ caption = caption + "."
92
+ model = model.to(device)
93
+ image = image.to(device)
94
+ with torch.no_grad():
95
+ outputs = model(image[None], captions=[caption])
96
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
97
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
98
+ logits.shape[0]
99
+
100
+ # filter output
101
+ logits_filt = logits.clone()
102
+ boxes_filt = boxes.clone()
103
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
104
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
105
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
106
+ logits_filt.shape[0]
107
+
108
+ # get phrase
109
+ tokenlizer = model.tokenizer
110
+ tokenized = tokenlizer(caption)
111
+ # build pred
112
+ pred_phrases = []
113
+ scores = []
114
+ for logit, box in zip(logits_filt, boxes_filt):
115
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
116
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
117
+ scores.append(logit.max().item())
118
+
119
+ return boxes_filt, torch.Tensor(scores), pred_phrases
120
+
121
+
122
+ def show_mask(mask, ax, random_color=False):
123
+ if random_color:
124
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
125
+ else:
126
+ color = np.array([30/255, 144/255, 255/255, 0.6])
127
+ h, w = mask.shape[-2:]
128
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
129
+ ax.imshow(mask_image)
130
+
131
+
132
+ def show_box(box, ax, label):
133
+ x0, y0 = box[0], box[1]
134
+ w, h = box[2] - box[0], box[3] - box[1]
135
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
136
+ ax.text(x0, y0, label)
137
+
138
+
139
+ def save_mask_data(output_dir, tags_chinese, mask_list, box_list, label_list):
140
+ value = 0 # 0 for background
141
+
142
+ mask_img = torch.zeros(mask_list.shape[-2:])
143
+ for idx, mask in enumerate(mask_list):
144
+ mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
145
+ plt.figure(figsize=(10, 10))
146
+ plt.imshow(mask_img.numpy())
147
+ plt.axis('off')
148
+ plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
149
+
150
+ json_data = {
151
+ 'tags_chinese': tags_chinese,
152
+ 'mask':[{
153
+ 'value': value,
154
+ 'label': 'background'
155
+ }]
156
+ }
157
+ for label, box in zip(label_list, box_list):
158
+ value += 1
159
+ name, logit = label.split('(')
160
+ logit = logit[:-1] # the last is ')'
161
+ json_data['mask'].append({
162
+ 'value': value,
163
+ 'label': name,
164
+ 'logit': float(logit),
165
+ 'box': box.numpy().tolist(),
166
+ })
167
+ with open(os.path.join(output_dir, 'label.json'), 'w') as f:
168
+ json.dump(json_data, f)
169
+
170
+
171
+ if __name__ == "__main__":
172
+
173
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
174
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
175
+ parser.add_argument(
176
+ "--ram_checkpoint", type=str, required=True, help="path to checkpoint file"
177
+ )
178
+ parser.add_argument(
179
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
180
+ )
181
+ parser.add_argument(
182
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
183
+ )
184
+ parser.add_argument(
185
+ "--sam_hq_checkpoint", type=str, default=None, help="path to sam-hq checkpoint file"
186
+ )
187
+ parser.add_argument(
188
+ "--use_sam_hq", action="store_true", help="using sam-hq for prediction"
189
+ )
190
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
191
+ parser.add_argument("--split", default=",", type=str, help="split for text prompt")
192
+ parser.add_argument("--openai_key", type=str, help="key for chatgpt")
193
+ parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
194
+ parser.add_argument(
195
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
196
+ )
197
+
198
+ parser.add_argument("--box_threshold", type=float, default=0.25, help="box threshold")
199
+ parser.add_argument("--text_threshold", type=float, default=0.2, help="text threshold")
200
+ parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
201
+
202
+ parser.add_argument("--device", type=str, default="cpu", help="device to run on, e.g. 'cpu' or 'cuda' (default: cpu)")
203
+ args = parser.parse_args()
204
+
205
+ # cfg
206
+ config_file = args.config # change the path of the model config file
207
+ ram_checkpoint = args.ram_checkpoint # change the path of the model
208
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
209
+ sam_checkpoint = args.sam_checkpoint
210
+ sam_hq_checkpoint = args.sam_hq_checkpoint
211
+ use_sam_hq = args.use_sam_hq
212
+ image_path = args.input_image
213
+ split = args.split
214
+ openai_key = args.openai_key
215
+ openai_proxy = args.openai_proxy
216
+ output_dir = args.output_dir
217
+ box_threshold = args.box_threshold
218
+ text_threshold = args.text_threshold
219
+ iou_threshold = args.iou_threshold
220
+ device = args.device
221
+
222
+ # ChatGPT or nltk is required when using tags_chineses
223
+ # openai.api_key = openai_key
224
+ # if openai_proxy:
225
+ # openai.proxy = {"http": openai_proxy, "https": openai_proxy}
226
+
227
+ # make dir
228
+ os.makedirs(output_dir, exist_ok=True)
229
+ # load image
230
+ image_pil, image = load_image(image_path)
231
+ # load model
232
+ model = load_model(config_file, grounded_checkpoint, device=device)
233
+
234
+ # visualize raw image
235
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
236
+
237
+ # initialize Recognize Anything Model
238
+ normalize = TS.Normalize(mean=[0.485, 0.456, 0.406],
239
+ std=[0.229, 0.224, 0.225])
240
+ transform = TS.Compose([
241
+ TS.Resize((384, 384)),
242
+ TS.ToTensor(), normalize
243
+ ])
244
+
245
+ # load model
246
+ ram_model = ram(pretrained=ram_checkpoint,
247
+ image_size=384,
248
+ vit='swin_l')
249
+ # threshold for tagging
250
+ # we reduce the threshold to obtain more tags
251
+ ram_model.eval()
252
+
253
+ ram_model = ram_model.to(device)
254
+ raw_image = image_pil.resize(
255
+ (384, 384))
256
+ raw_image = transform(raw_image).unsqueeze(0).to(device)
257
+
258
+ res = inference_ram(raw_image, ram_model)
259
+
260
+ # Currently ", " is better for detecting single tags
261
+ # while ". " is a little worse in some case
262
+ tags=res[0].replace(' |', ',')
263
+ tags_chinese=res[1].replace(' |', ',')
264
+
265
+ print("Image Tags: ", res[0])
266
+ print("图像标签: ", res[1])
267
+
268
+ # run grounding dino model
269
+ boxes_filt, scores, pred_phrases = get_grounding_output(
270
+ model, image, tags, box_threshold, text_threshold, device=device
271
+ )
272
+
273
+ # initialize SAM
274
+ if use_sam_hq:
275
+ print("Initialize SAM-HQ Predictor")
276
+ predictor = SamPredictor(build_sam_hq(checkpoint=sam_hq_checkpoint).to(device))
277
+ else:
278
+ predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
279
+ image = cv2.imread(image_path)
280
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
281
+ predictor.set_image(image)
282
+
283
+ size = image_pil.size
284
+ H, W = size[1], size[0]
285
+ for i in range(boxes_filt.size(0)):
286
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
287
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
288
+ boxes_filt[i][2:] += boxes_filt[i][:2]
289
+
290
+ boxes_filt = boxes_filt.cpu()
291
+ # use NMS to handle overlapped boxes
292
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
293
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
294
+ boxes_filt = boxes_filt[nms_idx]
295
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
296
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
297
+ tags_chinese = check_tags_chinese(tags_chinese, pred_phrases)
298
+ print(f"Revise tags_chinese with number: {tags_chinese}")
299
+
300
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
301
+
302
+ masks, _, _ = predictor.predict_torch(
303
+ point_coords = None,
304
+ point_labels = None,
305
+ boxes = transformed_boxes.to(device),
306
+ multimask_output = False,
307
+ )
308
+
309
+ # draw output image
310
+ plt.figure(figsize=(10, 10))
311
+ plt.imshow(image)
312
+ for mask in masks:
313
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
314
+ for box, label in zip(boxes_filt, pred_phrases):
315
+ show_box(box.numpy(), plt.gca(), label)
316
+
317
+ # plt.title('RAM-tags' + tags + '\n' + 'RAM-tags_chineseing: ' + tags_chinese + '\n')
318
+ plt.axis('off')
319
+ plt.savefig(
320
+ os.path.join(output_dir, "automatic_label_output.jpg"),
321
+ bbox_inches="tight", dpi=300, pad_inches=0.0
322
+ )
323
+
324
+ save_mask_data(output_dir, tags_chinese, masks, boxes_filt, pred_phrases)
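Like the other demos, automatic_label_ram_demo.py prunes overlapping detections with torchvision's NMS and filters the phrase list with the surviving indices. A minimal sketch of that step; the boxes, scores and phrases below are made up for illustration and are not part of the commit:

import torch
import torchvision

boxes = torch.tensor([[0., 0., 100., 100.],       # high-scoring detection
                      [5., 5., 105., 105.],       # near-duplicate of the first box
                      [200., 200., 300., 300.]])  # separate object elsewhere
scores = torch.tensor([0.9, 0.8, 0.7])
phrases = ["dog(0.90)", "dog(0.80)", "cat(0.70)"]

keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5).numpy().tolist()
boxes = boxes[keep]
phrases = [phrases[i] for i in keep]
print(f"After NMS: {boxes.shape[0]} boxes", phrases)  # the near-duplicate box is suppressed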
external/Grounded-Segment-Anything/automatic_label_tag2text_demo.py ADDED
@@ -0,0 +1,352 @@
1
+ import argparse
2
+ import os
3
+ import copy
4
+
5
+ import numpy as np
6
+ import json
7
+ import torch
8
+ import torchvision
9
+ from PIL import Image, ImageDraw, ImageFont
10
+ import litellm
11
+
12
+ # Grounding DINO
13
+ import GroundingDINO.groundingdino.datasets.transforms as T
14
+ from GroundingDINO.groundingdino.models import build_model
15
+ from GroundingDINO.groundingdino.util import box_ops
16
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
17
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
18
+
19
+ # segment anything
20
+ from segment_anything import build_sam, SamPredictor
21
+ import cv2
22
+ import numpy as np
23
+ import matplotlib.pyplot as plt
24
+
25
+ # Tag2Text
26
+ from ram.models import tag2text
27
+ from ram import inference_tag2text
28
+ import torchvision.transforms as TS
29
+
30
+ # ChatGPT or nltk is required when using captions
31
+ # import openai
32
+ # import nltk
33
+
34
+ def load_image(image_path):
35
+ # load image
36
+ image_pil = Image.open(image_path).convert("RGB") # load image
37
+
38
+ transform = T.Compose(
39
+ [
40
+ T.RandomResize([800], max_size=1333),
41
+ T.ToTensor(),
42
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
43
+ ]
44
+ )
45
+ image, _ = transform(image_pil, None) # 3, h, w
46
+ return image_pil, image
47
+
48
+
49
+ def generate_caption(raw_image, device):
50
+ # unconditional image captioning
51
+ if device == "cuda":
52
+ inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
53
+ else:
54
+ inputs = processor(raw_image, return_tensors="pt")
55
+ out = blip_model.generate(**inputs)
56
+ caption = processor.decode(out[0], skip_special_tokens=True)
57
+ return caption
58
+
59
+
60
+ def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo"):
61
+ lemma = nltk.wordnet.WordNetLemmatizer()
62
+ if openai_key:
63
+ prompt = [
64
+ {
65
+ 'role': 'system',
66
+ 'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
67
+ f'List the nouns in singular form. Split them by "{split} ". ' + \
68
+ f'Caption: {caption}.'
69
+ }
70
+ ]
71
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
72
+ reply = response['choices'][0]['message']['content']
73
+ # sometimes return with "noun: xxx, xxx, xxx"
74
+ tags = reply.split(':')[-1].strip()
75
+ else:
76
+ nltk.download(['punkt', 'averaged_perceptron_tagger', 'wordnet'])
77
+ tags_list = [word for (word, pos) in nltk.pos_tag(nltk.word_tokenize(caption)) if pos[0] == 'N']
78
+ tags_lemma = [lemma.lemmatize(w) for w in tags_list]
79
+ tags = ', '.join(map(str, tags_lemma))
80
+ return tags
81
+
82
+
83
+ def check_caption(caption, pred_phrases, max_tokens=100, model="gpt-3.5-turbo"):
84
+ object_list = [obj.split('(')[0] for obj in pred_phrases]
85
+ object_num = []
86
+ for obj in set(object_list):
87
+ object_num.append(f'{object_list.count(obj)} {obj}')
88
+ object_num = ', '.join(object_num)
89
+ print(f"Correct object number: {object_num}")
90
+
91
+ if openai_key:
92
+ prompt = [
93
+ {
94
+ 'role': 'system',
95
+ 'content': 'Revise the number in the caption if it is wrong. ' + \
96
+ f'Caption: {caption}. ' + \
97
+ f'True object number: {object_num}. ' + \
98
+ 'Only give the revised caption: '
99
+ }
100
+ ]
101
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
102
+ reply = response['choices'][0]['message']['content']
103
+ # sometimes return with "Caption: xxx, xxx, xxx"
104
+ caption = reply.split(':')[-1].strip()
105
+ return caption
106
+
107
+
108
+ def load_model(model_config_path, model_checkpoint_path, device):
109
+ args = SLConfig.fromfile(model_config_path)
110
+ args.device = device
111
+ model = build_model(args)
112
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
113
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
114
+ print(load_res)
115
+ _ = model.eval()
116
+ return model
117
+
118
+
119
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold,device="cpu"):
120
+ caption = caption.lower()
121
+ caption = caption.strip()
122
+ if not caption.endswith("."):
123
+ caption = caption + "."
124
+ model = model.to(device)
125
+ image = image.to(device)
126
+ with torch.no_grad():
127
+ outputs = model(image[None], captions=[caption])
128
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
129
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
130
+ logits.shape[0]
131
+
132
+ # filter output
133
+ logits_filt = logits.clone()
134
+ boxes_filt = boxes.clone()
135
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
136
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
137
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
138
+ logits_filt.shape[0]
139
+
140
+ # get phrase
141
+ tokenlizer = model.tokenizer
142
+ tokenized = tokenlizer(caption)
143
+ # build pred
144
+ pred_phrases = []
145
+ scores = []
146
+ for logit, box in zip(logits_filt, boxes_filt):
147
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
148
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
149
+ scores.append(logit.max().item())
150
+
151
+ return boxes_filt, torch.Tensor(scores), pred_phrases
152
+
153
+
154
+ def show_mask(mask, ax, random_color=False):
155
+ if random_color:
156
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
157
+ else:
158
+ color = np.array([30/255, 144/255, 255/255, 0.6])
159
+ h, w = mask.shape[-2:]
160
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
161
+ ax.imshow(mask_image)
162
+
163
+
164
+ def show_box(box, ax, label):
165
+ x0, y0 = box[0], box[1]
166
+ w, h = box[2] - box[0], box[3] - box[1]
167
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
168
+ ax.text(x0, y0, label)
169
+
170
+
171
+ def save_mask_data(output_dir, caption, mask_list, box_list, label_list):
172
+ value = 0 # 0 for background
173
+
174
+ mask_img = torch.zeros(mask_list.shape[-2:])
175
+ for idx, mask in enumerate(mask_list):
176
+ mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
177
+ plt.figure(figsize=(10, 10))
178
+ plt.imshow(mask_img.numpy())
179
+ plt.axis('off')
180
+ plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
181
+
182
+ json_data = {
183
+ 'caption': caption,
184
+ 'mask':[{
185
+ 'value': value,
186
+ 'label': 'background'
187
+ }]
188
+ }
189
+ for label, box in zip(label_list, box_list):
190
+ value += 1
191
+ name, logit = label.split('(')
192
+ logit = logit[:-1] # the last is ')'
193
+ json_data['mask'].append({
194
+ 'value': value,
195
+ 'label': name,
196
+ 'logit': float(logit),
197
+ 'box': box.numpy().tolist(),
198
+ })
199
+ with open(os.path.join(output_dir, 'label.json'), 'w') as f:
200
+ json.dump(json_data, f)
201
+
202
+
203
+ if __name__ == "__main__":
204
+
205
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
206
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
207
+ parser.add_argument(
208
+ "--tag2text_checkpoint", type=str, required=True, help="path to checkpoint file"
209
+ )
210
+ parser.add_argument(
211
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
212
+ )
213
+ parser.add_argument(
214
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
215
+ )
216
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
217
+ parser.add_argument("--split", default=",", type=str, help="split for text prompt")
218
+ parser.add_argument("--openai_key", type=str, help="key for chatgpt")
219
+ parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
220
+ parser.add_argument(
221
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
222
+ )
223
+
224
+ parser.add_argument("--box_threshold", type=float, default=0.25, help="box threshold")
225
+ parser.add_argument("--text_threshold", type=float, default=0.2, help="text threshold")
226
+ parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
227
+
228
+ parser.add_argument("--device", type=str, default="cpu", help="device to run on, e.g. 'cpu' or 'cuda' (default: cpu)")
229
+ args = parser.parse_args()
230
+
231
+ # cfg
232
+ config_file = args.config # change the path of the model config file
233
+ tag2text_checkpoint = args.tag2text_checkpoint # change the path of the model
234
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
235
+ sam_checkpoint = args.sam_checkpoint
236
+ image_path = args.input_image
237
+ split = args.split
238
+ openai_key = args.openai_key
239
+ openai_proxy = args.openai_proxy
240
+ output_dir = args.output_dir
241
+ box_threshold = args.box_threshold
242
+ text_threshold = args.text_threshold
243
+ iou_threshold = args.iou_threshold
244
+ device = args.device
245
+
246
+ # ChatGPT or nltk is required when using captions
247
+ # openai.api_key = openai_key
248
+ # if openai_proxy:
249
+ # openai.proxy = {"http": openai_proxy, "https": openai_proxy}
250
+
251
+ # make dir
252
+ os.makedirs(output_dir, exist_ok=True)
253
+ # load image
254
+ image_pil, image = load_image(image_path)
255
+ # load model
256
+ model = load_model(config_file, grounded_checkpoint, device=device)
257
+
258
+ # visualize raw image
259
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
260
+
261
+ # initialize Tag2Text
262
+ normalize = TS.Normalize(mean=[0.485, 0.456, 0.406],
263
+ std=[0.229, 0.224, 0.225])
264
+ transform = TS.Compose([
265
+ TS.Resize((384, 384)),
266
+ TS.ToTensor(), normalize
267
+ ])
268
+
269
+ # filter out attributes and action categories which are difficult to ground
270
+ delete_tag_index = []
271
+ for i in range(3012, 3429):
272
+ delete_tag_index.append(i)
273
+
274
+ specified_tags='None'
275
+ # load model
276
+ tag2text_model = tag2text(pretrained=tag2text_checkpoint,
277
+ image_size=384,
278
+ vit='swin_b',
279
+ delete_tag_index=delete_tag_index)
280
+ # threshold for tagging
281
+ # we reduce the threshold to obtain more tags
282
+ tag2text_model.threshold = 0.64
283
+ tag2text_model.eval()
284
+
285
+ tag2text_model = tag2text_model.to(device)
286
+ raw_image = image_pil.resize(
287
+ (384, 384))
288
+ raw_image = transform(raw_image).unsqueeze(0).to(device)
289
+
290
+ res = inference_tag2text(raw_image, tag2text_model, specified_tags)
291
+
292
+ # Currently ", " is better for detecting single tags
293
+ # while ". " is a little worse in some cases
294
+ text_prompt=res[0].replace(' |', ',')
295
+ caption=res[2]
296
+
297
+ print(f"Caption: {caption}")
298
+ print(f"Tags: {text_prompt}")
299
+
300
+ # run grounding dino model
301
+ boxes_filt, scores, pred_phrases = get_grounding_output(
302
+ model, image, text_prompt, box_threshold, text_threshold, device=device
303
+ )
304
+
305
+ # initialize SAM
306
+ predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
307
+ image = cv2.imread(image_path)
308
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
309
+ predictor.set_image(image)
310
+
311
+ size = image_pil.size
312
+ H, W = size[1], size[0]
313
+ for i in range(boxes_filt.size(0)):
314
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
315
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
316
+ boxes_filt[i][2:] += boxes_filt[i][:2]
317
+
318
+ boxes_filt = boxes_filt.cpu()
319
+ # use NMS to handle overlapped boxes
320
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
321
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
322
+ boxes_filt = boxes_filt[nms_idx]
323
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
324
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
325
+ caption = check_caption(caption, pred_phrases)
326
+ print(f"Revise caption with number: {caption}")
327
+
328
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
329
+
330
+ masks, _, _ = predictor.predict_torch(
331
+ point_coords = None,
332
+ point_labels = None,
333
+ boxes = transformed_boxes.to(device),
334
+ multimask_output = False,
335
+ )
336
+
337
+ # draw output image
338
+ plt.figure(figsize=(10, 10))
339
+ plt.imshow(image)
340
+ for mask in masks:
341
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
342
+ for box, label in zip(boxes_filt, pred_phrases):
343
+ show_box(box.numpy(), plt.gca(), label)
344
+
345
+ plt.title('Tag2Text-Captioning: ' + caption + '\n' + 'Tag2Text-Tagging: ' + text_prompt + '\n')
346
+ plt.axis('off')
347
+ plt.savefig(
348
+ os.path.join(output_dir, "automatic_label_output.jpg"),
349
+ bbox_inches="tight", dpi=300, pad_inches=0.0
350
+ )
351
+
352
+ save_mask_data(output_dir, caption, masks, boxes_filt, pred_phrases)
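All three automatic-label demos persist their results through save_mask_data, which writes a mask index image plus a label.json whose first entry is the background (value 0) and whose remaining entries carry the detected phrase, its confidence and its box. A minimal sketch of reading that file back; the path assumes the default --output_dir of "outputs":

import json

with open("outputs/label.json") as f:
    data = json.load(f)

# "caption" in the BLIP/Tag2Text demos, "tags_chinese" in the RAM demo
print(data.get("caption", data.get("tags_chinese")))
for entry in data["mask"][1:]:  # skip the background entry
    print(entry["value"], entry["label"], entry["logit"], entry["box"])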
external/Grounded-Segment-Anything/chatbot.py ADDED
@@ -0,0 +1,1460 @@
1
+ # coding: utf-8
2
+ import os
3
+ import gradio as gr
4
+ import random
5
+ import torch
6
+ import cv2
7
+ import re
8
+ import uuid
9
+ from PIL import Image, ImageDraw, ImageOps
10
+ import math
11
+ import numpy as np
12
+ import argparse
13
+ import inspect
14
+
15
+ import shutil
16
+ import torchvision
17
+ import whisper
18
+ import matplotlib.pyplot as plt
19
+ from automatic_label_demo import load_model, load_image, get_grounding_output, show_box, show_mask, generate_tags, check_caption
20
+ from grounding_dino_demo import plot_boxes_to_image
21
+ from segment_anything import build_sam, SamAutomaticMaskGenerator, SamPredictor
22
+ from segment_anything.utils.amg import remove_small_regions
23
+
24
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
25
+ from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
26
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
27
+
28
+ from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
29
+ from diffusers import EulerAncestralDiscreteScheduler
30
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
31
+ from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector
32
+
33
+ from langchain.agents.initialize import initialize_agent
34
+ from langchain.agents.tools import Tool
35
+ from langchain.chains.conversation.memory import ConversationBufferMemory
36
+ from langchain.llms.openai import OpenAI
37
+
38
+ VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
39
+
40
+ Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
41
+
42
+ Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish following tasks, rather than directly imagine from the description.
43
+
44
+ Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics.
45
+
46
+
47
+ TOOLS:
48
+ ------
49
+
50
+ Visual ChatGPT has access to the following tools:"""
51
+
52
+ VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
53
+
54
+ ```
55
+ Thought: Do I need to use a tool? Yes
56
+ Action: the action to take, should be one of [{tool_names}]
57
+ Action Input: the input to the action
58
+ Observation: the result of the action
59
+ ```
60
+
61
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
62
+
63
+ ```
64
+ Thought: Do I need to use a tool? No
65
+ {ai_prefix}: [your response here]
66
+ ```
67
+ """
68
+
69
+ VISUAL_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist.
70
+ You will remember to provide the image file name loyally if it's provided in the last tool observation.
71
+
72
+ Begin!
73
+
74
+ Previous conversation history:
75
+ {chat_history}
76
+
77
+ New input: {input}
78
+ Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagination.
79
+ The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human.
80
+ Thought: Do I need to use a tool? {agent_scratchpad} Let's think step by step.
81
+ """
82
+
83
+ VISUAL_CHATGPT_PREFIX_CN = """Visual ChatGPT 旨在能够协助完成范围广泛的文本和视觉相关任务,从回答简单的问题到提供对广泛主题的深入解释和讨论。 Visual ChatGPT 能够根据收到的输入生成类似人类的文本,使其能够进行听起来自然的对话,并提供连贯且与手头主题相关的响应。
84
+
85
+ Visual ChatGPT 能够处理和理解大量文本和图像。作为一种语言模型,Visual ChatGPT 不能直接读取图像,但它有一系列工具来完成不同的视觉任务。每张图片都会有一个文件名,格式为“image/xxx.png”,Visual ChatGPT可以调用不同的工具来间接理解图片。在谈论图片时,Visual ChatGPT 对文件名的要求非常严格,绝不会伪造不存在的文件。在使用工具生成新的图像文件时,Visual ChatGPT也知道图像可能与用户需求不一样,会使用其他视觉问答工具或描述工具来观察真实图像。 Visual ChatGPT 能够按顺序使用工具,并且忠于工具观察输出,而不是伪造图像内容和图像文件名。如果生成新图像,它将记得提供上次工具观察的文件名。
86
+
87
+ Human 可能会向 Visual ChatGPT 提供带有描述的新图形。描述帮助 Visual ChatGPT 理解这个图像,但 Visual ChatGPT 应该使用工具来完成以下任务,而不是直接从描述中想象。有些工具将会返回英文描述,但你对用户的聊天应当采用中文。
88
+
89
+ 总的来说,Visual ChatGPT 是一个强大的可视化对话辅助工具,可以帮助处理范围广泛的任务,并提供关于范围广泛的主题的有价值的见解和信息。
90
+
91
+ 工具列表:
92
+ ------
93
+
94
+ Visual ChatGPT 可以使用这些工具:"""
95
+
96
+ VISUAL_CHATGPT_FORMAT_INSTRUCTIONS_CN = """用户使用中文和你进行聊天,但是工具的参数应当使用英文。如果要调用工具,你必须遵循如下格式:
97
+
98
+ ```
99
+ Thought: Do I need to use a tool? Yes
100
+ Action: the action to take, should be one of [{tool_names}]
101
+ Action Input: the input to the action
102
+ Observation: the result of the action
103
+ ```
104
+
105
+ 当你不再需要继续调用工具,而是对观察结果进行总结回复时,你必须使用如下格式:
106
+
107
+
108
+ ```
109
+ Thought: Do I need to use a tool? No
110
+ {ai_prefix}: [your response here]
111
+ ```
112
+ """
113
+
114
+ VISUAL_CHATGPT_SUFFIX_CN = """你对文件名的正确性非常严格,而且永远不会伪造不存在的文件。
115
+
116
+ 开始!
117
+
118
+ 因为Visual ChatGPT是一个文本语言模型,必须使用工具去观察图片而不是依靠想象。
119
+ 推理想法和观察结果只对Visual ChatGPT可见,需要记得在最终回复时把重要的信息重复给用户,你只能给用户返回中文句子。我们一步一步思考。在你使用工具时,工具的参数只能是英文。
120
+
121
+ 聊天历史:
122
+ {chat_history}
123
+
124
+ 新输入: {input}
125
+ Thought: Do I need to use a tool? {agent_scratchpad}
126
+ """
127
+
128
+ os.makedirs('image', exist_ok=True)
129
+
130
+
131
+ def seed_everything(seed):
132
+ random.seed(seed)
133
+ np.random.seed(seed)
134
+ torch.manual_seed(seed)
135
+ torch.cuda.manual_seed_all(seed)
136
+ return seed
137
+
138
+
139
+ def prompts(name, description):
140
+ def decorator(func):
141
+ func.name = name
142
+ func.description = description
143
+ return func
144
+
145
+ return decorator
146
+
147
+
148
+ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
149
+ new_size = new_image.size
150
+ old_size = old_image.size
151
+ easy_img = np.array(new_image)
152
+ gt_img_array = np.array(old_image)
153
+ pos_w = (new_size[0] - old_size[0]) // 2
154
+ pos_h = (new_size[1] - old_size[1]) // 2
155
+
156
+ kernel_h = cv2.getGaussianKernel(old_size[1], old_size[1] * sigma)
157
+ kernel_w = cv2.getGaussianKernel(old_size[0], old_size[0] * sigma)
158
+ kernel = np.multiply(kernel_h, np.transpose(kernel_w))
159
+
160
+ kernel[steps:-steps, steps:-steps] = 1
161
+ kernel[:steps, :steps] = kernel[:steps, :steps] / kernel[steps - 1, steps - 1]
162
+ kernel[:steps, -steps:] = kernel[:steps, -steps:] / kernel[steps - 1, -(steps)]
163
+ kernel[-steps:, :steps] = kernel[-steps:, :steps] / kernel[-steps, steps - 1]
164
+ kernel[-steps:, -steps:] = kernel[-steps:, -steps:] / kernel[-steps, -steps]
165
+ kernel = np.expand_dims(kernel, 2)
166
+ kernel = np.repeat(kernel, 3, 2)
167
+
168
+ weight = np.linspace(0, 1, steps)
169
+ top = np.expand_dims(weight, 1)
170
+ top = np.repeat(top, old_size[0] - 2 * steps, 1)
171
+ top = np.expand_dims(top, 2)
172
+ top = np.repeat(top, 3, 2)
173
+
174
+ weight = np.linspace(1, 0, steps)
175
+ down = np.expand_dims(weight, 1)
176
+ down = np.repeat(down, old_size[0] - 2 * steps, 1)
177
+ down = np.expand_dims(down, 2)
178
+ down = np.repeat(down, 3, 2)
179
+
180
+ weight = np.linspace(0, 1, steps)
181
+ left = np.expand_dims(weight, 0)
182
+ left = np.repeat(left, old_size[1] - 2 * steps, 0)
183
+ left = np.expand_dims(left, 2)
184
+ left = np.repeat(left, 3, 2)
185
+
186
+ weight = np.linspace(1, 0, steps)
187
+ right = np.expand_dims(weight, 0)
188
+ right = np.repeat(right, old_size[1] - 2 * steps, 0)
189
+ right = np.expand_dims(right, 2)
190
+ right = np.repeat(right, 3, 2)
191
+
192
+ kernel[:steps, steps:-steps] = top
193
+ kernel[-steps:, steps:-steps] = down
194
+ kernel[steps:-steps, :steps] = left
195
+ kernel[steps:-steps, -steps:] = right
196
+
197
+ pt_gt_img = easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]]
198
+ gaussian_gt_img = kernel * gt_img_array + (1 - kernel) * pt_gt_img # gt img with blur img
199
+ gaussian_gt_img = gaussian_gt_img.astype(np.int64)
200
+ easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]] = gaussian_gt_img
201
+ gaussian_img = Image.fromarray(easy_img)
202
+ return gaussian_img
203
+
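+ # Editorial note (best-effort reading, not part of the original project): blend_gt2pt pastes
+ # old_image back into the centre of new_image and feathers a band of `steps` pixels around the
+ # seam so an outpainted border blends smoothly, e.g.
+ #   blended = blend_gt2pt(Image.open("image/a.png"), outpainted_canvas)  # hypothetical inputs
+ # It assumes old_image is larger than 2 * steps pixels per side and new_image is at least as
+ # large as old_image.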
204
+
205
+ def cut_dialogue_history(history_memory, keep_last_n_words=500):
206
+ if history_memory is None or len(history_memory) == 0:
207
+ return history_memory
208
+ tokens = history_memory.split()
209
+ n_tokens = len(tokens)
210
+ print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
211
+ if n_tokens < keep_last_n_words:
212
+ return history_memory
213
+ paragraphs = history_memory.split('\n')
214
+ last_n_tokens = n_tokens
215
+ while last_n_tokens >= keep_last_n_words:
216
+ last_n_tokens -= len(paragraphs[0].split(' '))
217
+ paragraphs = paragraphs[1:]
218
+ return '\n' + '\n'.join(paragraphs)
219
+
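+ # Editorial usage sketch (not part of the original project): whole leading paragraphs are
+ # dropped until fewer than keep_last_n_words whitespace tokens remain, e.g.
+ #   cut_dialogue_history("line one\nline two\nline three", keep_last_n_words=3)
+ #   -> "\nline three"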
220
+
221
+ def get_new_image_name(org_img_name, func_name="update"):
222
+ head_tail = os.path.split(org_img_name)
223
+ head = head_tail[0]
224
+ tail = head_tail[1]
225
+ name_split = tail.split('.')[0].split('_')
226
+ this_new_uuid = str(uuid.uuid4())[:4]
227
+ if len(name_split) == 1:
228
+ most_org_file_name = name_split[0]
229
+ else:
230
+ assert len(name_split) == 4
231
+ most_org_file_name = name_split[3]
232
+ recent_prev_file_name = name_split[0]
233
+ new_file_name = f'{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.png'
234
+ return os.path.join(head, new_file_name)
235
+
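+ # Editorial example (hypothetical file names, not part of the original project):
+ #   get_new_image_name("image/1e2f_edge_ab12_ab12.png", func_name="depth")
+ #   -> "image/xxxx_depth_1e2f_ab12.png", where xxxx is a fresh 4-character uuid,
+ # so the newest operation, the most recent parent and the original image id all stay visible.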
236
+
237
+
238
+ class MaskFormer:
239
+ def __init__(self, device):
240
+ print(f"Initializing MaskFormer to {device}")
241
+ self.device = device
242
+ self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
243
+ self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
244
+
245
+ def inference(self, image_path, text):
246
+ threshold = 0.5
247
+ min_area = 0.02
248
+ padding = 20
249
+ original_image = Image.open(image_path)
250
+ image = original_image.resize((512, 512))
251
+ inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt").to(self.device)
252
+ with torch.no_grad():
253
+ outputs = self.model(**inputs)
254
+ mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
255
+ area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
256
+ if area_ratio < min_area:
257
+ return None
258
+ true_indices = np.argwhere(mask)
259
+ mask_array = np.zeros_like(mask, dtype=bool)
260
+ for idx in true_indices:
261
+ padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
262
+ mask_array[padded_slice] = True
263
+ visual_mask = (mask_array * 255).astype(np.uint8)
264
+ image_mask = Image.fromarray(visual_mask)
265
+ return image_mask.resize(original_image.size)
266
+
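+ # Editorial usage sketch (hypothetical path, not part of the original project):
+ #   mask = MaskFormer(device="cpu").inference("image/example.png", "dog")
+ # returns a padded CLIPSeg mask resized back to the original resolution, or None when the
+ # matched region covers less than about 2% of the picture.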
267
+
268
+ class ImageEditing:
269
+ def __init__(self, device):
270
+ print(f"Initializing ImageEditing to {device}")
271
+ self.device = device
272
+ self.mask_former = MaskFormer(device=self.device)
273
+ self.revision = 'fp16' if 'cuda' in device else None
274
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
275
+ self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
276
+ "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype=self.torch_dtype).to(device)
277
+
278
+ @prompts(name="Replace Something From The Photo",
279
+ description="useful when you want to replace an object from the object description or "
280
+ "location with another object from its description. "
281
+ "The input to this tool should be a comma separated string of three, "
282
+ "representing the image_path, the object to be replaced, the object to be replaced with ")
283
+ def inference_replace(self, inputs):
284
+ image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
285
+ original_image = Image.open(image_path)
286
+ original_size = original_image.size
287
+ mask_image = self.mask_former.inference(image_path, to_be_replaced_txt)
288
+ updated_image = self.inpaint(prompt=replace_with_txt, image=original_image.resize((512, 512)),
289
+ mask_image=mask_image.resize((512, 512))).images[0]
290
+ updated_image_path = get_new_image_name(image_path, func_name="replace-something")
291
+ updated_image = updated_image.resize(original_size)
292
+ updated_image.save(updated_image_path)
293
+ print(
294
+ f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
295
+ f"Output Image: {updated_image_path}")
296
+ return updated_image_path
297
+
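+ # Editorial usage sketch (hypothetical path, not part of the original project): the
+ # comma-separated tool input maps to
+ #   ImageEditing(device="cpu").inference_replace("image/example.png, a dog, a cat")
+ # i.e. image_path, the object to remove, and the object to paint in its place.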
298
+
299
+ class InstructPix2Pix:
300
+ def __init__(self, device):
301
+ print(f"Initializing InstructPix2Pix to {device}")
302
+ self.device = device
303
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
304
+ self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix",
305
+ safety_checker=None,
306
+ torch_dtype=self.torch_dtype).to(device)
307
+ self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
308
+
309
+ @prompts(name="Instruct Image Using Text",
310
+ description="useful when you want to the style of the image to be like the text. "
311
+ "like: make it look like a painting. or make it like a robot. "
312
+ "The input to this tool should be a comma separated string of two, "
313
+ "representing the image_path and the text. ")
314
+ def inference(self, inputs):
315
+ """Change style of image."""
316
+ print("===>Starting InstructPix2Pix Inference")
317
+ image_path, text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
318
+ original_image = Image.open(image_path)
319
+ image = self.pipe(text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2).images[0]
320
+ updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
321
+ image.save(updated_image_path)
322
+ print(f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
323
+ f"Output Image: {updated_image_path}")
324
+ return updated_image_path
325
+
326
+
327
+ class Text2Image:
328
+ def __init__(self, device):
329
+ print(f"Initializing Text2Image to {device}")
330
+ self.device = device
331
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
332
+ self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
333
+ torch_dtype=self.torch_dtype)
334
+ self.pipe.to(device)
335
+ self.a_prompt = 'best quality, extremely detailed'
336
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
337
+ 'fewer digits, cropped, worst quality, low quality'
338
+
339
+ @prompts(name="Generate Image From User Input Text",
340
+ description="useful when you want to generate an image from a user input text and save it to a file. "
341
+ "like: generate an image of an object or something, or generate an image that includes some objects. "
342
+ "The input to this tool should be a string, representing the text used to generate image. ")
343
+ def inference(self, text):
344
+ image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
345
+ prompt = text + ', ' + self.a_prompt
346
+ image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0]
347
+ image.save(image_filename)
348
+ print(
349
+ f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}")
350
+ return image_filename
351
+
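+ # Editorial usage sketch (not part of the original project; downloads the v1-5 weights on first run):
+ #   t2i = Text2Image(device="cuda:0")
+ #   path = t2i.inference("a red bicycle leaning against a brick wall")
+ #   # path looks like "image/3fa2b1c9.png"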
352
+
353
+ class ImageCaptioning:
354
+ def __init__(self, device):
355
+ print(f"Initializing ImageCaptioning to {device}")
356
+ self.device = device
357
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
358
+ self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
359
+ self.model = BlipForConditionalGeneration.from_pretrained(
360
+ "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device)
361
+
362
+ @prompts(name="Get Photo Description",
363
+ description="useful when you want to know what is inside the photo. receives image_path as input. "
364
+ "The input to this tool should be a string, representing the image_path. ")
365
+ def inference(self, image_path):
366
+ inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
367
+ out = self.model.generate(**inputs)
368
+ captions = self.processor.decode(out[0], skip_special_tokens=True)
369
+ print(f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}")
370
+ return captions
371
+
372
+
373
+ class Image2Canny:
374
+ def __init__(self, device):
375
+ print("Initializing Image2Canny")
376
+ self.low_threshold = 100
377
+ self.high_threshold = 200
378
+
379
+ @prompts(name="Edge Detection On Image",
380
+ description="useful when you want to detect the edge of the image. "
381
+ "like: detect the edges of this image, or canny detection on image, "
382
+ "or perform edge detection on this image, or detect the canny image of this image. "
383
+ "The input to this tool should be a string, representing the image_path")
384
+ def inference(self, inputs):
385
+ image = Image.open(inputs)
386
+ image = np.array(image)
387
+ canny = cv2.Canny(image, self.low_threshold, self.high_threshold)
388
+ canny = canny[:, :, None]
389
+ canny = np.concatenate([canny, canny, canny], axis=2)
390
+ canny = Image.fromarray(canny)
391
+ updated_image_path = get_new_image_name(inputs, func_name="edge")
392
+ canny.save(updated_image_path)
393
+ print(f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text: {updated_image_path}")
394
+ return updated_image_path
395
+
396
+
397
+ class CannyText2Image:
398
+ def __init__(self, device):
399
+ print(f"Initializing CannyText2Image to {device}")
400
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
401
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-canny",
402
+ torch_dtype=self.torch_dtype)
403
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
404
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
405
+ torch_dtype=self.torch_dtype)
406
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
407
+ self.pipe.to(device)
408
+ self.seed = -1
409
+ self.a_prompt = 'best quality, extremely detailed'
410
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
411
+ 'fewer digits, cropped, worst quality, low quality'
412
+
413
+ @prompts(name="Generate Image Condition On Canny Image",
414
+ description="useful when you want to generate a new real image from both the user description and a canny image."
415
+ " like: generate a real image of a object or something from this canny image,"
416
+ " or generate a new real image of a object or something from this edge image. "
417
+ "The input to this tool should be a comma separated string of two, "
418
+ "representing the image_path and the user description. ")
419
+ def inference(self, inputs):
420
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
421
+ image = Image.open(image_path)
422
+ self.seed = random.randint(0, 65535)
423
+ seed_everything(self.seed)
424
+ prompt = f'{instruct_text}, {self.a_prompt}'
425
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
426
+ guidance_scale=9.0).images[0]
427
+ updated_image_path = get_new_image_name(image_path, func_name="canny2image")
428
+ image.save(updated_image_path)
429
+ print(f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, "
430
+ f"Output Text: {updated_image_path}")
431
+ return updated_image_path
432
+
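+ # Editorial note: CannyText2Image and the Line/Hed/Scribble/Pose/Seg/Depth/Normal variants
+ # below all follow the same recipe: load a task-specific ControlNet, reseed, append a_prompt
+ # to the user text, run 20 denoising steps with guidance_scale 9.0, and save the result under
+ # a task-specific func_name via get_new_image_name.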
433
+
434
+ class Image2Line:
435
+ def __init__(self, device):
436
+ print("Initializing Image2Line")
437
+ self.detector = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
438
+
439
+ @prompts(name="Line Detection On Image",
440
+ description="useful when you want to detect the straight line of the image. "
441
+ "like: detect the straight lines of this image, or straight line detection on image, "
442
+ "or perform straight line detection on this image, or detect the straight line image of this image. "
443
+ "The input to this tool should be a string, representing the image_path")
444
+ def inference(self, inputs):
445
+ image = Image.open(inputs)
446
+ mlsd = self.detector(image)
447
+ updated_image_path = get_new_image_name(inputs, func_name="line-of")
448
+ mlsd.save(updated_image_path)
449
+ print(f"\nProcessed Image2Line, Input Image: {inputs}, Output Line: {updated_image_path}")
450
+ return updated_image_path
451
+
452
+
453
+ class LineText2Image:
454
+ def __init__(self, device):
455
+ print(f"Initializing LineText2Image to {device}")
456
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
457
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-mlsd",
458
+ torch_dtype=self.torch_dtype)
459
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
460
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
461
+ torch_dtype=self.torch_dtype
462
+ )
463
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
464
+ self.pipe.to(device)
465
+ self.seed = -1
466
+ self.a_prompt = 'best quality, extremely detailed'
467
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
468
+ 'fewer digits, cropped, worst quality, low quality'
469
+
470
+ @prompts(name="Generate Image Condition On Line Image",
471
+ description="useful when you want to generate a new real image from both the user description "
472
+ "and a straight line image. "
473
+ "like: generate a real image of a object or something from this straight line image, "
474
+ "or generate a new real image of a object or something from this straight lines. "
475
+ "The input to this tool should be a comma separated string of two, "
476
+ "representing the image_path and the user description. ")
477
+ def inference(self, inputs):
478
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
479
+ image = Image.open(image_path)
480
+ self.seed = random.randint(0, 65535)
481
+ seed_everything(self.seed)
482
+ prompt = f'{instruct_text}, {self.a_prompt}'
483
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
484
+ guidance_scale=9.0).images[0]
485
+ updated_image_path = get_new_image_name(image_path, func_name="line2image")
486
+ image.save(updated_image_path)
487
+ print(f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text: {instruct_text}, "
488
+ f"Output Text: {updated_image_path}")
489
+ return updated_image_path
490
+
491
+
492
+ class Image2Hed:
493
+ def __init__(self, device):
494
+ print("Initializing Image2Hed")
495
+ self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
496
+
497
+ @prompts(name="Hed Detection On Image",
498
+ description="useful when you want to detect the soft hed boundary of the image. "
499
+ "like: detect the soft hed boundary of this image, or hed boundary detection on image, "
500
+ "or perform hed boundary detection on this image, or detect soft hed boundary image of this image. "
501
+ "The input to this tool should be a string, representing the image_path")
502
+ def inference(self, inputs):
503
+ image = Image.open(inputs)
504
+ hed = self.detector(image)
505
+ updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
506
+ hed.save(updated_image_path)
507
+ print(f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}")
508
+ return updated_image_path
509
+
510
+
511
+ class HedText2Image:
512
+ def __init__(self, device):
513
+ print(f"Initializing HedText2Image to {device}")
514
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
515
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-hed",
516
+ torch_dtype=self.torch_dtype)
517
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
518
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
519
+ torch_dtype=self.torch_dtype
520
+ )
521
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
522
+ self.pipe.to(device)
523
+ self.seed = -1
524
+ self.a_prompt = 'best quality, extremely detailed'
525
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
526
+ 'fewer digits, cropped, worst quality, low quality'
527
+
528
+ @prompts(name="Generate Image Condition On Soft Hed Boundary Image",
529
+ description="useful when you want to generate a new real image from both the user description "
530
+ "and a soft hed boundary image. "
531
+ "like: generate a real image of a object or something from this soft hed boundary image, "
532
+ "or generate a new real image of a object or something from this hed boundary. "
533
+ "The input to this tool should be a comma separated string of two, "
534
+ "representing the image_path and the user description")
535
+ def inference(self, inputs):
536
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
537
+ image = Image.open(image_path)
538
+ self.seed = random.randint(0, 65535)
539
+ seed_everything(self.seed)
540
+ prompt = f'{instruct_text}, {self.a_prompt}'
541
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
542
+ guidance_scale=9.0).images[0]
543
+ updated_image_path = get_new_image_name(image_path, func_name="hed2image")
544
+ image.save(updated_image_path)
545
+ print(f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, "
546
+ f"Output Image: {updated_image_path}")
547
+ return updated_image_path
548
+
549
+
550
+ class Image2Scribble:
551
+ def __init__(self, device):
552
+ print("Initializing Image2Scribble")
553
+ self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
554
+
555
+ @prompts(name="Sketch Detection On Image",
556
+ description="useful when you want to generate a scribble of the image. "
557
+ "like: generate a scribble of this image, or generate a sketch from this image, "
558
+ "detect the sketch from this image. "
559
+ "The input to this tool should be a string, representing the image_path")
560
+ def inference(self, inputs):
561
+ image = Image.open(inputs)
562
+ scribble = self.detector(image, scribble=True)
563
+ updated_image_path = get_new_image_name(inputs, func_name="scribble")
564
+ scribble.save(updated_image_path)
565
+ print(f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble: {updated_image_path}")
566
+ return updated_image_path
567
+
568
+
569
+ class ScribbleText2Image:
570
+ def __init__(self, device):
571
+ print(f"Initializing ScribbleText2Image to {device}")
572
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
573
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-scribble",
574
+ torch_dtype=self.torch_dtype)
575
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
576
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
577
+ torch_dtype=self.torch_dtype
578
+ )
579
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
580
+ self.pipe.to(device)
581
+ self.seed = -1
582
+ self.a_prompt = 'best quality, extremely detailed'
583
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
584
+ 'fewer digits, cropped, worst quality, low quality'
585
+
586
+ @prompts(name="Generate Image Condition On Sketch Image",
587
+ description="useful when you want to generate a new real image from both the user description and "
588
+ "a scribble image or a sketch image. "
589
+ "The input to this tool should be a comma separated string of two, "
590
+ "representing the image_path and the user description")
591
+ def inference(self, inputs):
592
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
593
+ image = Image.open(image_path)
594
+ self.seed = random.randint(0, 65535)
595
+ seed_everything(self.seed)
596
+ prompt = f'{instruct_text}, {self.a_prompt}'
597
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
598
+ guidance_scale=9.0).images[0]
599
+ updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
600
+ image.save(updated_image_path)
601
+ print(f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text: {instruct_text}, "
602
+ f"Output Image: {updated_image_path}")
603
+ return updated_image_path
604
+
605
+
606
+ class Image2Pose:
607
+ def __init__(self, device):
608
+ print("Initializing Image2Pose")
609
+ self.detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
610
+
611
+ @prompts(name="Pose Detection On Image",
612
+ description="useful when you want to detect the human pose of the image. "
613
+ "like: generate human poses of this image, or generate a pose image from this image. "
614
+ "The input to this tool should be a string, representing the image_path")
615
+ def inference(self, inputs):
616
+ image = Image.open(inputs)
617
+ pose = self.detector(image)
618
+ updated_image_path = get_new_image_name(inputs, func_name="human-pose")
619
+ pose.save(updated_image_path)
620
+ print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}")
621
+ return updated_image_path
622
+
623
+
624
+ class PoseText2Image:
625
+ def __init__(self, device):
626
+ print(f"Initializing PoseText2Image to {device}")
627
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
628
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose",
629
+ torch_dtype=self.torch_dtype)
630
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
631
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
632
+ torch_dtype=self.torch_dtype)
633
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
634
+ self.pipe.to(device)
635
+ self.num_inference_steps = 20
636
+ self.seed = -1
637
+ self.unconditional_guidance_scale = 9.0
638
+ self.a_prompt = 'best quality, extremely detailed'
639
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
640
+ ' fewer digits, cropped, worst quality, low quality'
641
+
642
+ @prompts(name="Generate Image Condition On Pose Image",
643
+ description="useful when you want to generate a new real image from both the user description "
644
+ "and a human pose image. "
645
+ "like: generate a real image of a human from this human pose image, "
646
+ "or generate a new real image of a human from this pose. "
647
+ "The input to this tool should be a comma separated string of two, "
648
+ "representing the image_path and the user description")
649
+ def inference(self, inputs):
650
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
651
+ image = Image.open(image_path)
652
+ self.seed = random.randint(0, 65535)
653
+ seed_everything(self.seed)
654
+ prompt = f'{instruct_text}, {self.a_prompt}'
655
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
656
+ guidance_scale=9.0).images[0]
657
+ updated_image_path = get_new_image_name(image_path, func_name="pose2image")
658
+ image.save(updated_image_path)
659
+ print(f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text: {instruct_text}, "
660
+ f"Output Image: {updated_image_path}")
661
+ return updated_image_path
662
+
663
+
664
+ class Image2Seg:
665
+ def __init__(self, device):
666
+ print("Initializing Image2Seg")
667
+ self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
668
+ self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
669
+ self.ade_palette = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
670
+ [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
671
+ [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
672
+ [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
673
+ [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
674
+ [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
675
+ [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
676
+ [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
677
+ [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
678
+ [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
679
+ [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
680
+ [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
681
+ [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
682
+ [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
683
+ [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
684
+ [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
685
+ [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
686
+ [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
687
+ [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
688
+ [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
689
+ [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
690
+ [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
691
+ [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
692
+ [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
693
+ [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
694
+ [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
695
+ [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
696
+ [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
697
+ [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
698
+ [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
699
+ [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
700
+ [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
701
+ [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
702
+ [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
703
+ [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
704
+ [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
705
+ [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
706
+ [102, 255, 0], [92, 0, 255]]
707
+
708
+ @prompts(name="Segmentation On Image",
709
+ description="useful when you want to detect segmentations of the image. "
710
+ "like: segment this image, or generate segmentations on this image, "
711
+ "or perform segmentation on this image. "
712
+ "The input to this tool should be a string, representing the image_path")
713
+ def inference(self, inputs):
714
+ image = Image.open(inputs)
715
+ pixel_values = self.image_processor(image, return_tensors="pt").pixel_values
716
+ with torch.no_grad():
717
+ outputs = self.image_segmentor(pixel_values)
718
+ seg = self.image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
719
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
720
+ palette = np.array(self.ade_palette)
721
+ for label, color in enumerate(palette):
722
+ color_seg[seg == label, :] = color
723
+ color_seg = color_seg.astype(np.uint8)
724
+ segmentation = Image.fromarray(color_seg)
725
+ updated_image_path = get_new_image_name(inputs, func_name="segmentation")
726
+ segmentation.save(updated_image_path)
727
+ print(f"\nProcessed Image2Seg, Input Image: {inputs}, Output Pose: {updated_image_path}")
728
+ return updated_image_path
729
+
730
+
731
+ class SegText2Image:
732
+ def __init__(self, device):
733
+ print(f"Initializing SegText2Image to {device}")
734
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
735
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-seg",
736
+ torch_dtype=self.torch_dtype)
737
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
738
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
739
+ torch_dtype=self.torch_dtype)
740
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
741
+ self.pipe.to(device)
742
+ self.seed = -1
743
+ self.a_prompt = 'best quality, extremely detailed'
744
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
745
+ ' fewer digits, cropped, worst quality, low quality'
746
+
747
+ @prompts(name="Generate Image Condition On Segmentations",
748
+ description="useful when you want to generate a new real image from both the user description and segmentations. "
749
+ "like: generate a real image of a object or something from this segmentation image, "
750
+ "or generate a new real image of a object or something from these segmentations. "
751
+ "The input to this tool should be a comma separated string of two, "
752
+ "representing the image_path and the user description")
753
+ def inference(self, inputs):
754
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
755
+ image = Image.open(image_path)
756
+ self.seed = random.randint(0, 65535)
757
+ seed_everything(self.seed)
758
+ prompt = f'{instruct_text}, {self.a_prompt}'
759
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
760
+ guidance_scale=9.0).images[0]
761
+ updated_image_path = get_new_image_name(image_path, func_name="segment2image")
762
+ image.save(updated_image_path)
763
+ print(f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text: {instruct_text}, "
764
+ f"Output Image: {updated_image_path}")
765
+ return updated_image_path
766
+
767
+
768
+ class Image2Depth:
769
+ def __init__(self, device):
770
+ print("Initializing Image2Depth")
771
+ self.depth_estimator = pipeline('depth-estimation')
772
+
773
+ @prompts(name="Predict Depth On Image",
774
+ description="useful when you want to detect depth of the image. like: generate the depth from this image, "
775
+ "or detect the depth map on this image, or predict the depth for this image. "
776
+ "The input to this tool should be a string, representing the image_path")
777
+ def inference(self, inputs):
778
+ image = Image.open(inputs)
779
+ depth = self.depth_estimator(image)['depth']
780
+ depth = np.array(depth)
781
+ depth = depth[:, :, None]
782
+ depth = np.concatenate([depth, depth, depth], axis=2)
783
+ depth = Image.fromarray(depth)
784
+ updated_image_path = get_new_image_name(inputs, func_name="depth")
785
+ depth.save(updated_image_path)
786
+ print(f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth: {updated_image_path}")
787
+ return updated_image_path
788
+
789
+
790
+ class DepthText2Image:
791
+ def __init__(self, device):
792
+ print(f"Initializing DepthText2Image to {device}")
793
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
794
+ self.controlnet = ControlNetModel.from_pretrained(
795
+ "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=self.torch_dtype)
796
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
797
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
798
+ torch_dtype=self.torch_dtype)
799
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
800
+ self.pipe.to(device)
801
+ self.seed = -1
802
+ self.a_prompt = 'best quality, extremely detailed'
803
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
804
+ ' fewer digits, cropped, worst quality, low quality'
805
+
806
+ @prompts(name="Generate Image Condition On Depth",
807
+ description="useful when you want to generate a new real image from both the user description and depth image. "
808
+ "like: generate a real image of a object or something from this depth image, "
809
+ "or generate a new real image of a object or something from the depth map. "
810
+ "The input to this tool should be a comma separated string of two, "
811
+ "representing the image_path and the user description")
812
+ def inference(self, inputs):
813
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
814
+ image = Image.open(image_path)
815
+ self.seed = random.randint(0, 65535)
816
+ seed_everything(self.seed)
817
+ prompt = f'{instruct_text}, {self.a_prompt}'
818
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
819
+ guidance_scale=9.0).images[0]
820
+ updated_image_path = get_new_image_name(image_path, func_name="depth2image")
821
+ image.save(updated_image_path)
822
+ print(f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text: {instruct_text}, "
823
+ f"Output Image: {updated_image_path}")
824
+ return updated_image_path
825
+
826
+
827
+ class Image2Normal:
828
+ def __init__(self, device):
829
+ print("Initializing Image2Normal")
830
+ self.depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
831
+ self.bg_threhold = 0.4
832
+
833
+ @prompts(name="Predict Normal Map On Image",
834
+ description="useful when you want to detect norm map of the image. "
835
+ "like: generate normal map from this image, or predict normal map of this image. "
836
+ "The input to this tool should be a string, representing the image_path")
837
+ def inference(self, inputs):
838
+ image = Image.open(inputs)
839
+ original_size = image.size
840
+ image = self.depth_estimator(image)['predicted_depth'][0]
841
+ image = image.numpy()
842
+ image_depth = image.copy()
843
+ image_depth -= np.min(image_depth)
844
+ image_depth /= np.max(image_depth)
845
+ x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
846
+ x[image_depth < self.bg_threhold] = 0
847
+ y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
848
+ y[image_depth < self.bg_threhold] = 0
849
+ z = np.ones_like(x) * np.pi * 2.0
850
+ image = np.stack([x, y, z], axis=2)
851
+ image /= np.sum(image ** 2.0, axis=2, keepdims=True) ** 0.5
852
+ image = (image * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
853
+ image = Image.fromarray(image)
854
+ image = image.resize(original_size)
855
+ updated_image_path = get_new_image_name(inputs, func_name="normal-map")
856
+ image.save(updated_image_path)
857
+ print(f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth: {updated_image_path}")
858
+ return updated_image_path
859
+
860
+
861
+ class NormalText2Image:
862
+ def __init__(self, device):
863
+ print(f"Initializing NormalText2Image to {device}")
864
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
865
+ self.controlnet = ControlNetModel.from_pretrained(
866
+ "fusing/stable-diffusion-v1-5-controlnet-normal", torch_dtype=self.torch_dtype)
867
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
868
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
869
+ torch_dtype=self.torch_dtype)
870
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
871
+ self.pipe.to(device)
872
+ self.seed = -1
873
+ self.a_prompt = 'best quality, extremely detailed'
874
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
875
+ ' fewer digits, cropped, worst quality, low quality'
876
+
877
+ @prompts(name="Generate Image Condition On Normal Map",
878
+ description="useful when you want to generate a new real image from both the user description and normal map. "
879
+ "like: generate a real image of a object or something from this normal map, "
880
+ "or generate a new real image of a object or something from the normal map. "
881
+ "The input to this tool should be a comma separated string of two, "
882
+ "representing the image_path and the user description")
883
+ def inference(self, inputs):
884
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
885
+ image = Image.open(image_path)
886
+ self.seed = random.randint(0, 65535)
887
+ seed_everything(self.seed)
888
+ prompt = f'{instruct_text}, {self.a_prompt}'
889
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
890
+ guidance_scale=9.0).images[0]
891
+ updated_image_path = get_new_image_name(image_path, func_name="normal2image")
892
+ image.save(updated_image_path)
893
+ print(f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text: {instruct_text}, "
894
+ f"Output Image: {updated_image_path}")
895
+ return updated_image_path
896
+
897
+
898
+ class VisualQuestionAnswering:
899
+ def __init__(self, device):
900
+ print(f"Initializing VisualQuestionAnswering to {device}")
901
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
902
+ self.device = device
903
+ self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
904
+ self.model = BlipForQuestionAnswering.from_pretrained(
905
+ "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype).to(self.device)
906
+
907
+ @prompts(name="Answer Question About The Image",
908
+ description="useful when you need an answer for a question based on an image. "
909
+ "like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
910
+ "The input to this tool should be a comma separated string of two, representing the image_path and the question")
911
+ def inference(self, inputs):
912
+ image_path, question = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
913
+ raw_image = Image.open(image_path).convert('RGB')
914
+ inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device, self.torch_dtype)
915
+ out = self.model.generate(**inputs)
916
+ answer = self.processor.decode(out[0], skip_special_tokens=True)
917
+ print(f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
918
+ f"Output Answer: {answer}")
919
+ return answer
920
+
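+ # Editorial usage sketch (hypothetical path, not part of the original project):
+ #   vqa = VisualQuestionAnswering(device="cpu")
+ #   vqa.inference("image/example.png, how many cats are in this picture")
+ #   # -> a short BLIP-VQA answer string such as "2"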
921
+
922
+ class InfinityOutPainting:
923
+ template_model = True # Add this line to show this is a template model.
924
+ def __init__(self, ImageCaptioning, ImageEditing, VisualQuestionAnswering):
925
+ self.llm = OpenAI(temperature=0)
926
+ self.ImageCaption = ImageCaptioning
927
+ self.ImageEditing = ImageEditing
928
+ self.ImageVQA = VisualQuestionAnswering
929
+ self.a_prompt = 'best quality, extremely detailed'
930
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
931
+ 'fewer digits, cropped, worst quality, low quality'
932
+
933
+ def get_BLIP_vqa(self, image, question):
934
+ inputs = self.ImageVQA.processor(image, question, return_tensors="pt").to(self.ImageVQA.device,
935
+ self.ImageVQA.torch_dtype)
936
+ out = self.ImageVQA.model.generate(**inputs)
937
+ answer = self.ImageVQA.processor.decode(out[0], skip_special_tokens=True)
938
+ print(f"\nProcessed VisualQuestionAnswering, Input Question: {question}, Output Answer: {answer}")
939
+ return answer
940
+
941
+ def get_BLIP_caption(self, image):
942
+ inputs = self.ImageCaption.processor(image, return_tensors="pt").to(self.ImageCaption.device,
943
+ self.ImageCaption.torch_dtype)
944
+ out = self.ImageCaption.model.generate(**inputs)
945
+ BLIP_caption = self.ImageCaption.processor.decode(out[0], skip_special_tokens=True)
946
+ return BLIP_caption
947
+
948
+ def check_prompt(self, prompt):
949
+ check = f"Here is a paragraph with adjectives. " \
950
+ f"{prompt} " \
951
+ f"Please change all plural forms in the adjectives to singular forms. "
952
+ return self.llm(check)
953
+
954
+ def get_imagine_caption(self, image, imagine):
955
+ BLIP_caption = self.get_BLIP_caption(image)
956
+ background_color = self.get_BLIP_vqa(image, 'what is the background color of this image')
957
+ style = self.get_BLIP_vqa(image, 'what is the style of this image')
958
+ imagine_prompt = f"let's pretend you are an excellent painter and now " \
959
+ f"there is an incomplete painting with {BLIP_caption} in the center, " \
960
+ f"please imagine the complete painting and describe it" \
961
+ f"you should consider the background color is {background_color}, the style is {style}" \
962
+ f"You should make the painting as vivid and realistic as possible" \
963
+ f"You can not use words like painting or picture" \
964
+ f"and you should use no more than 50 words to describe it"
965
+ caption = self.llm(imagine_prompt) if imagine else BLIP_caption
966
+ caption = self.check_prompt(caption)
967
+ print(f'BLIP observation: {BLIP_caption}, ChatGPT imagined it as: {caption}') if imagine else print(
968
+ f'Prompt: {caption}')
969
+ return caption
970
+
971
+ def resize_image(self, image, max_size=1000000, multiple=8):
972
+ aspect_ratio = image.size[0] / image.size[1]
973
+ new_width = int(math.sqrt(max_size * aspect_ratio))
974
+ new_height = int(new_width / aspect_ratio)
975
+ new_width, new_height = new_width - (new_width % multiple), new_height - (new_height % multiple)
976
+ return image.resize((new_width, new_height))
977
+
978
+ def dowhile(self, original_img, tosize, expand_ratio, imagine, usr_prompt):
979
+ old_img = original_img
980
+ while (old_img.size != tosize):
981
+ prompt = self.check_prompt(usr_prompt) if usr_prompt else self.get_imagine_caption(old_img, imagine)
982
+ crop_w = 15 if old_img.size[0] != tosize[0] else 0
983
+ crop_h = 15 if old_img.size[1] != tosize[1] else 0
984
+ old_img = ImageOps.crop(old_img, (crop_w, crop_h, crop_w, crop_h))
985
+ temp_canvas_size = (expand_ratio * old_img.width if expand_ratio * old_img.width < tosize[0] else tosize[0],
986
+ expand_ratio * old_img.height if expand_ratio * old_img.height < tosize[1] else tosize[
987
+ 1])
988
+ temp_canvas, temp_mask = Image.new("RGB", temp_canvas_size, color="white"), Image.new("L", temp_canvas_size,
989
+ color="white")
990
+ x, y = (temp_canvas.width - old_img.width) // 2, (temp_canvas.height - old_img.height) // 2
991
+ temp_canvas.paste(old_img, (x, y))
992
+ temp_mask.paste(0, (x, y, x + old_img.width, y + old_img.height))
993
+ resized_temp_canvas, resized_temp_mask = self.resize_image(temp_canvas), self.resize_image(temp_mask)
994
+ image = self.ImageEditing.inpaint(prompt=prompt, image=resized_temp_canvas, mask_image=resized_temp_mask,
995
+ height=resized_temp_canvas.height, width=resized_temp_canvas.width,
996
+ num_inference_steps=50).images[0].resize(
997
+ (temp_canvas.width, temp_canvas.height), Image.LANCZOS)  # Image.ANTIALIAS was removed in newer Pillow releases; LANCZOS is the equivalent filter
998
+ image = blend_gt2pt(old_img, image)
999
+ old_img = image
1000
+ return old_img
1001
+
1002
+ @prompts(name="Extend An Image",
1003
+ description="useful when you need to extend an image into a larger image."
1004
+ "like: extend the image into a resolution of 2048x1024, extend the image into 2048x1024. "
1005
+ "The input to this tool should be a comma separated string of two, representing the image_path and the resolution of widthxheight")
1006
+ def inference(self, inputs):
1007
+ image_path, resolution = inputs.split(',')
1008
+ width, height = resolution.split('x')
1009
+ tosize = (int(width), int(height))
1010
+ image = Image.open(image_path)
1011
+ image = ImageOps.crop(image, (10, 10, 10, 10))
1012
+ out_painted_image = self.dowhile(image, tosize, 4, True, False)
1013
+ updated_image_path = get_new_image_name(image_path, func_name="outpainting")
1014
+ out_painted_image.save(updated_image_path)
1015
+ print(f"\nProcessed InfinityOutPainting, Input Image: {image_path}, Input Resolution: {resolution}, "
1016
+ f"Output Image: {updated_image_path}")
1017
+ return updated_image_path
1018
+
1019
+ #############################################New Tool#############################################
1020
+ class Grounded_dino_sam_inpainting:
1021
+ def __init__(self, device):
1022
+ print(f"Initializing BLIP")
1023
+ self.device = device
1024
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
1025
+ self.blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
1026
+ self.blip_model = BlipForConditionalGeneration.from_pretrained(
1027
+ "Salesforce/blip-image-captioning-large", torch_dtype=self.torch_dtype
1028
+ ).to(self.device)
1029
+ print(f"Initializing GroundingDINO")
1030
+ self.dino_model = load_model(
1031
+ model_config_path="GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
1032
+ model_checkpoint_path="groundingdino_swint_ogc.pth",
1033
+ device=self.device
1034
+ )
1035
+ print(f"Initializing Segment Anthing")
1036
+ self.sam_model = build_sam(checkpoint="sam_vit_h_4b8939.pth").to(self.device)
1037
+ print(f"Initializing Stable Diffusion")
1038
+ self.sd_pipe = StableDiffusionInpaintPipeline.from_pretrained(
1039
+ "runwayml/stable-diffusion-inpainting", torch_dtype=self.torch_dtype
1040
+ ).to(self.device)
1041
+
1042
+ @prompts(name="Get Photo Description",
1043
+ description="useful when you want to know what is inside the photo. receives image_path as input. "
1044
+ "The input to this tool should be a string, representing the image_path. ")
1045
+ def inference_caption(self, image_path):
1046
+ inputs = self.blip_processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
1047
+ out = self.blip_model.generate(**inputs)
1048
+ captions = self.blip_processor.decode(out[0], skip_special_tokens=True)
1049
+ print(f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}")
1050
+ return captions
1051
+
1052
+ def _detect_object(self, image_path, text_prompt, func_name):
1053
+ image_pil, image = load_image(image_path)
1054
+ boxes_filt, scores, pred_phrases = get_grounding_output(
1055
+ self.dino_model, image, text_prompt, 0.3, 0.25, device=self.device
1056
+ )
1057
+ # use NMS to handle overlapped boxes
1058
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
1059
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, 0.5).numpy().tolist()
1060
+ boxes_filt = boxes_filt[nms_idx]
1061
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
1062
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
1063
+ size = image_pil.size
1064
+ pred_dict = {
1065
+ "boxes": boxes_filt,
1066
+ "size": [size[1], size[0]], # H,W
1067
+ "labels": pred_phrases,
1068
+ }
1069
+ image_with_box = plot_boxes_to_image(image_pil, pred_dict)[0]
1070
+ updated_image_path = get_new_image_name(image_path, func_name)
1071
+ image_with_box.save(updated_image_path)
1072
+ return updated_image_path
1073
+
1074
+ @prompts(name="Detect One Object In Image",
1075
+ description="useful when you want to detect the specific object in the image. "
1076
+ "like: detect the black dog in the image. "
1077
+ "The input to this tool should be a comma separated string of two, "
1078
+ "representing the image_path and the description of specific object.")
1079
+ def inference_detect_one_object(self, inputs):
1080
+ image_path, text_prompt = inputs.split(',')
1081
+ print(f"\nInput Text Prompt: {text_prompt}")
1082
+ updated_image_path = self._detect_object(image_path, text_prompt, func_name="det-object")
1083
+ print(f"Processed DetectOneObject, Input Image: {image_path}, Output Image: {updated_image_path}")
1084
+ return updated_image_path
1085
+
1086
+ @prompts(name="Detect Multiple Objects In Image",
1087
+ description="useful when you want to detect two or more specific objects in the image. "
1088
+ "like: detect the black dog and white cat in the image. "
1089
+ "The input to this tool should be a comma separated string of two, "
1090
+ "representing the image_path and the description of multiple specific objects. "
1091
+ "Different description should be separated by symbol '&', "
1092
+ "like 'black dog & white cat'. ")
1093
+ def inference_detect_multi_object(self, inputs):
1094
+ image_path, text_prompt = inputs.split(',')
1095
+ processed_text_prompt = text_prompt.replace(' &', ',')
1096
+ print(f"\nOriginal Text Prompt: {text_prompt}, Input Text Prompt: {processed_text_prompt}")
1097
+ updated_image_path = self._detect_object(image_path, processed_text_prompt, func_name="det-objects")
1098
+ print(f"Processed DetectMultiObject, Input Image: {image_path}, Output Image: {updated_image_path}")
1099
+ return updated_image_path
1100
+
1101
+ # modified from https://github.com/Cheems-Seminar/segment-anything-and-name-it/blob/58408f1e4e340f565c5ef6b0c71920cdcd30b213/chatbot.py#L1046
1102
+ @prompts(name="Segment Anything in Image",
1103
+ description="useful when you want to segment anything in the image. "
1104
+ "like: segment anything in the image. "
1105
+ "The input to this tool should be a string, representing the image_path. ")
1106
+ def inference_segment_anything(self, image_path):
1107
+ image = cv2.imread(image_path)
1108
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1109
+ mask_generator = SamAutomaticMaskGenerator(self.sam_model)
1110
+ anns = mask_generator.generate(image)
1111
+ plt.figure(figsize=(10, 10))
1112
+ plt.imshow(image)
1113
+ sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
1114
+ ax = plt.gca()
1115
+ ax.set_autoscale_on(False)
1116
+ for ann in sorted_anns:
1117
+ m = ann['segmentation']
1118
+ img = np.ones((m.shape[0], m.shape[1], 3))
1119
+ color_mask = np.random.random((1, 3)).tolist()[0]
1120
+ for i in range(3):
1121
+ img[:,:,i] = color_mask[i]
1122
+ ax.imshow(np.dstack((img, m*0.35)))
1123
+ plt.axis('off')
1124
+ updated_image_path = get_new_image_name(image_path, func_name="seg-any")
1125
+ plt.savefig(updated_image_path, bbox_inches='tight', dpi=300, pad_inches=0.0)
1126
+ print(f"\nProcessed SegmentAnything, Input Image: {image_path}, Output Image: {updated_image_path}")
1127
+ return updated_image_path
1128
+
1129
+ def _segment_object(self, image_path, text_prompt, func_name):
1130
+ image_pil, image = load_image(image_path)
1131
+ # run grounding dino model
1132
+ boxes_filt, scores, pred_phrases = get_grounding_output(
1133
+ self.dino_model, image, text_prompt, 0.25, 0.2, device=self.device
1134
+ )
1135
+ # initialize SAM
1136
+ predictor = SamPredictor(self.sam_model)
1137
+ image = cv2.imread(image_path)
1138
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1139
+ predictor.set_image(image)
1140
+ size = image_pil.size
1141
+ H, W = size[1], size[0]
1142
+ for i in range(boxes_filt.size(0)):
1143
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
1144
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
1145
+ boxes_filt[i][2:] += boxes_filt[i][:2]
1146
+ boxes_filt = boxes_filt.cpu()
1147
+ # use NMS to handle overlapped boxes
1148
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
1149
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, 0.5).numpy().tolist()
1150
+ boxes_filt = boxes_filt[nms_idx]
1151
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
1152
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
1153
+ # generate mask
1154
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2])
1155
+ masks, _, _ = predictor.predict_torch(
1156
+ point_coords = None,
1157
+ point_labels = None,
1158
+ boxes = transformed_boxes.to(self.device),
1159
+ multimask_output = False,
1160
+ )
1161
+ # remove the mask when area < area_thresh (in pixels)
1162
+ new_masks = []
1163
+ for mask in masks:
1164
+ # reshape to be used in remove_small_regions()
1165
+ mask = mask.cpu().numpy().squeeze()
1166
+ mask, _ = remove_small_regions(mask, 100, mode="holes")
1167
+ mask, _ = remove_small_regions(mask, 100, mode="islands")
1168
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
1169
+ masks = torch.stack(new_masks, dim=0)
1170
+ # add box and mask in the image
1171
+ plt.figure(figsize=(10, 10))
1172
+ plt.imshow(image)
1173
+ for mask in masks:
1174
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
1175
+ for box, label in zip(boxes_filt, pred_phrases):
1176
+ show_box(box.numpy(), plt.gca(), label)
1177
+ plt.axis('off')
1178
+ updated_image_path = get_new_image_name(image_path, func_name)
1179
+ plt.savefig(updated_image_path, bbox_inches='tight', dpi=300, pad_inches=0.0)
1180
+ return updated_image_path, pred_phrases
1181
+
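+ # Editorial summary of _segment_object: GroundingDINO proposes phrase-grounded boxes, NMS
+ # (IoU 0.5) removes duplicates, SAM turns the surviving boxes into masks, holes/islands under
+ # 100 px are removed, and the boxes plus masks are rendered onto the image with matplotlib.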
1182
+ @prompts(name="Segment One Object In Image",
1183
+ description="useful when you want to segment the specific object in the image. "
1184
+ "like: segment the black dog in the image, or mask the black dog in the image. "
1185
+ "The input to this tool should be a comma separated string of two, "
1186
+ "representing the image_path and the description of specific object.")
1187
+ def inference_segment_one_object(self, inputs):
1188
+ image_path, text_prompt = inputs.split(',')
1189
+ print(f"\nInput Text Prompt: {text_prompt}")
1190
+ updated_image_path, _ = self._segment_object(image_path, text_prompt, func_name="seg-object")
1191
+ print(f"Processed SegmentOneObject, Input Image: {image_path}, Output Image: {updated_image_path}")
1192
+ return updated_image_path
1193
+
1194
+ @prompts(name="Segment Multiple Object In Image",
1195
+ description="useful when you want to segment two or more specific objects in the image. "
1196
+ "like: segment the black dog and white cat in the image. "
1197
+ "The input to this tool should be a comma separated string of two, "
1198
+ "representing the image_path and the description of multiple specific objects. "
1199
+ "Different description should be separated by symbol '&', "
1200
+ "like 'black dog & white cat'. ")
1201
+ def inference_segment_multi_object(self, inputs):
1202
+ image_path, text_prompt = inputs.split(',')
1203
+ processed_text_prompt = text_prompt.replace(' &', ',')
1204
+ print("\nOriginal Text Prompt: {text_prompt}, Input Text Prompt: {processed_text_prompt}, ")
1205
+ updated_image_path, _ = self._segment_object(image_path, processed_text_prompt, func_name="seg-objects")
1206
+ print(f"Processed SegmentMultiObject, Input Image: {image_path}, Output Image: {updated_image_path}")
1207
+ return updated_image_path
1208
+
1209
+ @prompts(name="Auto Label the Image",
1210
+ description="useful when you want to label the image automatically. "
1211
+ "like: help me label the image. "
1212
+ "The input to this tool should be a string, representing the image_path. ")
1213
+ def inference_auto_segment_object(self, image_path):
1214
+ inputs = self.blip_processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
1215
+ out = self.blip_model.generate(**inputs)
1216
+ caption = self.blip_processor.decode(out[0], skip_special_tokens=True)
1217
+ text_prompt = generate_tags(caption, split=",")
1218
+ print(f"\nCaption: {caption}")
1219
+ print(f"Tags: {text_prompt}")
1220
+ updated_image_path, pred_phrases = self._segment_object(image_path, text_prompt, func_name="auto-label")
1221
+ caption = check_caption(caption, pred_phrases)
1222
+ print(f"Revise caption with number: {caption}")
1223
+ print(f"Processed SegmentMultiObject, Input Image: {image_path}, Caption: {caption}, "
1224
+ f"Text Prompt: {text_prompt}, Output Image: {updated_image_path}")
1225
+ return updated_image_path
1226
+
1227
+ def _inpainting(self, image_path, to_be_replaced_txt, replace_with_txt, func_name):
1228
+ image_pil, image = load_image(image_path)
1229
+ # run grounding dino model
1230
+ boxes_filt, scores, pred_phrases = get_grounding_output(
1231
+ self.dino_model, image, to_be_replaced_txt, 0.3, 0.25, device=self.device
1232
+ )
1233
+ # initialize SAM
1234
+ predictor = SamPredictor(self.sam_model)
1235
+ image = cv2.imread(image_path)
1236
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1237
+ predictor.set_image(image)
1238
+ size = image_pil.size
1239
+ H, W = size[1], size[0]
1240
+ for i in range(boxes_filt.size(0)):
1241
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
1242
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
1243
+ boxes_filt[i][2:] += boxes_filt[i][:2]
1244
+ boxes_filt = boxes_filt.cpu()
1245
+ # generate mask
1246
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2])
1247
+ masks, _, _ = predictor.predict_torch(
1248
+ point_coords = None,
1249
+ point_labels = None,
1250
+ boxes = transformed_boxes.to(self.device),
1251
+ multimask_output = False,
1252
+ )
1253
+ # inpainting pipeline
1254
+ mask = masks[0][0].cpu().numpy() # simply choose the first mask, which will be refined in a future release
1255
+ mask_pil = Image.fromarray(mask).resize((512, 512))
1256
+ image_pil = Image.fromarray(image).resize((512, 512))
1257
+ image = self.sd_pipe(prompt=replace_with_txt, image=image_pil, mask_image=mask_pil).images[0]
1258
+ updated_image_path = get_new_image_name(image_path, func_name)
1259
+ image.save(updated_image_path)
1260
+ return updated_image_path
1261
+
1262
+ @prompts(name="Replace Something From The Photo",
1263
+ description="useful when you want to replace an object from the object description or "
1264
+ "location with another object from its description. "
1265
+ "The input to this tool should be a comma separated string of three, "
1266
+ "representing the image_path, the object to be replaced, the object to be replaced with ")
1267
+ def inference_replace(self, inputs):
1268
+ image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
1269
+ print(f"\nReplace {to_be_replaced_txt} to {replace_with_txt}")
1270
+ updated_image_path = self._inpainting(image_path, to_be_replaced_txt, replace_with_txt, 'replace-something')
1271
+ print(f"Processed ImageEditing, Input Image: {image_path}, Output Image: {updated_image_path}")
1272
+ return updated_image_path
1273
+
1274
+ #############################################New Tool#############################################
1275
+
1276
+
1277
+ class ConversationBot:
1278
+ def __init__(self, load_dict):
1279
+ # load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...}
1280
+ print(f"Initializing VisualChatGPT, load_dict={load_dict}")
1281
+ if 'ImageCaptioning' not in load_dict and 'Grounded_dino_sam_inpainting' not in load_dict:
1282
+ raise ValueError("You have to load ImageCaptioning or Grounded_dino_sam_inpainting as a basic function for VisualChatGPT")
1283
+
1284
+ self.models = {}
1285
+ # Load Basic Foundation Models
1286
+ for class_name, device in load_dict.items():
1287
+ self.models[class_name] = globals()[class_name](device=device)
1288
+
1289
+ # Load Template Foundation Models
1290
+ for class_name, module in globals().items():
1291
+ if getattr(module, 'template_model', False):
1292
+ template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
1293
+ loaded_names = set([type(e).__name__ for e in self.models.values()])
1294
+ if template_required_names.issubset(loaded_names):
1295
+ self.models[class_name] = globals()[class_name](
1296
+ **{name: self.models[name] for name in template_required_names})
1297
+ self.tools = []
1298
+ for instance in self.models.values():
1299
+ for e in dir(instance):
1300
+ if e.startswith('inference'):
1301
+ func = getattr(instance, e)
1302
+ self.tools.append(Tool(name=func.name, description=func.description, func=func))
1303
+ self.llm = OpenAI(temperature=0)
1304
+ self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
1305
+
1306
+ def run_text(self, text, state):
1307
+ self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
1308
+ res = self.agent({"input": text.strip()})
1309
+ res['output'] = res['output'].replace("\\", "/")
1310
+ response = re.sub('(image/[-\w]*.png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
1311
+ state = state + [(text, response)]
1312
+ print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n"
1313
+ f"Current Memory: {self.agent.memory.buffer}")
1314
+ return state, state
1315
+
1316
+ def run_image(self, image, state, txt, lang):
1317
+ # image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
1318
+ # print("======>Auto Resize Image...")
1319
+ # img = Image.open(image.name)
1320
+ # width, height = img.size
1321
+ # ratio = min(512 / width, 512 / height)
1322
+ # width_new, height_new = (round(width * ratio), round(height * ratio))
1323
+ # width_new = int(np.round(width_new / 64.0)) * 64
1324
+ # height_new = int(np.round(height_new / 64.0)) * 64
1325
+ # img = img.resize((width_new, height_new))
1326
+ # img = img.convert('RGB')
1327
+ # img.save(image_filename)
1328
+ # img.save(image_filename, "PNG")
1329
+ # print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
1330
+ ## Directly use original image for better results
1331
+ suffix = image.name.split('.')[-1]
1332
+ image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.{suffix}")
1333
+ shutil.copy(image.name, image_filename)
1334
+ if 'Grounded_dino_sam_inpainting' in self.models:
1335
+ description = self.models['Grounded_dino_sam_inpainting'].inference_caption(image_filename)
1336
+ else:
1337
+ description = self.models['ImageCaptioning'].inference(image_filename)
1338
+ if lang == 'Chinese':
1339
+ Human_prompt = f'\nHuman: 提供一张名为 {image_filename}的图片。它的描述是: {description}。 这些信息帮助你理解这个图像,但是你应该使用工具来完成下面的任务,而不是直接从我的描述中想象。 如果你明白了, 说 \"收到\". \n'
1340
+ AI_prompt = "收到。 "
1341
+ else:
1342
+ Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
1343
+ AI_prompt = "Received. "
1344
+ self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
1345
+ state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)]
1346
+ print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
1347
+ f"Current Memory: {self.agent.memory.buffer}")
1348
+ return state, state, f'{txt} {image_filename} '
1349
+
1350
+ def init_agent(self, openai_api_key, lang):
1351
+ self.memory.clear() #clear previous history
1352
+ if lang=='English':
1353
+ PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = VISUAL_CHATGPT_PREFIX, VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, VISUAL_CHATGPT_SUFFIX
1354
+ place = "Enter text and press enter, or upload an image"
1355
+ label_clear = "Clear"
1356
+ else:
1357
+ PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = VISUAL_CHATGPT_PREFIX_CN, VISUAL_CHATGPT_FORMAT_INSTRUCTIONS_CN, VISUAL_CHATGPT_SUFFIX_CN
1358
+ place = "输入文字并回车,或者上传图片"
1359
+ label_clear = "清除"
1360
+ self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
1361
+ self.agent = initialize_agent(
1362
+ self.tools,
1363
+ self.llm,
1364
+ agent="conversational-react-description",
1365
+ verbose=True,
1366
+ memory=self.memory,
1367
+ return_intermediate_steps=True,
1368
+ agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX}, )
1369
+ return gr.update(visible = True), gr.update(visible = True)
1370
+
1371
+
1372
+ whisper_model = whisper.load_model("base").to('cuda:0')
1373
+ def speech_recognition(speech_file):
1374
+ # whisper
1375
+ # load audio and pad/trim it to fit 30 seconds
1376
+ audio = whisper.load_audio(speech_file)
1377
+ audio = whisper.pad_or_trim(audio)
1378
+
1379
+ # make log-Mel spectrogram and move to the same device as the model
1380
+ mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
1381
+
1382
+ # detect the spoken language
1383
+ _, probs = whisper_model.detect_language(mel)
1384
+ speech_language = max(probs, key=probs.get)
1385
+ print(f'\nDetect Language: {speech_language}')
1386
+
1387
+ # decode the audio
1388
+ options = whisper.DecodingOptions(fp16 = False)
1389
+ result = whisper.decode(whisper_model, mel, options)
1390
+ print(result.text)
1391
+
1392
+ return result.text
1393
+
1394
+
1395
+ if __name__ == '__main__':
1396
+ load_dict = {'Grounded_dino_sam_inpainting': 'cuda:0'}
1397
+ # load_dict = {'ImageCaptioning': 'cuda:0'}
1398
+
1399
+ bot = ConversationBot(load_dict)
1400
+
1401
+ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
1402
+ gr.Markdown("<h3><center>ChatBot</center></h3>")
1403
+ gr.Markdown(
1404
+ """This is a demo to the work [Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything).<br>
1405
+ """
1406
+ )
1407
+
1408
+ with gr.Row():
1409
+ lang = gr.Radio(choices=['Chinese', 'English'], value='English', label='Language')
1410
+ openai_api_key_textbox = gr.Textbox(
1411
+ placeholder="Paste your OpenAI API key here to start ChatBot(sk-...) and press Enter ↵️",
1412
+ show_label=False,
1413
+ lines=1,
1414
+ type="password",
1415
+ )
1416
+
1417
+ chatbot = gr.Chatbot(elem_id="chatbot", label="ChatBot")
1418
+ state = gr.State([])
1419
+
1420
+ with gr.Row(visible=False) as input_raws:
1421
+ with gr.Column(scale=0.7):
1422
+ txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
1423
+ with gr.Column(scale=0.10, min_width=0):
1424
+ run = gr.Button("🏃‍♂️Run")
1425
+ with gr.Column(scale=0.10, min_width=0):
1426
+ clear = gr.Button("🔄Clear️")
1427
+ with gr.Column(scale=0.10, min_width=0):
1428
+ btn = gr.UploadButton("🖼️Upload", file_types=["image"])
1429
+ with gr.Row(visible=False, equal_height=True) as audio_raw:
1430
+ with gr.Column(scale=0.85):
1431
+ audio = gr.Audio(source="microphone", type="filepath", label="Just say it!")
1432
+ with gr.Column(scale=0.15):
1433
+ transcribe = gr.Button("Transcribe")
1434
+
1435
+ gr.Examples(
1436
+ examples=[
1437
+ "Describe this image",
1438
+ "Detect the dog",
1439
+ "Detect the dog and the cat",
1440
+ "Segment anything",
1441
+ "Segment the dog",
1442
+ "Help me label the image",
1443
+ "Replace the dog with a cat",
1444
+ ],
1445
+ inputs=txt
1446
+ )
1447
+
1448
+ openai_api_key_textbox.submit(bot.init_agent, [openai_api_key_textbox, lang], [input_raws, audio_raw])
1449
+ transcribe.click(speech_recognition, inputs=[audio], outputs=[txt])
1450
+ txt.submit(bot.run_text, [txt, state], [chatbot, state])
1451
+ txt.submit(lambda: "", None, txt)
1452
+ run.click(bot.run_text, [txt, state], [chatbot, state])
1453
+ run.click(lambda: "", None, txt)
1454
+ btn.upload(bot.run_image, [btn, state, txt, lang], [chatbot, state, txt])
1455
+ clear.click(bot.memory.clear)
1456
+ clear.click(lambda: [], None, chatbot)
1457
+ clear.click(lambda: [], None, state)
1458
+
1459
+ demo.launch(server_name="0.0.0.0", server_port=10010)
1460
+
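For reference, the normalized-box post-processing that _segment_object and _inpainting above repeat inline can be read as a small standalone helper. The sketch below is not part of the diff; the function name is illustrative, and it only mirrors the cxcywh-to-xyxy arithmetic applied with the image width W and height H.

import torch

def cxcywh_norm_to_xyxy_pixels(boxes: torch.Tensor, W: int, H: int) -> torch.Tensor:
    # Grounding DINO returns (N, 4) boxes as normalized (cx, cy, w, h);
    # SAM's box prompts expect pixel-space (x0, y0, x1, y1).
    boxes = boxes * torch.tensor([W, H, W, H], dtype=boxes.dtype)
    xy0 = boxes[:, :2] - boxes[:, 2:] / 2   # top-left corner
    xy1 = xy0 + boxes[:, 2:]                # bottom-right corner
    return torch.cat([xy0, xy1], dim=1)

# a centered half-size box on a 640x480 image -> tensor([[160., 120., 480., 360.]])
print(cxcywh_norm_to_xyxy_pixels(torch.tensor([[0.5, 0.5, 0.5, 0.5]]), 640, 480))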
external/Grounded-Segment-Anything/cog.yaml ADDED
@@ -0,0 +1,27 @@
1
+ # Configuration for Cog ⚙️
2
+ # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md
3
+
4
+ build:
5
+ gpu: true
6
+ cuda: "11.7"
7
+ system_packages:
8
+ - "libgl1-mesa-glx"
9
+ - "libglib2.0-0"
10
+ python_version: "3.10"
11
+ python_packages:
12
+ - "timm==0.9.2"
13
+ - "transformers==4.30.2"
14
+ - "fairscale==0.4.13"
15
+ - "pycocoevalcap==1.2"
16
+ - "torch==1.13.0"
17
+ - "torchvision==0.14.0"
18
+ - "Pillow==9.5.0"
19
+ - "scipy==1.10.1"
20
+ - "opencv-python==4.7.0.72"
21
+ - "addict==2.4.0"
22
+ - "yapf==0.40.0"
23
+ - "supervision==0.10.0"
24
+ - git+https://github.com/openai/CLIP.git
25
+ - ipython
26
+
27
+ predict: "predict.py:Predictor"
external/Grounded-Segment-Anything/gradio_app.py ADDED
@@ -0,0 +1,400 @@
1
+ import os
2
+ import random
3
+ import cv2
4
+ from scipy import ndimage
5
+
6
+ import gradio as gr
7
+ import argparse
8
+ import litellm
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torchvision
13
+ from PIL import Image, ImageDraw, ImageFont
14
+
15
+ # Grounding DINO
16
+ import GroundingDINO.groundingdino.datasets.transforms as T
17
+ from GroundingDINO.groundingdino.models import build_model
18
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
19
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
20
+
21
+ # segment anything
22
+ from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator
23
+ import numpy as np
24
+
25
+ # diffusers
26
+ import torch
27
+ from diffusers import StableDiffusionInpaintPipeline
28
+
29
+ # BLIP
30
+ from transformers import BlipProcessor, BlipForConditionalGeneration
31
+
32
+ import openai
33
+
34
+ def show_anns(anns):
35
+ if len(anns) == 0:
36
+ return
37
+ sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
38
+ full_img = None
39
+
40
+ # for ann in sorted_anns:
41
+ for i in range(len(sorted_anns)):
42
+ ann = anns[i]
43
+ m = ann['segmentation']
44
+ if full_img is None:
45
+ full_img = np.zeros((m.shape[0], m.shape[1], 3))
46
+ map = np.zeros((m.shape[0], m.shape[1]), dtype=np.uint16)
47
+ map[m != 0] = i + 1
48
+ color_mask = np.random.random((1, 3)).tolist()[0]
49
+ full_img[m != 0] = color_mask
50
+ full_img = full_img*255
51
+ # anno encoding from https://github.com/LUSSeg/ImageNet-S
52
+ res = np.zeros((map.shape[0], map.shape[1], 3))
53
+ res[:, :, 0] = map % 256
54
+ res[:, :, 1] = map // 256
55
+ res.astype(np.float32)
56
+ full_img = Image.fromarray(np.uint8(full_img))
57
+ return full_img, res
58
+
59
+ def generate_caption(processor, blip_model, raw_image):
60
+ # unconditional image captioning
61
+ inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
62
+ out = blip_model.generate(**inputs)
63
+ caption = processor.decode(out[0], skip_special_tokens=True)
64
+ return caption
65
+
66
+ def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo", openai_api_key=''):
67
+ openai.api_key = openai_api_key
68
+ openai.api_base = 'https://closeai.deno.dev/v1'
69
+ prompt = [
70
+ {
71
+ 'role': 'system',
72
+ 'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
73
+ f'List the nouns in singular form. Split them by "{split} ". ' + \
74
+ f'Caption: {caption}.'
75
+ }
76
+ ]
77
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
78
+ reply = response['choices'][0]['message']['content']
79
+ # sometimes return with "noun: xxx, xxx, xxx"
80
+ tags = reply.split(':')[-1].strip()
81
+ return tags
82
+
83
+ def transform_image(image_pil):
84
+
85
+ transform = T.Compose(
86
+ [
87
+ T.RandomResize([800], max_size=1333),
88
+ T.ToTensor(),
89
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
90
+ ]
91
+ )
92
+ image, _ = transform(image_pil, None) # 3, h, w
93
+ return image
94
+
95
+
96
+ def load_model(model_config_path, model_checkpoint_path, device):
97
+ args = SLConfig.fromfile(model_config_path)
98
+ args.device = device
99
+ model = build_model(args)
100
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
101
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
102
+ print(load_res)
103
+ _ = model.eval()
104
+ return model
105
+
106
+
107
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True):
108
+ caption = caption.lower()
109
+ caption = caption.strip()
110
+ if not caption.endswith("."):
111
+ caption = caption + "."
112
+
113
+ with torch.no_grad():
114
+ outputs = model(image[None], captions=[caption])
115
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
116
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
117
+ logits.shape[0]
118
+
119
+ # filter output
120
+ logits_filt = logits.clone()
121
+ boxes_filt = boxes.clone()
122
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
123
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
124
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
125
+ logits_filt.shape[0]
126
+
127
+ # get phrase
128
+ tokenlizer = model.tokenizer
129
+ tokenized = tokenlizer(caption)
130
+ # build pred
131
+ pred_phrases = []
132
+ scores = []
133
+ for logit, box in zip(logits_filt, boxes_filt):
134
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
135
+ if with_logits:
136
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
137
+ else:
138
+ pred_phrases.append(pred_phrase)
139
+ scores.append(logit.max().item())
140
+
141
+ return boxes_filt, torch.Tensor(scores), pred_phrases
142
+
143
+ def draw_mask(mask, draw, random_color=False):
144
+ if random_color:
145
+ color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 153)
146
+ else:
147
+ color = (30, 144, 255, 153)
148
+
149
+ nonzero_coords = np.transpose(np.nonzero(mask))
150
+
151
+ for coord in nonzero_coords:
152
+ draw.point(coord[::-1], fill=color)
153
+
154
+ def draw_box(box, draw, label):
155
+ # random color
156
+ color = tuple(np.random.randint(0, 255, size=3).tolist())
157
+
158
+ draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=color, width=2)
159
+
160
+ if label:
161
+ font = ImageFont.load_default()
162
+ if hasattr(font, "getbbox"):
163
+ bbox = draw.textbbox((box[0], box[1]), str(label), font)
164
+ else:
165
+ w, h = draw.textsize(str(label), font)
166
+ bbox = (box[0], box[1], w + box[0], box[1] + h)
167
+ draw.rectangle(bbox, fill=color)
168
+ draw.text((box[0], box[1]), str(label), fill="white")
169
+
170
+ draw.text((box[0], box[1]), label)
171
+
172
+
173
+
174
+ config_file = 'GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py'
175
+ ckpt_repo_id = "ShilongLiu/GroundingDINO"
176
+ ckpt_filename = "groundingdino_swint_ogc.pth"
177
+ sam_checkpoint='sam_vit_h_4b8939.pth'
178
+ output_dir="outputs"
179
+ device="cuda"
180
+
181
+
182
+ blip_processor = None
183
+ blip_model = None
184
+ groundingdino_model = None
185
+ sam_predictor = None
186
+ sam_automask_generator = None
187
+ inpaint_pipeline = None
188
+
189
+ def run_grounded_sam(input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, iou_threshold, inpaint_mode, scribble_mode, openai_api_key):
190
+
191
+ global blip_processor, blip_model, groundingdino_model, sam_predictor, sam_automask_generator, inpaint_pipeline
192
+
193
+ # make dir
194
+ os.makedirs(output_dir, exist_ok=True)
195
+ # load image
196
+ image = input_image["image"]
197
+ scribble = input_image["mask"]
198
+ size = image.size # w, h
199
+
200
+ if sam_predictor is None:
201
+ # initialize SAM
202
+ assert sam_checkpoint, 'sam_checkpoint is not found!'
203
+ sam = build_sam(checkpoint=sam_checkpoint)
204
+ sam.to(device=device)
205
+ sam_predictor = SamPredictor(sam)
206
+ sam_automask_generator = SamAutomaticMaskGenerator(sam)
207
+
208
+ if groundingdino_model is None:
209
+ groundingdino_model = load_model(config_file, ckpt_filename, device=device)
210
+
211
+ image_pil = image.convert("RGB")
212
+ image = np.array(image_pil)
213
+
214
+ if task_type == 'scribble':
215
+ sam_predictor.set_image(image)
216
+ scribble = scribble.convert("RGB")
217
+ scribble = np.array(scribble)
218
+ scribble = scribble.transpose(2, 1, 0)[0]
219
+
220
+ # label the connected components of the scribble
221
+ labeled_array, num_features = ndimage.label(scribble >= 255)
222
+
223
+ # compute the centroid of each connected component
224
+ centers = ndimage.center_of_mass(scribble, labeled_array, range(1, num_features+1))
225
+ centers = np.array(centers)
226
+
227
+ point_coords = torch.from_numpy(centers)
228
+ point_coords = sam_predictor.transform.apply_coords_torch(point_coords, image.shape[:2])
229
+ point_coords = point_coords.unsqueeze(0).to(device)
230
+ point_labels = torch.from_numpy(np.array([1] * len(centers))).unsqueeze(0).to(device)
231
+ if scribble_mode == 'split':
232
+ point_coords = point_coords.permute(1, 0, 2)
233
+ point_labels = point_labels.permute(1, 0)
234
+ masks, _, _ = sam_predictor.predict_torch(
235
+ point_coords=point_coords if len(point_coords) > 0 else None,
236
+ point_labels=point_labels if len(point_coords) > 0 else None,
237
+ mask_input = None,
238
+ boxes = None,
239
+ multimask_output = False,
240
+ )
241
+ elif task_type == 'automask':
242
+ masks = sam_automask_generator.generate(image)
243
+ else:
244
+ transformed_image = transform_image(image_pil)
245
+
246
+ if task_type == 'automatic':
247
+ # generate caption and tags
248
+ # use Tag2Text can generate better captions
249
+ # https://huggingface.co/spaces/xinyu1205/Tag2Text
250
+ # but there are some bugs...
251
+ blip_processor = blip_processor or BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
252
+ blip_model = blip_model or BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large", torch_dtype=torch.float16).to("cuda")
253
+ text_prompt = generate_caption(blip_processor, blip_model, image_pil)
254
+ if len(openai_api_key) > 0:
255
+ text_prompt = generate_tags(text_prompt, split=",", openai_api_key=openai_api_key)
256
+ print(f"Caption: {text_prompt}")
257
+
258
+ # run grounding dino model
259
+ boxes_filt, scores, pred_phrases = get_grounding_output(
260
+ groundingdino_model, transformed_image, text_prompt, box_threshold, text_threshold
261
+ )
262
+
263
+ # process boxes
264
+ H, W = size[1], size[0]
265
+ for i in range(boxes_filt.size(0)):
266
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
267
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
268
+ boxes_filt[i][2:] += boxes_filt[i][:2]
269
+
270
+ boxes_filt = boxes_filt.cpu()
271
+
272
+
273
+ if task_type == 'seg' or task_type == 'inpainting' or task_type == 'automatic':
274
+ sam_predictor.set_image(image)
275
+
276
+ if task_type == 'automatic':
277
+ # use NMS to handle overlapped boxes
278
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
279
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
280
+ boxes_filt = boxes_filt[nms_idx]
281
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
282
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
283
+ print(f"Revise caption with number: {text_prompt}")
284
+
285
+ transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
286
+
287
+ masks, _, _ = sam_predictor.predict_torch(
288
+ point_coords = None,
289
+ point_labels = None,
290
+ boxes = transformed_boxes,
291
+ multimask_output = False,
292
+ )
293
+
294
+ if task_type == 'det':
295
+ image_draw = ImageDraw.Draw(image_pil)
296
+ for box, label in zip(boxes_filt, pred_phrases):
297
+ draw_box(box, image_draw, label)
298
+
299
+ return [image_pil]
300
+ elif task_type == 'automask':
301
+ full_img, res = show_anns(masks)
302
+ return [full_img]
303
+ elif task_type == 'scribble':
304
+ mask_image = Image.new('RGBA', size, color=(0, 0, 0, 0))
305
+
306
+ mask_draw = ImageDraw.Draw(mask_image)
307
+
308
+ for mask in masks:
309
+ draw_mask(mask[0].cpu().numpy(), mask_draw, random_color=True)
310
+
311
+ image_pil = image_pil.convert('RGBA')
312
+ image_pil.alpha_composite(mask_image)
313
+ return [image_pil, mask_image]
314
+ elif task_type == 'seg' or task_type == 'automatic':
315
+
316
+ mask_image = Image.new('RGBA', size, color=(0, 0, 0, 0))
317
+
318
+ mask_draw = ImageDraw.Draw(mask_image)
319
+ for mask in masks:
320
+ draw_mask(mask[0].cpu().numpy(), mask_draw, random_color=True)
321
+
322
+ image_draw = ImageDraw.Draw(image_pil)
323
+
324
+ for box, label in zip(boxes_filt, pred_phrases):
325
+ draw_box(box, image_draw, label)
326
+
327
+ if task_type == 'automatic':
328
+ image_draw.text((10, 10), text_prompt, fill='black')
329
+
330
+ image_pil = image_pil.convert('RGBA')
331
+ image_pil.alpha_composite(mask_image)
332
+ return [image_pil, mask_image]
333
+ elif task_type == 'inpainting':
334
+ assert inpaint_prompt, 'inpaint_prompt is not found!'
335
+ # inpainting pipeline
336
+ if inpaint_mode == 'merge':
337
+ masks = torch.sum(masks, dim=0).unsqueeze(0)
338
+ masks = torch.where(masks > 0, True, False)
339
+ mask = masks[0][0].cpu().numpy() # simply choose the first mask, which will be refined in a future release
340
+ mask_pil = Image.fromarray(mask)
341
+
342
+ if inpaint_pipeline is None:
343
+ inpaint_pipeline = StableDiffusionInpaintPipeline.from_pretrained(
344
+ "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
345
+ )
346
+ inpaint_pipeline = inpaint_pipeline.to("cuda")
347
+
348
+ image = inpaint_pipeline(prompt=inpaint_prompt, image=image_pil.resize((512, 512)), mask_image=mask_pil.resize((512, 512))).images[0]
349
+ image = image.resize(size)
350
+
351
+ return [image, mask_pil]
352
+ else:
353
+ print("task_type:{} error!".format(task_type))
354
+
355
+ if __name__ == "__main__":
356
+ parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
357
+ parser.add_argument("--debug", action="store_true", help="using debug mode")
358
+ parser.add_argument("--share", action="store_true", help="share the app")
359
+ parser.add_argument('--port', type=int, default=7589, help='port to run the server')
360
+ parser.add_argument('--no-gradio-queue', action="store_true", help='disable the gradio queue')
361
+ args = parser.parse_args()
362
+
363
+ print(args)
364
+
365
+ block = gr.Blocks()
366
+ if not args.no_gradio_queue:
367
+ block = block.queue()
368
+
369
+ with block:
370
+ with gr.Row():
371
+ with gr.Column():
372
+ input_image = gr.Image(source='upload', type="pil", value="assets/demo1.jpg", tool="sketch")
373
+ task_type = gr.Dropdown(["scribble", "automask", "det", "seg", "inpainting", "automatic"], value="automatic", label="task_type")
374
+ text_prompt = gr.Textbox(label="Text Prompt")
375
+ inpaint_prompt = gr.Textbox(label="Inpaint Prompt")
376
+ run_button = gr.Button(label="Run")
377
+ with gr.Accordion("Advanced options", open=False):
378
+ box_threshold = gr.Slider(
379
+ label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05
380
+ )
381
+ text_threshold = gr.Slider(
382
+ label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05
383
+ )
384
+ iou_threshold = gr.Slider(
385
+ label="IOU Threshold", minimum=0.0, maximum=1.0, value=0.5, step=0.05
386
+ )
387
+ inpaint_mode = gr.Dropdown(["merge", "first"], value="merge", label="inpaint_mode")
388
+ scribble_mode = gr.Dropdown(["merge", "split"], value="split", label="scribble_mode")
389
+ openai_api_key = gr.Textbox(label="(Optional) OpenAI API key to enable ChatGPT")
390
+
391
+ with gr.Column():
392
+ gallery = gr.Gallery(
393
+ label="Generated images", show_label=False, elem_id="gallery"
394
+ ).style(preview=True, grid=2, object_fit="scale-down")
395
+
396
+ run_button.click(fn=run_grounded_sam, inputs=[
397
+ input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, iou_threshold, inpaint_mode, scribble_mode, openai_api_key], outputs=gallery)
398
+
399
+ block.queue(concurrency_count=100)
400
+ block.launch(server_name='0.0.0.0', server_port=args.port, debug=args.debug, share=args.share)
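As a standalone illustration of the NMS step used in the 'automatic' branch above: torchvision.ops.nms keeps the highest-scoring box among overlapping detections. The call is torchvision's real API; the boxes and scores below are made up.

import torch
import torchvision

boxes = torch.tensor([[10., 10., 100., 100.],
                      [12., 12., 102., 102.],    # near-duplicate of the first box
                      [200., 200., 300., 300.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)
print(keep.tolist())  # [0, 2]: the overlapping duplicate is suppressed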
external/Grounded-Segment-Anything/grounded_sam.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
external/Grounded-Segment-Anything/grounded_sam_inpainting_demo.py ADDED
@@ -0,0 +1,216 @@
1
+ import argparse
2
+ import os
3
+ import copy
4
+
5
+ import numpy as np
6
+ import torch
7
+ from PIL import Image, ImageDraw, ImageFont
8
+
9
+ # Grounding DINO
10
+ import GroundingDINO.groundingdino.datasets.transforms as T
11
+ from GroundingDINO.groundingdino.models import build_model
12
+ from GroundingDINO.groundingdino.util import box_ops
13
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
14
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
15
+
16
+ # segment anything
17
+ from segment_anything import build_sam, SamPredictor
18
+ import cv2
19
+ import numpy as np
20
+ import matplotlib.pyplot as plt
21
+
22
+
23
+ # diffusers
24
+ import PIL
25
+ import requests
26
+ import torch
27
+ from io import BytesIO
28
+ from diffusers import StableDiffusionInpaintPipeline
29
+
30
+
31
+ def load_image(image_path):
32
+ # load image
33
+ image_pil = Image.open(image_path).convert("RGB") # load image
34
+
35
+ transform = T.Compose(
36
+ [
37
+ T.RandomResize([800], max_size=1333),
38
+ T.ToTensor(),
39
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
40
+ ]
41
+ )
42
+ image, _ = transform(image_pil, None) # 3, h, w
43
+ return image_pil, image
44
+
45
+
46
+ def load_model(model_config_path, model_checkpoint_path, device):
47
+ args = SLConfig.fromfile(model_config_path)
48
+ args.device = device
49
+ model = build_model(args)
50
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
51
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
52
+ print(load_res)
53
+ _ = model.eval()
54
+ return model
55
+
56
+
57
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True, device="cpu"):
58
+ caption = caption.lower()
59
+ caption = caption.strip()
60
+ if not caption.endswith("."):
61
+ caption = caption + "."
62
+ model = model.to(device)
63
+ image = image.to(device)
64
+ with torch.no_grad():
65
+ outputs = model(image[None], captions=[caption])
66
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
67
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
68
+ logits.shape[0]
69
+
70
+ # filter output
71
+ logits_filt = logits.clone()
72
+ boxes_filt = boxes.clone()
73
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
74
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
75
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
76
+ logits_filt.shape[0]
77
+
78
+ # get phrase
79
+ tokenlizer = model.tokenizer
80
+ tokenized = tokenlizer(caption)
81
+ # build pred
82
+ pred_phrases = []
83
+ for logit, box in zip(logits_filt, boxes_filt):
84
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
85
+ if with_logits:
86
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
87
+ else:
88
+ pred_phrases.append(pred_phrase)
89
+
90
+ return boxes_filt, pred_phrases
91
+
92
+ def show_mask(mask, ax, random_color=False):
93
+ if random_color:
94
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
95
+ else:
96
+ color = np.array([30/255, 144/255, 255/255, 0.6])
97
+ h, w = mask.shape[-2:]
98
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
99
+ ax.imshow(mask_image)
100
+
101
+
102
+ def show_box(box, ax, label):
103
+ x0, y0 = box[0], box[1]
104
+ w, h = box[2] - box[0], box[3] - box[1]
105
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
106
+ ax.text(x0, y0, label)
107
+
108
+
109
+ if __name__ == "__main__":
110
+
111
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
112
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
113
+ parser.add_argument(
114
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
115
+ )
116
+ parser.add_argument(
117
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
118
+ )
119
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
120
+ parser.add_argument("--det_prompt", type=str, required=True, help="text prompt")
121
+ parser.add_argument("--inpaint_prompt", type=str, required=True, help="inpaint prompt")
122
+ parser.add_argument(
123
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
124
+ )
125
+ parser.add_argument("--cache_dir", type=str, default=None, help="save your huggingface large model cache")
126
+ parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
127
+ parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
128
+ parser.add_argument("--inpaint_mode", type=str, default="first", help="inpaint mode")
129
+ parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
130
+ args = parser.parse_args()
131
+
132
+ # cfg
133
+ config_file = args.config # change the path of the model config file
134
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
135
+ sam_checkpoint = args.sam_checkpoint
136
+ image_path = args.input_image
137
+ det_prompt = args.det_prompt
138
+ inpaint_prompt = args.inpaint_prompt
139
+ output_dir = args.output_dir
140
+ cache_dir=args.cache_dir
141
+ box_threshold = args.box_threshold
142
+ text_threshold = args.text_threshold
143
+ inpaint_mode = args.inpaint_mode
144
+ device = args.device
145
+
146
+ # make dir
147
+ os.makedirs(output_dir, exist_ok=True)
148
+ # load image
149
+ image_pil, image = load_image(image_path)
150
+ # load model
151
+ model = load_model(config_file, grounded_checkpoint, device=device)
152
+
153
+ # visualize raw image
154
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
155
+
156
+ # run grounding dino model
157
+ boxes_filt, pred_phrases = get_grounding_output(
158
+ model, image, det_prompt, box_threshold, text_threshold, device=device
159
+ )
160
+
161
+ # initialize SAM
162
+ predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
163
+ image = cv2.imread(image_path)
164
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
165
+ predictor.set_image(image)
166
+
167
+ size = image_pil.size
168
+ H, W = size[1], size[0]
169
+ for i in range(boxes_filt.size(0)):
170
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
171
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
172
+ boxes_filt[i][2:] += boxes_filt[i][:2]
173
+
174
+ boxes_filt = boxes_filt.cpu()
175
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
176
+
177
+ masks, _, _ = predictor.predict_torch(
178
+ point_coords = None,
179
+ point_labels = None,
180
+ boxes = transformed_boxes.to(device),
181
+ multimask_output = False,
182
+ )
183
+
184
+ # masks: [1, 1, 512, 512]
185
+
186
+ # draw output image
187
+ plt.figure(figsize=(10, 10))
188
+ plt.imshow(image)
189
+ for mask in masks:
190
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
191
+ for box, label in zip(boxes_filt, pred_phrases):
192
+ show_box(box.numpy(), plt.gca(), label)
193
+ plt.axis('off')
194
+ plt.savefig(os.path.join(output_dir, "grounded_sam_output.jpg"), bbox_inches="tight")
195
+
196
+ # inpainting pipeline
197
+ if inpaint_mode == 'merge':
198
+ masks = torch.sum(masks, dim=0).unsqueeze(0)
199
+ masks = torch.where(masks > 0, True, False)
200
+ mask = masks[0][0].cpu().numpy() # simply choose the first mask, which will be refined in a future release
201
+ mask_pil = Image.fromarray(mask)
202
+ image_pil = Image.fromarray(image)
203
+
204
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
205
+ "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16,cache_dir=cache_dir
206
+ )
207
+ pipe = pipe.to("cuda")
208
+
209
+ image_pil = image_pil.resize((512, 512))
210
+ mask_pil = mask_pil.resize((512, 512))
211
+ # prompt = "A sofa, high quality, detailed"
212
+ image = pipe(prompt=inpaint_prompt, image=image_pil, mask_image=mask_pil).images[0]
213
+ image = image.resize(size)
214
+ image.save(os.path.join(output_dir, "grounded_sam_inpainting_output.jpg"))
215
+
216
+
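The 'merge' inpaint mode above reduces SAM's per-box masks to a single mask before calling the Stable Diffusion pipeline. Below is a minimal sketch of that reduction with dummy tensors; the shapes are assumed to match predictor.predict_torch's (N, 1, H, W) boolean output.

import numpy as np
import torch
from PIL import Image

masks = torch.rand(3, 1, 64, 64) > 0.5                 # stand-in for SAM's boolean masks
merged = torch.sum(masks, dim=0, keepdim=True) > 0     # logical OR across the N per-box masks
mask_pil = Image.fromarray(merged[0, 0].numpy().astype(np.uint8) * 255).resize((512, 512))
print(mask_pil.size, mask_pil.mode)                    # (512, 512) L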
external/Grounded-Segment-Anything/grounded_sam_multi_gpu_demo.py ADDED
@@ -0,0 +1,265 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ import time
5
+ import torch
6
+ import numpy as np
7
+ import json
8
+ from PIL import Image
9
+ from concurrent.futures import ThreadPoolExecutor
10
+
11
+ sys.path.append(os.path.join(os.getcwd(), "GroundingDINO"))
12
+ sys.path.append(os.path.join(os.getcwd(), "segment_anything"))
13
+
14
+ # Grounding DINO imports
15
+ import GroundingDINO.groundingdino.datasets.transforms as T
16
+ from GroundingDINO.groundingdino.models import build_model
17
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
18
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
19
+
20
+ # Segment Anything imports
21
+ from segment_anything import sam_model_registry, sam_hq_model_registry, SamPredictor
22
+ import cv2
23
+ import matplotlib.pyplot as plt
24
+
25
+
26
+ def load_image(image_path):
27
+ image_pil = Image.open(image_path).convert("RGB")
28
+ transform = T.Compose([
29
+ T.RandomResize([800], max_size=1333),
30
+ T.ToTensor(),
31
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
32
+ ])
33
+ image, _ = transform(image_pil, None)
34
+ return image_pil, image
35
+
36
+
37
+ def load_model(model_config_path, model_checkpoint_path, device):
38
+ print("Loading model from...........", device)
39
+ args = SLConfig.fromfile(model_config_path)
40
+ args.device = device
41
+ model = build_model(args)
42
+
43
+ # Load the model checkpoint onto the specific GPU
44
+ checkpoint = torch.load(model_checkpoint_path, map_location=device)
45
+ model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
46
+ model.eval()
47
+ model.to(device)
48
+
49
+ return model
50
+
51
+
52
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold, device="cpu"):
53
+ caption = caption.lower().strip()
54
+ if not caption.endswith("."):
55
+ caption += "."
56
+ model.to(device)
57
+ image = image.to(device)
58
+ with torch.no_grad():
59
+ outputs = model(image[None], captions=[caption])
60
+ logits = outputs["pred_logits"].sigmoid()[0] # Keep it on the device
61
+ boxes = outputs["pred_boxes"][0] # Keep it on the device
62
+
63
+ filt_mask = logits.max(dim=1)[0] > box_threshold
64
+ logits_filt = logits[filt_mask]
65
+ boxes_filt = boxes[filt_mask]
66
+
67
+ tokenlizer = model.tokenizer
68
+ tokenized = tokenlizer(caption)
69
+ pred_phrases = []
70
+ for logit, box in zip(logits_filt, boxes_filt):
71
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
72
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
73
+
74
+ return boxes_filt, pred_phrases
75
+
76
+
77
+ def process_image(image_path, model, predictor, output_dir, text_prompt, box_threshold, text_threshold, device):
78
+
79
+ # Load the image and move to GPU
80
+ image_pil, image = load_image(image_path)
81
+ # image_pil.save(os.path.join(output_dir, f"raw_image_{os.path.basename(image_path)}.jpg"))
82
+ # Run GroundingDINO model to get bounding boxes and labels
83
+ boxes_filt, pred_phrases = get_grounding_output(
84
+ model, image, text_prompt, box_threshold, text_threshold, device=device
85
+ )
86
+
87
+ # Load SAM model onto GPU
88
+ image_cv = cv2.imread(image_path)
89
+ image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
90
+ predictor.set_image(image_cv)
91
+
92
+ # Convert boxes to original image size
93
+ size = image_pil.size
94
+ H, W = size[1], size[0]
95
+ for i in range(boxes_filt.size(0)):
96
+ boxes_filt[i] = boxes_filt[i] * torch.tensor([W, H, W, H], device=device)
97
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
98
+ boxes_filt[i][2:] += boxes_filt[i][:2]
99
+
100
+ # Transform boxes to be compatible with SAM
101
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image_cv.shape[:2]).to(device)
102
+
103
+ # Get masks using SAM
104
+ masks, _, _ = predictor.predict_torch(
105
+ point_coords=None,
106
+ point_labels=None,
107
+ boxes=transformed_boxes.to(device),
108
+ multimask_output=False,
109
+ )
110
+
111
+ # Visualization and saving
112
+ plt.figure(figsize=(10, 10))
113
+ plt.imshow(image_cv)
114
+ # for mask in masks:
115
+ # show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
116
+ for box, label in zip(boxes_filt, pred_phrases):
117
+ show_box(box.cpu().numpy(), plt.gca(), label)
118
+ image_base_name = os.path.basename(image_path).split('.')[0]
119
+ plt.axis('off')
120
+ plt.savefig(
121
+ os.path.join(output_dir, f"grounded_sam_output_{image_base_name}.jpg"),
122
+ bbox_inches="tight", dpi=300, pad_inches=0.0
123
+ )
124
+ plt.close()
125
+
126
+ save_mask_data(output_dir, masks, boxes_filt, pred_phrases, image_base_name)
127
+ # Clear GPU memory
128
+ del image, transformed_boxes, masks # model, sam
129
+ # torch.cuda.empty_cache()
130
+
131
+
132
+ def show_mask(mask, ax, random_color=False):
133
+ if random_color:
134
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
135
+ else:
136
+ color = np.array([30/255, 144/255, 255/255, 0.6])
137
+ h, w = mask.shape[-2:]
138
+ # print("mask.shape:", mask.shape)
139
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
140
+ ax.imshow(mask_image)
141
+
142
+
143
+ def show_box(box, ax, label):
144
+ x0, y0 = box[0], box[1]
145
+ w, h = box[2] - box[0], box[3] - box[1]
146
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))
147
+ ax.text(x0, y0, label)
148
+
149
+
150
+ def save_mask_data(output_dir, mask_list, box_list, label_list, image_base_name=''):
151
+ value = 0 # 0 for background
152
+
153
+ mask_img = torch.zeros(mask_list.shape[-2:], device=mask_list.device)
154
+ for idx, mask in enumerate(mask_list):
155
+ mask_img[mask[0] == True] = value + idx + 1
156
+ plt.figure(figsize=(10, 10))
157
+ plt.imshow(mask_img.cpu().numpy())
158
+ plt.axis('off')
159
+ plt.savefig(os.path.join(output_dir, f'{image_base_name}.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
160
+ plt.close()
161
+ json_data = [{
162
+ 'value': value,
163
+ 'label': 'background'
164
+ }]
165
+ for label, box in zip(label_list, box_list):
166
+ value += 1
167
+ name, logit = label.split('(')
168
+ logit = logit[:-1] # the last is ')'
169
+ json_data.append({
170
+ 'value': value,
171
+ 'label': name,
172
+ 'logit': float(logit),
173
+ 'box': box.cpu().numpy().tolist(),
174
+ })
175
+ with open(os.path.join(output_dir, f'{image_base_name}.json'), 'w') as f:
176
+ json.dump(json_data, f)
177
+
178
+
179
+ if __name__ == "__main__":
180
+
181
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
182
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
183
+ parser.add_argument("--grounded_checkpoint", type=str, required=True, help="path to checkpoint file")
184
+ parser.add_argument("--sam_version", type=str, default="vit_h", required=False, help="SAM ViT version: vit_b / vit_l / vit_h")
185
+ parser.add_argument("--sam_checkpoint", type=str, required=False, help="path to sam checkpoint file")
186
+ parser.add_argument("--sam_hq_checkpoint", type=str, default=None, help="path to sam-hq checkpoint file")
187
+ parser.add_argument("--use_sam_hq", action="store_true", help="using sam-hq for prediction")
188
+ parser.add_argument("--input_path", type=str, required=True, help="path to directory containing image files")
189
+ parser.add_argument("--text_prompt", type=str, required=True, help="text prompt")
190
+ parser.add_argument("--output_dir", "-o", type=str, default="outputs", required=True, help="output directory")
191
+ parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
192
+ parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
193
+ parser.add_argument("--device", type=str, default="cuda", help="device to run the inference on, e.g., 'cuda' or 'cuda:0'")
194
+ args = parser.parse_args()
195
+
196
+ torch.backends.cudnn.enabled = False
197
+ torch.backends.cudnn.benchmark = True
198
+
199
+ start_time = time.time()
200
+ # Determine if we are using a single GPU or all available GPUs
201
+ if args.device == "cuda":
202
+ if torch.cuda.device_count() > 1:
203
+ device_list = [torch.device(f"cuda:{i}") for i in range(torch.cuda.device_count())] # Use all GPUs
204
+ else:
205
+ device_list = [torch.device("cuda:0")] # Default to first GPU
206
+ else:
207
+ device_list = [torch.device(args.device)]
208
+ print("device_list:", device_list)
209
+
210
+ # Get list of images
211
+ image_paths = [os.path.join(args.input_path, img) for img in os.listdir(args.input_path) if img.endswith(('.png', '.jpg', '.jpeg'))]
212
+
213
+ # Split images among available GPUs
214
+ image_batches = np.array_split(image_paths, len(device_list))
215
+ print("Processing images:", image_batches)
216
+ # Function to process a batch of images on the specified device
217
+ def process_batch(batch_images, model_config, model_checkpoint, sam_version, sam_checkpoint, sam_hq_checkpoint, use_sam_hq, device, output_dir):
218
+ # Load model onto GPU
219
+ torch.cuda.set_device(device)
220
+ model = load_model(model_config, model_checkpoint, device)
221
+
222
+ # Load SAM model onto GPU
223
+ if use_sam_hq:
224
+ sam = sam_hq_model_registry[sam_version](checkpoint=sam_hq_checkpoint).to(device)
225
+ else:
226
+ sam = sam_model_registry[sam_version](checkpoint=sam_checkpoint).to(device)
227
+ # Move model to the correct device
228
+ device = torch.device(device)
229
+ model.to(device)
230
+ sam.to(device)
231
+ predictor = SamPredictor(sam)
232
+ for image_path in batch_images:
233
+ # Process each image
234
+ print("Processing image:", image_path)
235
+ process_image(
236
+ image_path=image_path,
237
+ model=model,
238
+ predictor=predictor,
239
+ output_dir=output_dir,
240
+ text_prompt=args.text_prompt,
241
+ box_threshold=args.box_threshold,
242
+ text_threshold=args.text_threshold,
243
+ device=device
244
+ )
245
+ print("Image processing complete {}".format(image_path))
246
+ # Clear GPU memory after processing the batch
247
+ # del model, sam
248
+ torch.cuda.empty_cache()
249
+
250
+ # Use ThreadPoolExecutor to parallelize the processing across GPUs
251
+ with ThreadPoolExecutor(max_workers=len(device_list)*2) as executor:
252
+ futures = []
253
+ for i, device in enumerate(device_list):
254
+ print(f"Processing images on device {device}")
255
+ print("Image batches for each GPU:", len(image_batches[i]))
256
+ futures.append(executor.submit(
257
+ process_batch, image_batches[i], args.config, args.grounded_checkpoint, args.sam_version, args.sam_checkpoint, args.sam_hq_checkpoint, args.use_sam_hq, device, args.output_dir
258
+ ))
259
+
260
+ # Wait for all threads to complete
261
+ for future in futures:
262
+ future.result()
263
+
264
+ print("Processing complete. Results saved to the output directory.")
265
+ print(f"Total time taken: {time.time() - start_time:.2f} seconds")
external/Grounded-Segment-Anything/grounded_sam_simple_demo.py ADDED
@@ -0,0 +1,107 @@
1
+ import cv2
2
+ import numpy as np
3
+ import supervision as sv
4
+
5
+ import torch
6
+ import torchvision
7
+
8
+ from groundingdino.util.inference import Model
9
+ from segment_anything import sam_model_registry, SamPredictor
10
+
11
+ DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
12
+
13
+ # GroundingDINO config and checkpoint
14
+ GROUNDING_DINO_CONFIG_PATH = "GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py"
15
+ GROUNDING_DINO_CHECKPOINT_PATH = "./groundingdino_swint_ogc.pth"
16
+
17
+ # Segment-Anything checkpoint
18
+ SAM_ENCODER_VERSION = "vit_h"
19
+ SAM_CHECKPOINT_PATH = "./sam_vit_h_4b8939.pth"
20
+
21
+ # Building GroundingDINO inference model
22
+ grounding_dino_model = Model(model_config_path=GROUNDING_DINO_CONFIG_PATH, model_checkpoint_path=GROUNDING_DINO_CHECKPOINT_PATH)
23
+
24
+ # Building SAM Model and SAM Predictor
25
+ sam = sam_model_registry[SAM_ENCODER_VERSION](checkpoint=SAM_CHECKPOINT_PATH)
26
+ sam.to(device=DEVICE)
27
+ sam_predictor = SamPredictor(sam)
28
+
29
+
30
+ # Predict classes and hyper-param for GroundingDINO
31
+ SOURCE_IMAGE_PATH = "./assets/demo2.jpg"
32
+ CLASSES = ["The running dog"]
33
+ BOX_THRESHOLD = 0.25
34
+ TEXT_THRESHOLD = 0.25
35
+ NMS_THRESHOLD = 0.8
36
+
37
+
38
+ # load image
39
+ image = cv2.imread(SOURCE_IMAGE_PATH)
40
+
41
+ # detect objects
42
+ detections = grounding_dino_model.predict_with_classes(
43
+ image=image,
44
+ classes=CLASSES,
45
+ box_threshold=BOX_THRESHOLD,
46
+ text_threshold=TEXT_THRESHOLD
47
+ )
48
+
49
+ # annotate image with detections
50
+ box_annotator = sv.BoxAnnotator()
51
+ labels = [
52
+ f"{CLASSES[class_id]} {confidence:0.2f}"
53
+ for _, _, confidence, class_id, _, _
54
+ in detections]
55
+ annotated_frame = box_annotator.annotate(scene=image.copy(), detections=detections, labels=labels)
56
+
57
+ # save the annotated grounding dino image
58
+ cv2.imwrite("groundingdino_annotated_image.jpg", annotated_frame)
59
+
60
+
61
+ # NMS post process
62
+ print(f"Before NMS: {len(detections.xyxy)} boxes")
63
+ nms_idx = torchvision.ops.nms(
64
+ torch.from_numpy(detections.xyxy),
65
+ torch.from_numpy(detections.confidence),
66
+ NMS_THRESHOLD
67
+ ).numpy().tolist()
68
+
69
+ detections.xyxy = detections.xyxy[nms_idx]
70
+ detections.confidence = detections.confidence[nms_idx]
71
+ detections.class_id = detections.class_id[nms_idx]
72
+
73
+ print(f"After NMS: {len(detections.xyxy)} boxes")
74
+
75
+ # Prompting SAM with detected boxes
76
+ def segment(sam_predictor: SamPredictor, image: np.ndarray, xyxy: np.ndarray) -> np.ndarray:
77
+ sam_predictor.set_image(image)
78
+ result_masks = []
79
+ for box in xyxy:
80
+ masks, scores, logits = sam_predictor.predict(
81
+ box=box,
82
+ multimask_output=True
83
+ )
84
+ index = np.argmax(scores)
85
+ result_masks.append(masks[index])
86
+ return np.array(result_masks)
87
+
88
+
89
+ # convert detections to masks
90
+ detections.mask = segment(
91
+ sam_predictor=sam_predictor,
92
+ image=cv2.cvtColor(image, cv2.COLOR_BGR2RGB),
93
+ xyxy=detections.xyxy
94
+ )
95
+
96
+ # annotate image with detections
97
+ box_annotator = sv.BoxAnnotator()
98
+ mask_annotator = sv.MaskAnnotator()
99
+ labels = [
100
+ f"{CLASSES[class_id]} {confidence:0.2f}"
101
+ for _, _, confidence, class_id, _, _
102
+ in detections]
103
+ annotated_image = mask_annotator.annotate(scene=image.copy(), detections=detections)
104
+ annotated_image = box_annotator.annotate(scene=annotated_image, detections=detections, labels=labels)
105
+
106
+ # save the annotated grounded-sam image
107
+ cv2.imwrite("grounded_sam_annotated_image.jpg", annotated_image)
external/Grounded-Segment-Anything/grounded_sam_visam.py ADDED
@@ -0,0 +1,265 @@
1
+
2
+ from copy import deepcopy
3
+ import json
4
+
5
+ import os
6
+ import argparse
7
+ import torchvision.transforms.functional as F
8
+ import torch
9
+ import cv2
10
+ import numpy as np
11
+ from tqdm import tqdm
12
+ from pathlib import Path
13
+ import sys
14
+ sys.path.append('VISAM')
15
+ from main import get_args_parser
16
+ from models import build_model
17
+ from util.tool import load_model
18
+ from models.structures import Instances
19
+
20
+ from torch.utils.data import Dataset, DataLoader
21
+
22
+
23
+ # segment anything
24
+ sys.path.append('segment_anything')
25
+ from segment_anything import build_sam, SamPredictor
26
+
27
+
28
+ class Colors:
29
+ # Ultralytics color palette https://ultralytics.com/
30
+ def __init__(self):
31
+ # hex = matplotlib.colors.TABLEAU_COLORS.values()
32
+ hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
33
+ '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
34
+ self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
35
+ self.n = len(self.palette)
36
+
37
+ def __call__(self, i, bgr=False):
38
+ c = self.palette[int(i) % self.n]
39
+ return (c[2], c[1], c[0]) if bgr else c
40
+
41
+ @staticmethod
42
+ def hex2rgb(h): # rgb order (PIL)
43
+ return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
44
+
45
+
46
+ colors = Colors() # create instance for 'from utils.plots import colors'
47
+
48
+
49
+ class ListImgDataset(Dataset):
50
+ def __init__(self, mot_path, img_list, det_db) -> None:
51
+ super().__init__()
52
+ self.mot_path = mot_path
53
+ self.img_list = img_list
54
+ self.det_db = det_db
55
+
56
+ '''
57
+ common settings
58
+ '''
59
+ self.img_height = 800
60
+ self.img_width = 1536
61
+ self.mean = [0.485, 0.456, 0.406]
62
+ self.std = [0.229, 0.224, 0.225]
63
+
64
+ def load_img_from_file(self, f_path):
65
+ cur_img = cv2.imread(os.path.join(self.mot_path, f_path))
66
+ assert cur_img is not None, f_path
67
+ cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
68
+ proposals = []
69
+ im_h, im_w = cur_img.shape[:2]
70
+ for line in self.det_db[f_path[:-4] + '.txt']:
71
+ l, t, w, h, s = list(map(float, line.split(',')))
72
+ proposals.append([(l + w / 2) / im_w,
73
+ (t + h / 2) / im_h,
74
+ w / im_w,
75
+ h / im_h,
76
+ s])
77
+ return cur_img, torch.as_tensor(proposals).reshape(-1, 5)
78
+
79
+ def init_img(self, img, proposals):
80
+ ori_img = img.copy()
81
+ self.seq_h, self.seq_w = img.shape[:2]
82
+ scale = self.img_height / min(self.seq_h, self.seq_w)
83
+ if max(self.seq_h, self.seq_w) * scale > self.img_width:
84
+ scale = self.img_width / max(self.seq_h, self.seq_w)
85
+ target_h = int(self.seq_h * scale)
86
+ target_w = int(self.seq_w * scale)
87
+ img = cv2.resize(img, (target_w, target_h))
88
+ img = F.normalize(F.to_tensor(img), self.mean, self.std)
89
+ img = img.unsqueeze(0)
90
+ return img, ori_img, proposals
91
+
92
+ def __len__(self):
93
+ return len(self.img_list)
94
+
95
+ def __getitem__(self, index):
96
+ img, proposals = self.load_img_from_file(self.img_list[index])
97
+ return self.init_img(img, proposals)
98
+
99
+
100
+ class Detector(object):
101
+ def __init__(self, args, model, vid, sam_predictor=None):
102
+ self.args = args
103
+ self.detr = model
104
+
105
+ self.vid = vid
106
+ self.seq_num = os.path.basename(vid)
107
+ img_list = os.listdir(os.path.join(self.args.mot_path, vid, 'img1'))
108
+ img_list = [os.path.join(vid, 'img1', i) for i in img_list if 'jpg' in i]
109
+
110
+ self.img_list = sorted(img_list)
111
+ self.img_len = len(self.img_list)
112
+
113
+ self.predict_path = os.path.join(self.args.output_dir, args.exp_name)
114
+ os.makedirs(self.predict_path, exist_ok=True)
115
+
116
+ fps = 25
117
+ size = (1920, 1080)
118
+ self.videowriter = cv2.VideoWriter('visam.avi', cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
119
+
120
+ self.sam_predictor = sam_predictor
121
+
122
+ @staticmethod
123
+ def filter_dt_by_score(dt_instances: Instances, prob_threshold: float) -> Instances:
124
+ keep = dt_instances.scores > prob_threshold
125
+ keep &= dt_instances.obj_idxes >= 0
126
+ return dt_instances[keep]
127
+
128
+ @staticmethod
129
+ def filter_dt_by_area(dt_instances: Instances, area_threshold: float) -> Instances:
130
+ wh = dt_instances.boxes[:, 2:4] - dt_instances.boxes[:, 0:2]
131
+ areas = wh[:, 0] * wh[:, 1]
132
+ keep = areas > area_threshold
133
+ return dt_instances[keep]
134
+
135
+ def detect(self, prob_threshold=0.6, area_threshold=100, vis=False):
136
+ total_dts = 0
137
+ total_occlusion_dts = 0
138
+
139
+ track_instances = None
140
+ with open(os.path.join(self.args.mot_path, 'DanceTrack', self.args.det_db)) as f:
141
+ det_db = json.load(f)
142
+ loader = DataLoader(ListImgDataset(self.args.mot_path, self.img_list, det_db), 1, num_workers=2)
143
+ lines = []
144
+ for i, data in enumerate(tqdm(loader)):
145
+ cur_img, ori_img, proposals = [d[0] for d in data]
146
+ cur_img, proposals = cur_img.cuda(), proposals.cuda()
147
+
148
+ # track_instances = None
149
+ if track_instances is not None:
150
+ track_instances.remove('boxes')
151
+ track_instances.remove('labels')
152
+ seq_h, seq_w, _ = ori_img.shape
153
+
154
+ res = self.detr.inference_single_image(cur_img, (seq_h, seq_w), track_instances, proposals)
155
+ track_instances = res['track_instances']
156
+
157
+ dt_instances = deepcopy(track_instances)
158
+
159
+ # filter det instances by score.
160
+ dt_instances = self.filter_dt_by_score(dt_instances, prob_threshold)
161
+ dt_instances = self.filter_dt_by_area(dt_instances, area_threshold)
162
+
163
+ total_dts += len(dt_instances)
164
+
165
+ bbox_xyxy = dt_instances.boxes.tolist()
166
+ identities = dt_instances.obj_idxes.tolist()
167
+
168
+ img = ori_img.to(torch.device('cpu')).numpy().copy()[..., ::-1]
169
+ if self.sam_predictor is not None:
170
+ masks_all = []
171
+ self.sam_predictor.set_image(ori_img.to(torch.device('cpu')).numpy().copy())
172
+
173
+ for bbox, id in zip(np.array(bbox_xyxy), identities):
174
+ masks, iou_predictions, low_res_masks = self.sam_predictor.predict(box=bbox)
175
+ index_max = iou_predictions.argsort()[-1]  # index of the highest predicted IoU (argsort is ascending)
176
+ masks = np.concatenate([masks[index_max:(index_max+1)], masks[index_max:(index_max+1)], masks[index_max:(index_max+1)]], axis=0)
177
+ masks = masks.astype(np.int32)*np.array(colors(id))[:, None, None]
178
+ masks_all.append(masks)
179
+
180
+ self.sam_predictor.reset_image()
181
+ if len(masks_all):
182
+ masks_sum = masks_all[0].copy()
183
+ for m in masks_all[1:]:
184
+ masks_sum += m
185
+ else:
186
+ masks_sum = np.zeros_like(img).transpose(2, 0, 1)
187
+
188
+ img = (img * 0.5 + (masks_sum.transpose(1,2,0) * 30) %128).astype(np.uint8)
189
+ for bbox in bbox_xyxy:
190
+ cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0,0,255), thickness=3)
191
+ self.videowriter.write(img)
192
+
193
+ save_format = '{frame},{id},{x1:.2f},{y1:.2f},{w:.2f},{h:.2f},1,-1,-1,-1\n'
194
+ for xyxy, track_id in zip(bbox_xyxy, identities):
195
+ if track_id < 0 or track_id is None:
196
+ continue
197
+ x1, y1, x2, y2 = xyxy
198
+ w, h = x2 - x1, y2 - y1
199
+ lines.append(save_format.format(frame=i + 1, id=track_id, x1=x1, y1=y1, w=w, h=h))
200
+ with open(os.path.join(self.predict_path, f'{self.seq_num}.txt'), 'w') as f:
201
+ f.writelines(lines)
202
+ print("totally {} dts {} occlusion dts".format(total_dts, total_occlusion_dts))
203
+
204
+
205
+ class RuntimeTrackerBase(object):
206
+ def __init__(self, score_thresh=0.6, filter_score_thresh=0.5, miss_tolerance=10):
207
+ self.score_thresh = score_thresh
208
+ self.filter_score_thresh = filter_score_thresh
209
+ self.miss_tolerance = miss_tolerance
210
+ self.max_obj_id = 0
211
+
212
+ def clear(self):
213
+ self.max_obj_id = 0
214
+
215
+ def update(self, track_instances: Instances):
216
+ device = track_instances.obj_idxes.device
217
+
218
+ track_instances.disappear_time[track_instances.scores >= self.score_thresh] = 0
219
+ new_obj = (track_instances.obj_idxes == -1) & (track_instances.scores >= self.score_thresh)
220
+ disappeared_obj = (track_instances.obj_idxes >= 0) & (track_instances.scores < self.filter_score_thresh)
221
+ num_new_objs = new_obj.sum().item()
222
+
223
+ track_instances.obj_idxes[new_obj] = self.max_obj_id + torch.arange(num_new_objs, device=device)
224
+ self.max_obj_id += num_new_objs
225
+
226
+ track_instances.disappear_time[disappeared_obj] += 1
227
+ to_del = disappeared_obj & (track_instances.disappear_time >= self.miss_tolerance)
228
+ track_instances.obj_idxes[to_del] = -1
229
+
230
+
231
+ if __name__ == "__main__":
232
+
233
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything VISAM Demo", parents=[get_args_parser()])
234
+ parser.add_argument('--score_threshold', default=0.5, type=float)
235
+ parser.add_argument('--update_score_threshold', default=0.5, type=float)
236
+ parser.add_argument('--miss_tolerance', default=20, type=int)
237
+
238
+ parser.add_argument(
239
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
240
+ )
241
+ parser.add_argument("--video_path", type=str, required=True, help="path to image file")
242
+
243
+ args = parser.parse_args()
244
+
245
+ # make dir
246
+ if args.output_dir:
247
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
248
+
249
+ sam_predictor = SamPredictor(build_sam(checkpoint=args.sam_checkpoint))
250
+ _ = sam_predictor.model.to(device='cuda')
251
+
252
+ # load model and weights
253
+ detr, _, _ = build_model(args)
254
+ detr.track_embed.score_thr = args.update_score_threshold
255
+ detr.track_base = RuntimeTrackerBase(args.score_threshold, args.score_threshold, args.miss_tolerance)
256
+ checkpoint = torch.load(args.resume, map_location='cpu')
257
+ detr = load_model(detr, args.resume)
258
+ detr.eval()
259
+ detr = detr.cuda()
260
+
261
+ rank = int(os.environ.get('RLAUNCH_REPLICA', '0'))
262
+ ws = int(os.environ.get('RLAUNCH_REPLICA_TOTAL', '1'))
263
+
264
+ det = Detector(args, model=detr, vid=args.video_path, sam_predictor=sam_predictor)
265
+ det.detect(args.score_threshold)
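
The core of `Detector.detect` above is prompting SAM once per tracked box and keeping the mask with the highest predicted IoU. Below is a minimal, illustrative sketch of that pattern (the function name and arguments are ours, not from the repo); it assumes an initialized `SamPredictor`, an RGB frame, and absolute `(x1, y1, x2, y2)` boxes:

```python
import numpy as np

def masks_for_boxes(sam_predictor, frame_rgb, boxes_xyxy):
    """Sketch of the per-track SAM step: one box prompt per tracked object."""
    sam_predictor.set_image(frame_rgb)             # encode the frame once
    masks_out = []
    for box in boxes_xyxy:
        masks, iou_predictions, _ = sam_predictor.predict(box=np.asarray(box))
        best = int(iou_predictions.argmax())       # keep the highest-IoU mask
        masks_out.append(masks[best])
    sam_predictor.reset_image()                    # free the image embedding
    return masks_out
```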
external/Grounded-Segment-Anything/grounded_sam_whisper_demo.py ADDED
@@ -0,0 +1,260 @@
1
+ import argparse
2
+ import os
3
+ import copy
4
+
5
+ import numpy as np
6
+ import json
7
+ import torch
8
+ import torchvision
9
+ from PIL import Image, ImageDraw, ImageFont
10
+
11
+ # Grounding DINO
12
+ import GroundingDINO.groundingdino.datasets.transforms as T
13
+ from GroundingDINO.groundingdino.models import build_model
14
+ from GroundingDINO.groundingdino.util import box_ops
15
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
16
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
17
+
18
+ # segment anything
19
+ from segment_anything import build_sam, SamPredictor
20
+ import cv2
21
+ import numpy as np
22
+ import matplotlib.pyplot as plt
23
+
24
+ # whisper
25
+ import whisper
26
+
27
+
28
+ def load_image(image_path):
29
+ # load image
30
+ image_pil = Image.open(image_path).convert("RGB") # load image
31
+
32
+ transform = T.Compose(
33
+ [
34
+ T.RandomResize([800], max_size=1333),
35
+ T.ToTensor(),
36
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
37
+ ]
38
+ )
39
+ image, _ = transform(image_pil, None) # 3, h, w
40
+ return image_pil, image
41
+
42
+
43
+ def load_model(model_config_path, model_checkpoint_path, device):
44
+ args = SLConfig.fromfile(model_config_path)
45
+ args.device = device
46
+ model = build_model(args)
47
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
48
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
49
+ print(load_res)
50
+ _ = model.eval()
51
+ return model
52
+
53
+
54
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold, device="cpu"):
55
+ caption = caption.lower()
56
+ caption = caption.strip()
57
+ if not caption.endswith("."):
58
+ caption = caption + "."
59
+ model = model.to(device)
60
+ image = image.to(device)
61
+ with torch.no_grad():
62
+ outputs = model(image[None], captions=[caption])
63
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
64
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
65
+ logits.shape[0]
66
+
67
+ # filter output
68
+ logits_filt = logits.clone()
69
+ boxes_filt = boxes.clone()
70
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
71
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
72
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
73
+ logits_filt.shape[0]
74
+
75
+ # get phrase
76
+ tokenlizer = model.tokenizer
77
+ tokenized = tokenlizer(caption)
78
+ # build pred
79
+ pred_phrases = []
80
+ scores = []
81
+ for logit, box in zip(logits_filt, boxes_filt):
82
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
83
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
84
+ scores.append(logit.max().item())
85
+
86
+ return boxes_filt, torch.Tensor(scores), pred_phrases
87
+
88
+ def show_mask(mask, ax, random_color=False):
89
+ if random_color:
90
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
91
+ else:
92
+ color = np.array([30/255, 144/255, 255/255, 0.6])
93
+ h, w = mask.shape[-2:]
94
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
95
+ ax.imshow(mask_image)
96
+
97
+
98
+ def show_box(box, ax, label):
99
+ x0, y0 = box[0], box[1]
100
+ w, h = box[2] - box[0], box[3] - box[1]
101
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
102
+ ax.text(x0, y0, label)
103
+
104
+
105
+ def save_mask_data(output_dir, mask_list, box_list, label_list):
106
+ value = 0 # 0 for background
107
+
108
+ mask_img = torch.zeros(mask_list.shape[-2:])
109
+ for idx, mask in enumerate(mask_list):
110
+ mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
111
+ plt.figure(figsize=(10, 10))
112
+ plt.imshow(mask_img.numpy())
113
+ plt.axis('off')
114
+ plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
115
+
116
+ json_data = [{
117
+ 'value': value,
118
+ 'label': 'background'
119
+ }]
120
+ for label, box in zip(label_list, box_list):
121
+ value += 1
122
+ name, logit = label.split('(')
123
+ logit = logit[:-1] # the last is ')'
124
+ json_data.append({
125
+ 'value': value,
126
+ 'label': name,
127
+ 'logit': float(logit),
128
+ 'box': box.numpy().tolist(),
129
+ })
130
+ with open(os.path.join(output_dir, 'mask.json'), 'w') as f:
131
+ json.dump(json_data, f)
132
+
133
+
134
+ def speech_recognition(speech_file, model):
135
+ # whisper
136
+ # load audio and pad/trim it to fit 30 seconds
137
+ audio = whisper.load_audio(speech_file)
138
+ audio = whisper.pad_or_trim(audio)
139
+
140
+ # make log-Mel spectrogram and move to the same device as the model
141
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
142
+
143
+ # detect the spoken language
144
+ _, probs = model.detect_language(mel)
145
+ speech_language = max(probs, key=probs.get)
146
+
147
+ # decode the audio
148
+ options = whisper.DecodingOptions()
149
+ result = whisper.decode(model, mel, options)
150
+
151
+ # print the recognized text
152
+ speech_text = result.text
153
+ return speech_text, speech_language
154
+
155
+ if __name__ == "__main__":
156
+
157
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
158
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
159
+ parser.add_argument(
160
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
161
+ )
162
+ parser.add_argument(
163
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
164
+ )
165
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
166
+ parser.add_argument("--speech_file", type=str, required=True, help="speech file")
167
+ parser.add_argument(
168
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
169
+ )
170
+
171
+ parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
172
+ parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
173
+ parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
174
+
175
+ parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
176
+ args = parser.parse_args()
177
+
178
+ # cfg
179
+ config_file = args.config # change the path of the model config file
180
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
181
+ sam_checkpoint = args.sam_checkpoint
182
+ image_path = args.input_image
183
+ output_dir = args.output_dir
184
+ box_threshold = args.box_threshold
185
+ text_threshold = args.text_threshold
186
+ iou_threshold = args.iou_threshold
187
+ device = args.device
188
+
189
+ # load speech
190
+ whisper_model = whisper.load_model("base")
191
+ speech_text, speech_language = speech_recognition(args.speech_file, whisper_model)
192
+ print(f"speech_text: {speech_text}")
193
+ print(f"speech_language: {speech_language}")
194
+
195
+ # make dir
196
+ os.makedirs(output_dir, exist_ok=True)
197
+ # load image
198
+ image_pil, image = load_image(image_path)
199
+ # load model
200
+ model = load_model(config_file, grounded_checkpoint, device=device)
201
+
202
+ # visualize raw image
203
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
204
+
205
+ # run grounding dino model
206
+ text_prompt = speech_text
207
+ boxes_filt, scores, pred_phrases = get_grounding_output(
208
+ model, image, text_prompt, box_threshold, text_threshold, device=device
209
+ )
210
+
211
+ # initialize SAM
212
+ sam = build_sam(checkpoint=sam_checkpoint)
213
+ sam.to(device=device)
214
+ predictor = SamPredictor(sam)
215
+ image = cv2.imread(image_path)
216
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
217
+ predictor.set_image(image)
218
+
219
+ size = image_pil.size
220
+ H, W = size[1], size[0]
221
+ for i in range(boxes_filt.size(0)):
222
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
223
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
224
+ boxes_filt[i][2:] += boxes_filt[i][:2]
225
+
226
+ boxes_filt = boxes_filt.cpu()
227
+ # use NMS to handle overlapped boxes
228
+ print(f"Before NMS: {boxes_filt.shape[0]} boxes")
229
+ nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
230
+ boxes_filt = boxes_filt[nms_idx]
231
+ pred_phrases = [pred_phrases[idx] for idx in nms_idx]
232
+ print(f"After NMS: {boxes_filt.shape[0]} boxes")
233
+
234
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
235
+
236
+ masks, _, _ = predictor.predict_torch(
237
+ point_coords = None,
238
+ point_labels = None,
239
+ boxes = transformed_boxes.to(args.device),
240
+ multimask_output = False,
241
+ )
242
+
243
+ # draw output image
244
+ plt.figure(figsize=(10, 10))
245
+ plt.imshow(image)
246
+ for mask in masks:
247
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
248
+ for box, label in zip(boxes_filt, pred_phrases):
249
+ show_box(box.numpy(), plt.gca(), label)
250
+
251
+ plt.title(speech_text)
252
+ plt.axis('off')
253
+ plt.savefig(
254
+ os.path.join(output_dir, "grounded_sam_whisper_output.jpg"),
255
+ bbox_inches="tight", dpi=300, pad_inches=0.0
256
+ )
257
+
258
+
259
+ save_mask_data(output_dir, masks, boxes_filt, pred_phrases)
260
+
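
As in the other demos, this script converts Grounding DINO's normalized `(cx, cy, w, h)` boxes into absolute `(x1, y1, x2, y2)` corners and prunes overlaps with NMS before passing them to SAM. A compact sketch of that post-processing, with an illustrative function name and default threshold of our own:

```python
import torch
import torchvision

def postprocess_boxes(boxes_cxcywh, scores, W, H, iou_threshold=0.5):
    """Scale normalized center-format boxes to pixels, convert to corners, run NMS."""
    boxes = boxes_cxcywh * torch.tensor([W, H, W, H], dtype=boxes_cxcywh.dtype)
    boxes[:, :2] -= boxes[:, 2:] / 2      # (cx, cy) -> (x1, y1)
    boxes[:, 2:] += boxes[:, :2]          # (w, h)   -> (x2, y2)
    keep = torchvision.ops.nms(boxes, scores, iou_threshold)
    return boxes[keep], keep
```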
external/Grounded-Segment-Anything/grounded_sam_whisper_inpainting_demo.py ADDED
@@ -0,0 +1,286 @@
1
+ import argparse
2
+ import os
3
+ from warnings import warn
4
+
5
+ import numpy as np
6
+ import torch
7
+ from PIL import Image, ImageDraw, ImageFont
8
+ import litellm
9
+
10
+ # Grounding DINO
11
+ import GroundingDINO.groundingdino.datasets.transforms as T
12
+ from GroundingDINO.groundingdino.models import build_model
13
+ from GroundingDINO.groundingdino.util import box_ops
14
+ from GroundingDINO.groundingdino.util.slconfig import SLConfig
15
+ from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
16
+
17
+ # segment anything
18
+ from segment_anything import build_sam, SamPredictor
19
+ import cv2
20
+ import numpy as np
21
+ import matplotlib.pyplot as plt
22
+
23
+
24
+ # diffusers
25
+ import PIL
26
+ import requests
27
+ import torch
28
+ from io import BytesIO
29
+ from diffusers import StableDiffusionInpaintPipeline
30
+
31
+ # whisper
32
+ import whisper
33
+
34
+ # ChatGPT
35
+ import openai
36
+
37
+
38
+ def load_image(image_path):
39
+ # load image
40
+ image_pil = Image.open(image_path).convert("RGB") # load image
41
+
42
+ transform = T.Compose(
43
+ [
44
+ T.RandomResize([800], max_size=1333),
45
+ T.ToTensor(),
46
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
47
+ ]
48
+ )
49
+ image, _ = transform(image_pil, None) # 3, h, w
50
+ return image_pil, image
51
+
52
+
53
+ def load_model(model_config_path, model_checkpoint_path, device):
54
+ args = SLConfig.fromfile(model_config_path)
55
+ args.device = device
56
+ model = build_model(args)
57
+ checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
58
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
59
+ print(load_res)
60
+ _ = model.eval()
61
+ return model
62
+
63
+
64
+ def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True, device="cpu"):
65
+ caption = caption.lower()
66
+ caption = caption.strip()
67
+ if not caption.endswith("."):
68
+ caption = caption + "."
69
+ model = model.to(device)
70
+ image = image.to(device)
71
+ with torch.no_grad():
72
+ outputs = model(image[None], captions=[caption])
73
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
74
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
75
+ logits.shape[0]
76
+
77
+ # filter output
78
+ logits_filt = logits.clone()
79
+ boxes_filt = boxes.clone()
80
+ filt_mask = logits_filt.max(dim=1)[0] > box_threshold
81
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
82
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
83
+ logits_filt.shape[0]
84
+
85
+ # get phrase
86
+ tokenlizer = model.tokenizer
87
+ tokenized = tokenlizer(caption)
88
+ # build pred
89
+ pred_phrases = []
90
+ for logit, box in zip(logits_filt, boxes_filt):
91
+ pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
92
+ if with_logits:
93
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
94
+ else:
95
+ pred_phrases.append(pred_phrase)
96
+
97
+ return boxes_filt, pred_phrases
98
+
99
+ def show_mask(mask, ax, random_color=False):
100
+ if random_color:
101
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
102
+ else:
103
+ color = np.array([30/255, 144/255, 255/255, 0.6])
104
+ h, w = mask.shape[-2:]
105
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
106
+ ax.imshow(mask_image)
107
+
108
+
109
+ def show_box(box, ax, label):
110
+ x0, y0 = box[0], box[1]
111
+ w, h = box[2] - box[0], box[3] - box[1]
112
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
113
+ ax.text(x0, y0, label)
114
+
115
+
116
+ def speech_recognition(speech_file, model):
117
+ # whisper
118
+ # load audio and pad/trim it to fit 30 seconds
119
+ audio = whisper.load_audio(speech_file)
120
+ audio = whisper.pad_or_trim(audio)
121
+
122
+ # make log-Mel spectrogram and move to the same device as the model
123
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
124
+
125
+ # detect the spoken language
126
+ _, probs = model.detect_language(mel)
127
+ speech_language = max(probs, key=probs.get)
128
+
129
+ # decode the audio
130
+ options = whisper.DecodingOptions()
131
+ result = whisper.decode(model, mel, options)
132
+
133
+ # print the recognized text
134
+ speech_text = result.text
135
+ return speech_text, speech_language
136
+
137
+
138
+ def filter_prompts_with_chatgpt(caption, max_tokens=100, model="gpt-3.5-turbo"):
139
+ prompt = [
140
+ {
141
+ 'role': 'system',
142
+ 'content': f"Extract the main object to be replaced and mark it as 'main_object', " + \
143
+ f"Extract the remaining part as 'other prompt' " + \
144
+ f"Return (main_object, other prompt)" + \
145
+ f'Given caption: {caption}.'
146
+ }
147
+ ]
148
+ response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
149
+ reply = response['choices'][0]['message']['content']
150
+ try:
151
+ det_prompt, inpaint_prompt = reply.split('\n')[0].split(':')[-1].strip(), reply.split('\n')[1].split(':')[-1].strip()
152
+ except:
153
+ warn(f"Failed to extract tags from caption") # use caption as det_prompt, inpaint_prompt
154
+ det_prompt, inpaint_prompt = caption, caption
155
+ return det_prompt, inpaint_prompt
156
+
157
+
158
+ if __name__ == "__main__":
159
+
160
+ parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
161
+ parser.add_argument("--config", type=str, required=True, help="path to config file")
162
+ parser.add_argument(
163
+ "--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
164
+ )
165
+ parser.add_argument(
166
+ "--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
167
+ )
168
+ parser.add_argument("--input_image", type=str, required=True, help="path to image file")
169
+ parser.add_argument(
170
+ "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
171
+ )
172
+ parser.add_argument("--cache_dir", type=str, default=None, help="save your huggingface large model cache")
173
+ parser.add_argument("--det_speech_file", type=str, help="grounding speech file")
174
+ parser.add_argument("--inpaint_speech_file", type=str, help="inpaint speech file")
175
+ parser.add_argument("--prompt_speech_file", type=str, help="prompt speech file, no need to provide det_speech_file")
176
+ parser.add_argument("--enable_chatgpt", action="store_true", help="enable chatgpt")
177
+ parser.add_argument("--openai_key", type=str, help="key for chatgpt")
178
+ parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
179
+ parser.add_argument("--whisper_model", type=str, default="small", help="whisper model version: tiny, base, small, medium, large")
180
+ parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
181
+ parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
182
+ parser.add_argument("--inpaint_mode", type=str, default="first", help="inpaint mode")
183
+ parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
184
+ parser.add_argument("--prompt_extra", type=str, default=" high resolution, real scene", help="extra prompt for inpaint")
185
+ args = parser.parse_args()
186
+
187
+ # cfg
188
+ config_file = args.config # change the path of the model config file
189
+ grounded_checkpoint = args.grounded_checkpoint # change the path of the model
190
+ sam_checkpoint = args.sam_checkpoint
191
+ image_path = args.input_image
192
+
193
+ output_dir = args.output_dir
194
+ cache_dir=args.cache_dir
195
+ # if not os.path.exists(cache_dir):
196
+ # print(f"create your cache dir:{cache_dir}")
197
+ # os.mkdir(cache_dir)
198
+ box_threshold = args.box_threshold
199
+ text_threshold = args.text_threshold
200
+ inpaint_mode = args.inpaint_mode
201
+ device = args.device
202
+
203
+ # make dir
204
+ os.makedirs(output_dir, exist_ok=True)
205
+ # load image
206
+ image_pil, image = load_image(image_path)
207
+ # load model
208
+ model = load_model(config_file, grounded_checkpoint, device=device)
209
+
210
+ # visualize raw image
211
+ image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
212
+
213
+ # recognize speech
214
+ whisper_model = whisper.load_model(args.whisper_model)
215
+
216
+ if args.enable_chatgpt:
217
+ openai.api_key = args.openai_key
218
+ if args.openai_proxy:
219
+ openai.proxy = {"http": args.openai_proxy, "https": args.openai_proxy}
220
+ speech_text, _ = speech_recognition(args.prompt_speech_file, whisper_model)
221
+ det_prompt, inpaint_prompt = filter_prompts_with_chatgpt(speech_text)
222
+ inpaint_prompt += args.prompt_extra
223
+ print(f"det_prompt: {det_prompt}, inpaint_prompt: {inpaint_prompt}")
224
+ else:
225
+ det_prompt, det_speech_language = speech_recognition(args.det_speech_file, whisper_model)
226
+ inpaint_prompt, inpaint_speech_language = speech_recognition(args.inpaint_speech_file, whisper_model)
227
+ print(f"det_prompt: {det_prompt}, using language: {det_speech_language}")
228
+ print(f"inpaint_prompt: {inpaint_prompt}, using language: {inpaint_speech_language}")
229
+
230
+ # run grounding dino model
231
+ boxes_filt, pred_phrases = get_grounding_output(
232
+ model, image, det_prompt, box_threshold, text_threshold, device=device
233
+ )
234
+
235
+ # initialize SAM
236
+ predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
237
+ image = cv2.imread(image_path)
238
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
239
+ predictor.set_image(image)
240
+
241
+ size = image_pil.size
242
+ H, W = size[1], size[0]
243
+ for i in range(boxes_filt.size(0)):
244
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
245
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
246
+ boxes_filt[i][2:] += boxes_filt[i][:2]
247
+
248
+ boxes_filt = boxes_filt.cpu()
249
+ transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
250
+
251
+ masks, _, _ = predictor.predict_torch(
252
+ point_coords = None,
253
+ point_labels = None,
254
+ boxes = transformed_boxes.to(device),
255
+ multimask_output = False,
256
+ )
257
+
258
+ # masks: [1, 1, 512, 512]
259
+
260
+ # inpainting pipeline
261
+ if inpaint_mode == 'merge':
262
+ masks = torch.sum(masks, dim=0).unsqueeze(0)
263
+ masks = torch.where(masks > 0, True, False)
264
+ mask = masks[0][0].cpu().numpy() # simply choose the first mask; this will be refined in a future release
265
+ mask_pil = Image.fromarray(mask)
266
+ image_pil = Image.fromarray(image)
267
+
268
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
269
+ "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16,cache_dir=cache_dir
270
+ )
271
+ pipe = pipe.to("cuda")
272
+
273
+ # prompt = "A sofa, high quality, detailed"
274
+ image = pipe(prompt=inpaint_prompt, image=image_pil, mask_image=mask_pil).images[0]
275
+ image.save(os.path.join(output_dir, "grounded_sam_inpainting_output.jpg"))
276
+
277
+ # draw output image
278
+ # plt.figure(figsize=(10, 10))
279
+ # plt.imshow(image)
280
+ # for mask in masks:
281
+ # show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
282
+ # for box, label in zip(boxes_filt, pred_phrases):
283
+ # show_box(box.numpy(), plt.gca(), label)
284
+ # plt.axis('off')
285
+ # plt.savefig(os.path.join(output_dir, "grounded_sam_output.jpg"), bbox_inches="tight")
286
+
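
The inpainting handoff above condenses to the pattern below: a boolean SAM mask becomes a PIL mask image and is fed, together with the frame and the speech/ChatGPT-derived prompt, to Stable Diffusion inpainting. This is a hedged sketch rather than the repo's exact code; converting the boolean mask to an 8-bit (0/255) image is a common convention that the pipeline accepts.

```python
import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

def inpaint_with_mask(image_rgb, mask_bool, prompt, device="cuda"):
    """Sketch: repaint the masked region of an RGB frame according to a text prompt."""
    mask_pil = Image.fromarray(mask_bool.astype(np.uint8) * 255)   # white = region to repaint
    image_pil = Image.fromarray(image_rgb)
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
    ).to(device)
    return pipe(prompt=prompt, image=image_pil, mask_image=mask_pil).images[0]
```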
external/Grounded-Segment-Anything/playground/README.md ADDED
@@ -0,0 +1,19 @@
1
+ ## Playground
2
+
3
+ We will try more interesting **base models** and **build more fun demos** in the playground. Here we will:
4
+
5
+ - **Simplify the demo code** to make it easier for users to get started.
6
+ - **Keep complete usage notes** and some pitfalls to reduce the burden on users.
7
+
8
+ ## Table of Contents
9
+ - [DeepFloyd: Text-to-Image Generation](./DeepFloyd/)
10
+ - [Dream: Text-to-Image Generation](./DeepFloyd/dream.py)
11
+ - [Style Transfer](./DeepFloyd/style_transfer.py)
12
+ - [Paint by Example: Exemplar-based Image Editing with Diffusion Models](./PaintByExample/)
13
+ - [Diffuser Demo](./PaintByExample/paint_by_example.py)
14
+ - [PaintByExample with SAM](./PaintByExample/sam_paint_by_example.py)
15
+ - [LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions](./LaMa/)
16
+ - [LaMa Demo](./LaMa/lama_inpaint_demo.py)
17
+ - [LaMa with SAM](./LaMa/sam_lama.py)
18
+ - [RePaint: Inpainting using Denoising Diffusion Probabilistic Models](./RePaint/)
19
+ - [RePaint Demo](./RePaint/repaint.py)
external/Grounded-Segment-Anything/recognize-anything/.gitignore ADDED
@@ -0,0 +1,140 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # checkpoint
132
+ *.pth
133
+ outputs/
134
+
135
+ # Editor
136
+ .idea/
137
+ .vscode/
138
+
139
+ # gradio cache
140
+ gradio_cached_examples/
external/Grounded-Segment-Anything/recognize-anything/LICENSE ADDED
@@ -0,0 +1,202 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+
190
+ Copyright (c) 2022 OPPO
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ https://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
external/Grounded-Segment-Anything/recognize-anything/MANIFEST.in ADDED
@@ -0,0 +1,3 @@
1
+ include ram/configs/*.json
2
+ include ram/configs/swin/*.json
3
+ include ram/data/*.txt
external/Grounded-Segment-Anything/recognize-anything/NOTICE.txt ADDED
@@ -0,0 +1,481 @@
1
+ # NOTICES AND INFORMATION
2
+
3
+ This software incorporates material from third parties.
4
+
5
+ - BLIP
6
+ - Swin Transformer
7
+ - pytorch-image-models
8
+ - transformers
9
+
10
+
11
+ ## Utility: BLIP
12
+
13
+ ### BLIP
14
+
15
+ **Source**: https://github.com/salesforce/BLIP
16
+
17
+ Copyright (c) 2022, Salesforce.com, Inc.
18
+ All rights reserved.
19
+
20
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
21
+
22
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
23
+
24
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
25
+
26
+ * Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
27
+
28
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+
30
+
31
+ ## Utility: Swin Transformer
32
+
33
+ ### Swin Transformer
34
+
35
+ **Source**: https://github.com/microsoft/Swin-Transformer
36
+
37
+ MIT License
38
+
39
+ Copyright (c) Microsoft Corporation.
40
+
41
+ Permission is hereby granted, free of charge, to any person obtaining a copy
42
+ of this software and associated documentation files (the "Software"), to deal
43
+ in the Software without restriction, including without limitation the rights
44
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
45
+ copies of the Software, and to permit persons to whom the Software is
46
+ furnished to do so, subject to the following conditions:
47
+
48
+ The above copyright notice and this permission notice shall be included in all
49
+ copies or substantial portions of the Software.
50
+
51
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
52
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
53
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
54
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
55
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
56
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
57
+ SOFTWARE
58
+
59
+
60
+ ## Utility: pytorch-image-models
61
+
62
+ ### pytorch-image-models
63
+
64
+ **Source**: https://github.com/huggingface/pytorch-image-models
65
+
66
+
67
+
68
+ Apache License
69
+ Version 2.0, January 2004
70
+ http://www.apache.org/licenses/
71
+
72
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
73
+
74
+ 1. Definitions.
75
+
76
+ "License" shall mean the terms and conditions for use, reproduction,
77
+ and distribution as defined by Sections 1 through 9 of this document.
78
+
79
+ "Licensor" shall mean the copyright owner or entity authorized by
80
+ the copyright owner that is granting the License.
81
+
82
+ "Legal Entity" shall mean the union of the acting entity and all
83
+ other entities that control, are controlled by, or are under common
84
+ control with that entity. For the purposes of this definition,
85
+ "control" means (i) the power, direct or indirect, to cause the
86
+ direction or management of such entity, whether by contract or
87
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
88
+ outstanding shares, or (iii) beneficial ownership of such entity.
89
+
90
+ "You" (or "Your") shall mean an individual or Legal Entity
91
+ exercising permissions granted by this License.
92
+
93
+ "Source" form shall mean the preferred form for making modifications,
94
+ including but not limited to software source code, documentation
95
+ source, and configuration files.
96
+
97
+ "Object" form shall mean any form resulting from mechanical
98
+ transformation or translation of a Source form, including but
99
+ not limited to compiled object code, generated documentation,
100
+ and conversions to other media types.
101
+
102
+ "Work" shall mean the work of authorship, whether in Source or
103
+ Object form, made available under the License, as indicated by a
104
+ copyright notice that is included in or attached to the work
105
+ (an example is provided in the Appendix below).
106
+
107
+ "Derivative Works" shall mean any work, whether in Source or Object
108
+ form, that is based on (or derived from) the Work and for which the
109
+ editorial revisions, annotations, elaborations, or other modifications
110
+ represent, as a whole, an original work of authorship. For the purposes
111
+ of this License, Derivative Works shall not include works that remain
112
+ separable from, or merely link (or bind by name) to the interfaces of,
113
+ the Work and Derivative Works thereof.
114
+
115
+ "Contribution" shall mean any work of authorship, including
116
+ the original version of the Work and any modifications or additions
117
+ to that Work or Derivative Works thereof, that is intentionally
118
+ submitted to Licensor for inclusion in the Work by the copyright owner
119
+ or by an individual or Legal Entity authorized to submit on behalf of
120
+ the copyright owner. For the purposes of this definition, "submitted"
121
+ means any form of electronic, verbal, or written communication sent
122
+ to the Licensor or its representatives, including but not limited to
123
+ communication on electronic mailing lists, source code control systems,
124
+ and issue tracking systems that are managed by, or on behalf of, the
125
+ Licensor for the purpose of discussing and improving the Work, but
126
+ excluding communication that is conspicuously marked or otherwise
127
+ designated in writing by the copyright owner as "Not a Contribution."
128
+
129
+ "Contributor" shall mean Licensor and any individual or Legal Entity
130
+ on behalf of whom a Contribution has been received by Licensor and
131
+ subsequently incorporated within the Work.
132
+
133
+ 2. Grant of Copyright License. Subject to the terms and conditions of
134
+ this License, each Contributor hereby grants to You a perpetual,
135
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
136
+ copyright license to reproduce, prepare Derivative Works of,
137
+ publicly display, publicly perform, sublicense, and distribute the
138
+ Work and such Derivative Works in Source or Object form.
139
+
140
+ 3. Grant of Patent License. Subject to the terms and conditions of
141
+ this License, each Contributor hereby grants to You a perpetual,
142
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
143
+ (except as stated in this section) patent license to make, have made,
144
+ use, offer to sell, sell, import, and otherwise transfer the Work,
145
+ where such license applies only to those patent claims licensable
146
+ by such Contributor that are necessarily infringed by their
147
+ Contribution(s) alone or by combination of their Contribution(s)
148
+ with the Work to which such Contribution(s) was submitted. If You
149
+ institute patent litigation against any entity (including a
150
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
151
+ or a Contribution incorporated within the Work constitutes direct
152
+ or contributory patent infringement, then any patent licenses
153
+ granted to You under this License for that Work shall terminate
154
+ as of the date such litigation is filed.
155
+
156
+ 4. Redistribution. You may reproduce and distribute copies of the
157
+ Work or Derivative Works thereof in any medium, with or without
158
+ modifications, and in Source or Object form, provided that You
159
+ meet the following conditions:
160
+
161
+ (a) You must give any other recipients of the Work or
162
+ Derivative Works a copy of this License; and
163
+
164
+ (b) You must cause any modified files to carry prominent notices
165
+ stating that You changed the files; and
166
+
167
+ (c) You must retain, in the Source form of any Derivative Works
168
+ that You distribute, all copyright, patent, trademark, and
169
+ attribution notices from the Source form of the Work,
170
+ excluding those notices that do not pertain to any part of
171
+ the Derivative Works; and
172
+
173
+ (d) If the Work includes a "NOTICE" text file as part of its
174
+ distribution, then any Derivative Works that You distribute must
175
+ include a readable copy of the attribution notices contained
176
+ within such NOTICE file, excluding those notices that do not
177
+ pertain to any part of the Derivative Works, in at least one
178
+ of the following places: within a NOTICE text file distributed
179
+ as part of the Derivative Works; within the Source form or
180
+ documentation, if provided along with the Derivative Works; or,
181
+ within a display generated by the Derivative Works, if and
182
+ wherever such third-party notices normally appear. The contents
183
+ of the NOTICE file are for informational purposes only and
184
+ do not modify the License. You may add Your own attribution
185
+ notices within Derivative Works that You distribute, alongside
186
+ or as an addendum to the NOTICE text from the Work, provided
187
+ that such additional attribution notices cannot be construed
188
+ as modifying the License.
189
+
190
+ You may add Your own copyright statement to Your modifications and
191
+ may provide additional or different license terms and conditions
192
+ for use, reproduction, or distribution of Your modifications, or
193
+ for any such Derivative Works as a whole, provided Your use,
194
+ reproduction, and distribution of the Work otherwise complies with
195
+ the conditions stated in this License.
196
+
197
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
198
+ any Contribution intentionally submitted for inclusion in the Work
199
+ by You to the Licensor shall be under the terms and conditions of
200
+ this License, without any additional terms or conditions.
201
+ Notwithstanding the above, nothing herein shall supersede or modify
202
+ the terms of any separate license agreement you may have executed
203
+ with Licensor regarding such Contributions.
204
+
205
+ 6. Trademarks. This License does not grant permission to use the trade
206
+ names, trademarks, service marks, or product names of the Licensor,
207
+ except as required for reasonable and customary use in describing the
208
+ origin of the Work and reproducing the content of the NOTICE file.
209
+
210
+ 7. Disclaimer of Warranty. Unless required by applicable law or
211
+ agreed to in writing, Licensor provides the Work (and each
212
+ Contributor provides its Contributions) on an "AS IS" BASIS,
213
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
214
+ implied, including, without limitation, any warranties or conditions
215
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
216
+ PARTICULAR PURPOSE. You are solely responsible for determining the
217
+ appropriateness of using or redistributing the Work and assume any
218
+ risks associated with Your exercise of permissions under this License.
219
+
220
+ 8. Limitation of Liability. In no event and under no legal theory,
221
+ whether in tort (including negligence), contract, or otherwise,
222
+ unless required by applicable law (such as deliberate and grossly
223
+ negligent acts) or agreed to in writing, shall any Contributor be
224
+ liable to You for damages, including any direct, indirect, special,
225
+ incidental, or consequential damages of any character arising as a
226
+ result of this License or out of the use or inability to use the
227
+ Work (including but not limited to damages for loss of goodwill,
228
+ work stoppage, computer failure or malfunction, or any and all
229
+ other commercial damages or losses), even if such Contributor
230
+ has been advised of the possibility of such damages.
231
+
232
+ 9. Accepting Warranty or Additional Liability. While redistributing
233
+ the Work or Derivative Works thereof, You may choose to offer,
234
+ and charge a fee for, acceptance of support, warranty, indemnity,
235
+ or other liability obligations and/or rights consistent with this
236
+ License. However, in accepting such obligations, You may act only
237
+ on Your own behalf and on Your sole responsibility, not on behalf
238
+ of any other Contributor, and only if You agree to indemnify,
239
+ defend, and hold each Contributor harmless for any liability
240
+ incurred by, or claims asserted against, such Contributor by reason
241
+ of your accepting any such warranty or additional liability.
242
+
243
+ END OF TERMS AND CONDITIONS
244
+
245
+ APPENDIX: How to apply the Apache License to your work.
246
+
247
+ To apply the Apache License to your work, attach the following
248
+ boilerplate notice, with the fields enclosed by brackets "{}"
249
+ replaced with your own identifying information. (Don't include
250
+ the brackets!) The text should be enclosed in the appropriate
251
+ comment syntax for the file format. We also recommend that a
252
+ file or class name and description of purpose be included on the
253
+ same "printed page" as the copyright notice for easier
254
+ identification within third-party archives.
255
+
256
+ Copyright 2019 Ross Wightman
257
+
258
+ Licensed under the Apache License, Version 2.0 (the "License");
259
+ you may not use this file except in compliance with the License.
260
+ You may obtain a copy of the License at
261
+
262
+ http://www.apache.org/licenses/LICENSE-2.0
263
+
264
+ Unless required by applicable law or agreed to in writing, software
265
+ distributed under the License is distributed on an "AS IS" BASIS,
266
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
267
+ See the License for the specific language governing permissions and
268
+ limitations under the License.
269
+
270
+
271
+
272
+
273
+ ## Utility: transformers
274
+
275
+ ### transformers
276
+
277
+ **Source**: https://github.com/huggingface/transformers
278
+
279
+ Copyright 2018- The Hugging Face team. All rights reserved.
280
+
281
+ Apache License
282
+ Version 2.0, January 2004
283
+ http://www.apache.org/licenses/
284
+
285
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
286
+
287
+ 1. Definitions.
288
+
289
+ "License" shall mean the terms and conditions for use, reproduction,
290
+ and distribution as defined by Sections 1 through 9 of this document.
291
+
292
+ "Licensor" shall mean the copyright owner or entity authorized by
293
+ the copyright owner that is granting the License.
294
+
295
+ "Legal Entity" shall mean the union of the acting entity and all
296
+ other entities that control, are controlled by, or are under common
297
+ control with that entity. For the purposes of this definition,
298
+ "control" means (i) the power, direct or indirect, to cause the
299
+ direction or management of such entity, whether by contract or
300
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
301
+ outstanding shares, or (iii) beneficial ownership of such entity.
302
+
303
+ "You" (or "Your") shall mean an individual or Legal Entity
304
+ exercising permissions granted by this License.
305
+
306
+ "Source" form shall mean the preferred form for making modifications,
307
+ including but not limited to software source code, documentation
308
+ source, and configuration files.
309
+
310
+ "Object" form shall mean any form resulting from mechanical
311
+ transformation or translation of a Source form, including but
312
+ not limited to compiled object code, generated documentation,
313
+ and conversions to other media types.
314
+
315
+ "Work" shall mean the work of authorship, whether in Source or
316
+ Object form, made available under the License, as indicated by a
317
+ copyright notice that is included in or attached to the work
318
+ (an example is provided in the Appendix below).
319
+
320
+ "Derivative Works" shall mean any work, whether in Source or Object
321
+ form, that is based on (or derived from) the Work and for which the
322
+ editorial revisions, annotations, elaborations, or other modifications
323
+ represent, as a whole, an original work of authorship. For the purposes
324
+ of this License, Derivative Works shall not include works that remain
325
+ separable from, or merely link (or bind by name) to the interfaces of,
326
+ the Work and Derivative Works thereof.
327
+
328
+ "Contribution" shall mean any work of authorship, including
329
+ the original version of the Work and any modifications or additions
330
+ to that Work or Derivative Works thereof, that is intentionally
331
+ submitted to Licensor for inclusion in the Work by the copyright owner
332
+ or by an individual or Legal Entity authorized to submit on behalf of
333
+ the copyright owner. For the purposes of this definition, "submitted"
334
+ means any form of electronic, verbal, or written communication sent
335
+ to the Licensor or its representatives, including but not limited to
336
+ communication on electronic mailing lists, source code control systems,
337
+ and issue tracking systems that are managed by, or on behalf of, the
338
+ Licensor for the purpose of discussing and improving the Work, but
339
+ excluding communication that is conspicuously marked or otherwise
340
+ designated in writing by the copyright owner as "Not a Contribution."
341
+
342
+ "Contributor" shall mean Licensor and any individual or Legal Entity
343
+ on behalf of whom a Contribution has been received by Licensor and
344
+ subsequently incorporated within the Work.
345
+
346
+ 2. Grant of Copyright License. Subject to the terms and conditions of
347
+ this License, each Contributor hereby grants to You a perpetual,
348
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
349
+ copyright license to reproduce, prepare Derivative Works of,
350
+ publicly display, publicly perform, sublicense, and distribute the
351
+ Work and such Derivative Works in Source or Object form.
352
+
353
+ 3. Grant of Patent License. Subject to the terms and conditions of
354
+ this License, each Contributor hereby grants to You a perpetual,
355
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
356
+ (except as stated in this section) patent license to make, have made,
357
+ use, offer to sell, sell, import, and otherwise transfer the Work,
358
+ where such license applies only to those patent claims licensable
359
+ by such Contributor that are necessarily infringed by their
360
+ Contribution(s) alone or by combination of their Contribution(s)
361
+ with the Work to which such Contribution(s) was submitted. If You
362
+ institute patent litigation against any entity (including a
363
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
364
+ or a Contribution incorporated within the Work constitutes direct
365
+ or contributory patent infringement, then any patent licenses
366
+ granted to You under this License for that Work shall terminate
367
+ as of the date such litigation is filed.
368
+
369
+ 4. Redistribution. You may reproduce and distribute copies of the
370
+ Work or Derivative Works thereof in any medium, with or without
371
+ modifications, and in Source or Object form, provided that You
372
+ meet the following conditions:
373
+
374
+ (a) You must give any other recipients of the Work or
375
+ Derivative Works a copy of this License; and
376
+
377
+ (b) You must cause any modified files to carry prominent notices
378
+ stating that You changed the files; and
379
+
380
+ (c) You must retain, in the Source form of any Derivative Works
381
+ that You distribute, all copyright, patent, trademark, and
382
+ attribution notices from the Source form of the Work,
383
+ excluding those notices that do not pertain to any part of
384
+ the Derivative Works; and
385
+
386
+ (d) If the Work includes a "NOTICE" text file as part of its
387
+ distribution, then any Derivative Works that You distribute must
388
+ include a readable copy of the attribution notices contained
389
+ within such NOTICE file, excluding those notices that do not
390
+ pertain to any part of the Derivative Works, in at least one
391
+ of the following places: within a NOTICE text file distributed
392
+ as part of the Derivative Works; within the Source form or
393
+ documentation, if provided along with the Derivative Works; or,
394
+ within a display generated by the Derivative Works, if and
395
+ wherever such third-party notices normally appear. The contents
396
+ of the NOTICE file are for informational purposes only and
397
+ do not modify the License. You may add Your own attribution
398
+ notices within Derivative Works that You distribute, alongside
399
+ or as an addendum to the NOTICE text from the Work, provided
400
+ that such additional attribution notices cannot be construed
401
+ as modifying the License.
402
+
403
+ You may add Your own copyright statement to Your modifications and
404
+ may provide additional or different license terms and conditions
405
+ for use, reproduction, or distribution of Your modifications, or
406
+ for any such Derivative Works as a whole, provided Your use,
407
+ reproduction, and distribution of the Work otherwise complies with
408
+ the conditions stated in this License.
409
+
410
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
411
+ any Contribution intentionally submitted for inclusion in the Work
412
+ by You to the Licensor shall be under the terms and conditions of
413
+ this License, without any additional terms or conditions.
414
+ Notwithstanding the above, nothing herein shall supersede or modify
415
+ the terms of any separate license agreement you may have executed
416
+ with Licensor regarding such Contributions.
417
+
418
+ 6. Trademarks. This License does not grant permission to use the trade
419
+ names, trademarks, service marks, or product names of the Licensor,
420
+ except as required for reasonable and customary use in describing the
421
+ origin of the Work and reproducing the content of the NOTICE file.
422
+
423
+ 7. Disclaimer of Warranty. Unless required by applicable law or
424
+ agreed to in writing, Licensor provides the Work (and each
425
+ Contributor provides its Contributions) on an "AS IS" BASIS,
426
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
427
+ implied, including, without limitation, any warranties or conditions
428
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
429
+ PARTICULAR PURPOSE. You are solely responsible for determining the
430
+ appropriateness of using or redistributing the Work and assume any
431
+ risks associated with Your exercise of permissions under this License.
432
+
433
+ 8. Limitation of Liability. In no event and under no legal theory,
434
+ whether in tort (including negligence), contract, or otherwise,
435
+ unless required by applicable law (such as deliberate and grossly
436
+ negligent acts) or agreed to in writing, shall any Contributor be
437
+ liable to You for damages, including any direct, indirect, special,
438
+ incidental, or consequential damages of any character arising as a
439
+ result of this License or out of the use or inability to use the
440
+ Work (including but not limited to damages for loss of goodwill,
441
+ work stoppage, computer failure or malfunction, or any and all
442
+ other commercial damages or losses), even if such Contributor
443
+ has been advised of the possibility of such damages.
444
+
445
+ 9. Accepting Warranty or Additional Liability. While redistributing
446
+ the Work or Derivative Works thereof, You may choose to offer,
447
+ and charge a fee for, acceptance of support, warranty, indemnity,
448
+ or other liability obligations and/or rights consistent with this
449
+ License. However, in accepting such obligations, You may act only
450
+ on Your own behalf and on Your sole responsibility, not on behalf
451
+ of any other Contributor, and only if You agree to indemnify,
452
+ defend, and hold each Contributor harmless for any liability
453
+ incurred by, or claims asserted against, such Contributor by reason
454
+ of your accepting any such warranty or additional liability.
455
+
456
+ END OF TERMS AND CONDITIONS
457
+
458
+ APPENDIX: How to apply the Apache License to your work.
459
+
460
+ To apply the Apache License to your work, attach the following
461
+ boilerplate notice, with the fields enclosed by brackets "[]"
462
+ replaced with your own identifying information. (Don't include
463
+ the brackets!) The text should be enclosed in the appropriate
464
+ comment syntax for the file format. We also recommend that a
465
+ file or class name and description of purpose be included on the
466
+ same "printed page" as the copyright notice for easier
467
+ identification within third-party archives.
468
+
469
+ Copyright [yyyy] [name of copyright owner]
470
+
471
+ Licensed under the Apache License, Version 2.0 (the "License");
472
+ you may not use this file except in compliance with the License.
473
+ You may obtain a copy of the License at
474
+
475
+ http://www.apache.org/licenses/LICENSE-2.0
476
+
477
+ Unless required by applicable law or agreed to in writing, software
478
+ distributed under the License is distributed on an "AS IS" BASIS,
479
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
480
+ See the License for the specific language governing permissions and
481
+ limitations under the License.
external/Grounded-Segment-Anything/recognize-anything/README.md ADDED
@@ -0,0 +1,601 @@
1
+ # <font size=8> :label: Recognize Anything Model </font>
2
+
3
+ This project aims to develop a series of open-source and strong fundamental image recognition models.
4
+
5
+ [![Training Dataset](https://img.shields.io/badge/📦-Training%20Dataset-orange.svg)](#open_book-training-datasets)
6
+ [![Tag List](https://img.shields.io/badge/🏷️-4585%20Tags-green.svg)](ram/data/ram_tag_list.txt)
7
+ [![Web Demo](https://img.shields.io/badge/🤗-HuggingFace%20Space-cyan.svg)](https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text)
8
+ [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mhd-medfa/recognize-anything/blob/main/recognize_anything_demo.ipynb)
9
+ [![Open in Bohrium](https://cdn.dp.tech/bohrium/web/static/images/open-in-bohrium.svg)](https://bohrium.dp.tech/notebooks/63116114759)
10
+
11
+
12
+ - **Recognize Anything Plus Model (RAM++)** [[Paper](https://arxiv.org/abs/2310.15200)] <br>
13
+
14
+ RAM++ is the next generation of RAM, which can **recognize any category with high accuracy**, including **both predefined common categories and diverse open-set categories**.
15
+
16
+ - **Recognize Anything Model (RAM)** [[Paper](https://arxiv.org/abs/2306.03514)][[Demo](https://huggingface.co/spaces/xinyu1205/recognize-anything)] <br>
17
+
18
+ RAM is an image tagging model, which can **recognize any common category with high accuracy**.
19
+
20
+ RAM is accepted at **CVPR 2024 Multimodal Foundation Models Workshop**.
21
+
22
+ - **Tag2Text (ICLR 2024)** [[Paper](https://arxiv.org/abs/2303.05657)] [[Demo](https://huggingface.co/spaces/xinyu1205/recognize-anything)]<br>
23
+
24
+ Tag2Text is a vision-language model guided by tagging, which can **support tagging and comprehensive captioning simultaneously**.
25
+
26
+ Tag2Text is accepted at **ICLR 2024!** See you in Vienna!
27
+
28
+
29
+
30
+
31
+ ## :bulb: Highlight
32
+
33
+ ### **Superior Image Recognition Capability**
34
+
35
+ RAM++ outperforms existing SOTA fundamental image recognition models on common tag categories, uncommon tag categories, and human-object interaction phrases.
36
+
37
+ <p align="center">
38
+ <table class="tg">
39
+ <tr>
40
+ <td class="tg-c3ow"><img src="images/ram_plus_compare.jpg" align="center" width="700" ></td>
41
+ </tr>
42
+ </table>
43
+ <p align="center">Comparison of zero-shot image recognition performance.</p>
44
+ </p>
45
+
46
+
47
+ ### **Strong Visual Semantic Analysis**
48
+
49
+
50
+ We have combined Tag2Text and RAM with localization models (Grounding-DINO and SAM) and developed a strong visual semantic analysis pipeline in the [Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything) project.
51
+
52
+ ![](./images/ram_grounded_sam.jpg)
53
+
54
+
55
+ ## :sunrise: Model Zoo
56
+
57
+ <details>
58
+ <summary><font size="3" style="font-weight:bold;">
59
+ RAM++
60
+ </font></summary>
61
+
62
+ RAM++ is the next generation of RAM, which can recognize any category with high accuracy, including both predefined common categories and diverse open-set categories.
63
+
64
+
65
+ - **For Common Predefined Categories.** RAM++ exhibits exceptional image tagging capabilities with powerful zero-shot generalization, maintaining the same capabilities as RAM.
66
+ <!-- - RAM++ showcases impressive zero-shot performance, significantly outperforming CLIP and BLIP.
67
+ - RAM++ even surpasses the fully supervised manners (ML-Decoder).
68
+ - RAM++ exhibits competitive performance with the Google tagging API. -->
69
+ - **For Diverse Open-set Categories.** RAM++ achieves notable improvements over CLIP and RAM.
70
+ <!-- - RAM++ integrate the image-tags-text triplets within a unified alignment framework.
71
+ - RAM++ pioneer the intergation of LLM's knowledge into image tagging training. -->
72
+
73
+
74
+ <p align="center">
75
+ <table class="tg">
76
+ <tr>
77
+ <td class="tg-c3ow"><img src="images/ram_plus_experiment.png" align="center" width="800" ></td>
78
+ </tr>
79
+ </table>
80
+ <p align="center">(Green color means fully supervised learning and others means zero-shot performance.)</p>
81
+ </p>
82
+
83
+
84
+ <p align="center">
85
+ <table class="tg">
86
+ <tr>
87
+ <td class="tg-c3ow"><img src="images/ram_plus_visualization.jpg" align="center" width="800" ></td>
88
+ </tr>
89
+ </table>
90
+ <p align="center">RAM++ demonstrate a significant improvement in open-set category recognition.</p>
91
+ </p>
92
+
93
+
94
+ </details>
95
+
96
+
97
+
98
+ <details>
99
+ <summary><font size="3" style="font-weight:bold;">
100
+ RAM
101
+ </font></summary>
102
+
103
+
104
+ RAM is a strong image tagging model, which can recognize any common category with high accuracy.
105
+ - **Strong and general.** RAM exhibits exceptional image tagging capabilities with powerful zero-shot generalization;
106
+ - RAM showcases impressive zero-shot performance, significantly outperforming CLIP and BLIP.
107
+ - RAM even surpasses fully supervised methods (ML-Decoder).
108
+ - RAM exhibits competitive performance with the Google tagging API.
109
+ - **Reproducible and affordable.** RAM has a low reproduction cost thanks to its open-source and annotation-free dataset;
110
+ - **Flexible and versatile.** RAM offers remarkable flexibility, catering to various application scenarios.
111
+
112
+
113
+ <p align="center">
114
+ <table class="tg">
115
+ <tr>
116
+ <td class="tg-c3ow"><img src="images/experiment_comparison.png" align="center" width="800" ></td>
117
+ </tr>
118
+ </table>
119
+ <p align="center">(Green color means fully supervised learning and Blue color means zero-shot performance.)</p>
120
+ </p>
121
+
122
+ <p align="center">
123
+ <table class="tg">
124
+ <tr>
125
+ <td class="tg-c3ow"><img src="images/tagging_results.jpg" align="center" width="800" ></td>
126
+ </tr>
127
+ </table>
128
+ </p>
129
+
130
+ RAM significantly improves the tagging ability of the Tag2Text framework.
131
+ - **Accuracy.** RAM utilizes a **data engine** to **generate** additional annotations and **clean** incorrect ones, resulting in **higher accuracy** compared to Tag2Text.
132
+ - **Scope.** RAM upgrades the number of fixed tags from 3,400+ to **[6,400+](./ram/data/ram_tag_list.txt)** (reduced to 4,500+ distinct semantic tags after merging synonyms), covering **more valuable categories**.
133
+ Moreover, RAM is equipped with **open-set capability**, making it feasible to recognize tags not seen during training.
134
+
135
+
136
+ </details>
137
+
138
+
139
+
140
+ <details>
141
+ <summary><font size="3" style="font-weight:bold;">
142
+ Tag2Text
143
+ </font></summary>
144
+
145
+
146
+ Tag2Text is an efficient and controllable vision-language model with tagging guidance.
147
+ - **Tagging.** Tag2Text recognizes **[3,400+](./ram/data/tag2text_ori_tag_list.txt)** categories commonly used by humans, without manual annotations.
148
+ - **Captioning.** Tag2Text integrates **tag information** into text generation as **guiding elements**, resulting in **more controllable and comprehensive descriptions**.
149
+ - **Retrieval.** Tag2Text provides **tags** as **additional visible alignment indicators** for image-text retrieval.
150
+
151
+
152
+ <p align="center">
153
+ <table class="tg">
154
+ <tr>
155
+ <td class="tg-c3ow"><img src="images/tag2text_visualization.png" align="center" width="800" ></td>
156
+ </tr>
157
+ </table>
158
+ <p align="center">Tag2Text generate more comprehensive captions with tagging guidance.</p>
159
+ </p>
160
+
161
+ <p align="center">
162
+ <table class="tg">
163
+ <tr>
164
+ <td class="tg-c3ow"><img src="images/tag2text_retrieval_visualization.png" align="center" width="800" ></td>
165
+ </tr>
166
+ </table>
167
+ <p align="center">Tag2Text provides tags as additional visible alignment indicators.</p>
168
+ </p>
169
+
170
+
171
+ </details>
172
+
173
+ <!-- ## :sparkles: Highlight Projects with other Models
174
+ - [Tag2Text/RAM with Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything) is trong and general pipeline for visual semantic analysis, which can automatically **recognize**, detect, and segment for an image!
175
+ - [Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) is a multifunctional video question answering tool. Tag2Text provides powerful tagging and captioning capabilities as a fundamental component.
176
+ - [Prompt-can-anything](https://github.com/positive666/Prompt-Can-Anything) is a gradio web library that integrates SOTA multimodal large models, including Tag2text as the core model for graphic understanding -->
177
+
178
+
179
+ <!--
180
+ ## :fire: News
181
+
182
+ - **`2023/10/30`**: We release the [Recognize Anything Model Plus Model(RAM++)](), checkpoints and inference code!
183
+ - **`2023/06/08`**: We release the [Recognize Anything Model (RAM) Tag2Text web demo 🤗](https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text), checkpoints and inference code!
184
+ - **`2023/06/07`**: We release the [Recognize Anything Model (RAM)](https://recognize-anything.github.io/), a strong image tagging model!
185
+ - **`2023/06/05`**: Tag2Text is combined with [Prompt-can-anything](https://github.com/OpenGVLab/Ask-Anything).
186
+ - **`2023/05/20`**: Tag2Text is combined with [VideoChat](https://github.com/OpenGVLab/Ask-Anything).
187
+ - **`2023/04/20`**: We marry Tag2Text with with [Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything).
188
+ - **`2023/04/10`**: Code and checkpoint is available Now!
189
+ - **`2023/03/14`**: [Tag2Text web demo 🤗](https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text) is available on Hugging Face Space! -->
190
+
191
+
192
+
193
+
194
+ <!--
195
+ ## :writing_hand: TODO
196
+
197
+ - [x] Release checkpoints.
198
+ - [x] Release inference code.
199
+ - [x] Release demo and checkpoints.
200
+ - [x] Release training codes.
201
+ - [x] Release training datasets.
202
+ - [ ] Release full training codes and scripts. -->
203
+
204
+
205
+ ## :open_book: Training Datasets
206
+
207
+ ### **Image Texts and Tags**
208
+
209
+ These annotation files come from the [Tag2Text](https://arxiv.org/abs/2303.05657) and [RAM](https://recognize-anything.github.io/). Tag2Text automatically extracts image tags from image-text pairs. RAM further augments both tags and texts via an automatic data engine.
210
+
211
+
212
+ | DataSet | Size | Images | Texts | Tags |
213
+ |----------|---------|--------|-------|-------|
214
+ | [COCO](https://huggingface.co/datasets/xinyu1205/recognize-anything-dataset/blob/main/coco_train_rmcocodev_ram.json) | 168 MB | 113K | 680K | 3.2M |
215
+ | [VG](https://huggingface.co/datasets/xinyu1205/recognize-anything-dataset/blob/main/vg_ram.json) | 55 MB | 100K | 923K | 2.7M |
216
+ | [SBU](https://huggingface.co/datasets/xinyu1205/recognize-anything-dataset/blob/main/sbu_ram.json) | 234 MB | 849K | 1.7M | 7.6M |
217
+ | [CC3M](https://huggingface.co/datasets/xinyu1205/recognize-anything-dataset/blob/main/cc3m_train_ram.json) | 766 MB | 2.8M | 5.6M | 28.2M |
218
+ | [CC3M-val](https://huggingface.co/datasets/xinyu1205/recognize-anything-dataset/blob/main/cc3m_val_ram.json) | 3.5 MB | 12K | 26K | 132K |
219
+
220
+ CC12M will be released in the next update.
221
+
222
+ ### **LLM Tag Descriptions**
223
+
224
+ These tag description files come from [RAM++](https://arxiv.org/abs/2310.15200) and were generated by calling the GPT API. You can also customize tag categories with [generate_tag_des_llm.py](generate_tag_des_llm.py).
225
+
226
+ | Tag Descriptions | Tag List |
227
+ |---------------------|----------|
228
+ | [RAM Tag List](https://huggingface.co/datasets/xinyu1205/recognize-anything-plus-model-tag-descriptions/blob/main/ram_tag_list_4585_llm_tag_descriptions.json) | [4,585](ram/data/ram_tag_list.txt) |
229
+ | [OpenImages Uncommon](./datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json) | [200](datasets/openimages_rare_200/openimages_rare_200_ram_taglist.txt) |
230
+
231
+ ## :toolbox: Checkpoints
232
+ Note: you need to create a 'pretrained' folder and download these checkpoints into it (an example download snippet is given after the table).
233
+ <!-- insert a table -->
234
+ <table>
235
+ <thead>
236
+ <tr style="text-align: right;">
237
+ <th></th>
238
+ <th>Name</th>
239
+ <th>Backbone</th>
240
+ <th>Data</th>
241
+ <th>Illustration</th>
242
+ <th>Checkpoint</th>
243
+ </tr>
244
+ </thead>
245
+ <tbody>
246
+ <tr>
247
+ <th>1</th>
248
+ <td>RAM++ (14M)</td>
249
+ <td>Swin-Large</td>
250
+ <td>COCO, VG, SBU, CC3M, CC3M-val, CC12M</td>
251
+ <td>Provide strong image tagging ability for any category.</td>
252
+ <td><a href="https://huggingface.co/xinyu1205/recognize-anything-plus-model/blob/main/ram_plus_swin_large_14m.pth">Download link</a></td>
253
+ </tr>
254
+ <tr>
255
+ <th>2</th>
256
+ <td>RAM (14M)</td>
257
+ <td>Swin-Large</td>
258
+ <td>COCO, VG, SBU, CC3M, CC3M-val, CC12M</td>
259
+ <td>Provide strong image tagging ability for common category.</td>
260
+ <td><a href="https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text/blob/main/ram_swin_large_14m.pth">Download link</a></td>
261
+ </tr>
262
+ <tr>
263
+ <th>3</th>
264
+ <td>Tag2Text (14M)</td>
265
+ <td>Swin-Base</td>
266
+ <td>COCO, VG, SBU, CC3M, CC3M-val, CC12M</td>
267
+ <td>Support comprehensive captioning and tagging.</td>
268
+ <td><a href="https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text/blob/main/tag2text_swin_14m.pth">Download link</a></td>
269
+ </tr>
270
+ </tbody>
271
+ </table>
272
+
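+ If convenient, the RAM++ checkpoint can also be fetched programmatically. The snippet below is only a sketch and not part of this repository: it assumes the `huggingface_hub` package is installed, and it takes the repo id and filename from the RAM++ download link in the table above.
+
+ ```python
+ # Minimal sketch (assumes `pip install huggingface_hub`); the repo id and
+ # filename come from the RAM++ download link in the checkpoint table above.
+ from pathlib import Path
+ from huggingface_hub import hf_hub_download
+
+ Path("pretrained").mkdir(exist_ok=True)
+
+ hf_hub_download(
+     repo_id="xinyu1205/recognize-anything-plus-model",
+     filename="ram_plus_swin_large_14m.pth",
+     local_dir="pretrained",
+ )
+ ```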
273
+
274
+ ## :running: Model Inference
275
+
276
+ ### **Setting Up** ###
277
+
278
+ 1. Create and activate a Conda environment:
279
+
280
+ ```bash
281
+ conda create -n recognize-anything python=3.8 -y
282
+ conda activate recognize-anything
283
+ ```
284
+
285
+ 2. Install `recognize-anything` as a package:
286
+
287
+ ```bash
288
+ pip install git+https://github.com/xinyu1205/recognize-anything.git
289
+ ```
290
+
291
+ 3. Or, for development, you may build from source:
292
+
293
+ ```bash
294
+ git clone https://github.com/xinyu1205/recognize-anything.git
295
+ cd recognize-anything
296
+ pip install -e .
297
+ ```
298
+
299
+ Then the RAM++, RAM, and Tag2Text models can be imported in other projects:
300
+
301
+ ```python
302
+ from ram.models import ram_plus, ram, tag2text
303
+ ```
304
+
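+ For orientation, the following is a minimal sketch of tagging a single image directly from Python. The model constructor arguments mirror those used in `batch_inference.py`; the `inference_ram` helper is an assumption based on the bundled `inference_ram.py` script, so treat this as a sketch and refer to that script for the authoritative usage.
+
+ ```python
+ # Minimal sketch: constructor arguments mirror batch_inference.py; the
+ # `inference_ram` helper is assumed to match the one used by inference_ram.py.
+ import torch
+ from PIL import Image
+
+ from ram import get_transform, inference_ram
+ from ram.models import ram
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ transform = get_transform(384)
+ model = ram(pretrained="pretrained/ram_swin_large_14m.pth",
+             image_size=384, vit="swin_l").to(device).eval()
+
+ image = transform(Image.open("images/demo/demo1.jpg")).unsqueeze(0).to(device)
+ res = inference_ram(image, model)  # assumed to return (English tags, Chinese tags)
+ print(res[0])
+ ```
+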
305
+ ### **RAM++ Inference** ###
306
+
307
+ Get the English and Chinese outputs of the images:
308
+
309
+ ```bash
310
+ python inference_ram_plus.py --image images/demo/demo1.jpg --pretrained pretrained/ram_plus_swin_large_14m.pth
311
+ ```
312
+
313
+
314
+ The output will look like the following:
315
+
316
+ ```
317
+ Image Tags: armchair | blanket | lamp | carpet | couch | dog | gray | green | hassock | home | lay | living room | picture frame | pillow | plant | room | wall lamp | sit | wood floor
318
+ 图像标签: 扶手椅 | 毯子/覆盖层 | 灯 | 地毯 | 沙发 | 狗 | 灰色 | 绿色 | 坐垫/搁脚凳/草丛 | 家/住宅 | 躺 | 客厅 | 相框 | 枕头 | 植物 | 房间 | 壁灯 | 坐/放置/坐落 | 木地板
319
+ ```
320
+
321
+ ### **RAM++ Inference on Unseen Categories (Open-Set)** ###
322
+
323
+ 1. Get the [OpenImages-Uncommon categories](./datasets/openimages_rare_200/openimages_rare_200_ram_taglist.txt) of the image:
324
+
325
+ We have released the LLM tag descriptions of OpenImages-Uncommon categories in [openimages_rare_200_llm_tag_descriptions](./datasets/openimages_rare_200/).
326
+
327
+ <pre/>
328
+ python inference_ram_plus_openset.py --image images/openset_example.jpg \
329
+ --pretrained pretrained/ram_plus_swin_large_14m.pth \
330
+ --llm_tag_des datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json
331
+ </pre>
332
+
333
+ The output will look like the following:
334
+ ```
335
+ Image Tags: Close-up | Compact car | Go-kart | Horse racing | Sport utility vehicle | Touring car
336
+ ```
337
+
338
+ 2. You can also customize any tag categories for recognition through tag descriptions:
339
+
340
+ Modify the [categories](./generate_tag_des_llm.py#L56), and call the GPT API to generate the corresponding tag descriptions:
341
+
342
+ <pre/>
343
+ python generate_tag_des_llm.py \
344
+ --openai_api_key 'your openai api key' \
345
+ --output_file_path datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json
346
+ </pre>
347
+
348
+ <details>
349
+ <summary><font size="4" style="font-weight:bold;">
350
+ RAM Inference
351
+ </font></summary>
352
+
353
+ Get the English and Chinese outputs of the images:
354
+
355
+ <pre/>
356
+ python inference_ram.py --image images/demo/demo1.jpg \
357
+ --pretrained pretrained/ram_swin_large_14m.pth
358
+ </pre>
359
+
360
+ The output will look like the following:
361
+
362
+ ```
363
+ Image Tags: armchair | blanket | lamp | carpet | couch | dog | floor | furniture | gray | green | living room | picture frame | pillow | plant | room | sit | stool | wood floor
364
+ 图像标签: 扶手椅 | 毯子/覆盖层 | 灯 | 地毯 | 沙发 | 狗 | 地板/地面 | 家具 | 灰色 | 绿色 | 客厅 | 相框 | 枕头 | 植物 | 房间 | 坐/放置/坐落 | 凳子 | 木地板
365
+ ```
366
+
367
+ </details>
368
+
369
+
370
+ <details>
371
+ <summary><font size="4" style="font-weight:bold;">
372
+ RAM Inference on Unseen Categories (Open-Set)
373
+ </font></summary>
374
+
375
+
376
+ First, customize the recognition categories in [build_openset_label_embedding](./ram/utils/openset_utils.py), then get the tags of the images:
377
+
378
+ <pre/>
379
+ python inference_ram_openset.py --image images/openset_example.jpg \
380
+ --pretrained pretrained/ram_swin_large_14m.pth
381
+ </pre>
382
+
383
+ The output will look like the following:
384
+ ```
385
+ Image Tags: Black-and-white | Go-kart
386
+ ```
387
+
388
+
389
+ </details>
390
+
391
+ <details>
392
+ <summary><font size="4" style="font-weight:bold;">
393
+ Tag2Text Inference
394
+ </font></summary>
395
+
396
+
397
+ Get the tagging and captioning results:
398
+ <pre/>
399
+ python inference_tag2text.py --image images/demo/demo1.jpg \
400
+ --pretrained pretrained/tag2text_swin_14m.pth
401
+ </pre>
402
+ Or get the tagging and specified captioning results (optional):
403
+ <pre/>python inference_tag2text.py --image images/demo/demo1.jpg \
404
+ --pretrained pretrained/tag2text_swin_14m.pth \
405
+ --specified-tags "cloud,sky"</pre>
406
+
407
+ </details>
408
+
409
+ ### **Batch Inference and Evaluation** ###
410
+ We release two datasets `OpenImages-common` (214 common tag classes) and `OpenImages-rare` (200 uncommon tag classes). Copy or sym-link test images of [OpenImages v6](https://storage.googleapis.com/openimages/web/download_v6.html) to `datasets/openimages_common_214/imgs/` and `datasets/openimages_rare_200/imgs`.
411
+
412
+ To evaluate RAM++ on `OpenImages-common`:
413
+
414
+ ```bash
415
+ python batch_inference.py \
416
+ --model-type ram_plus \
417
+ --checkpoint pretrained/ram_plus_swin_large_14m.pth \
418
+ --dataset openimages_common_214 \
419
+ --output-dir outputs/ram_plus
420
+ ```
421
+
422
+ To evaluate RAM++ open-set capability on `OpenImages-rare`:
423
+
424
+ ```bash
425
+ python batch_inference.py \
426
+ --model-type ram_plus \
427
+ --checkpoint pretrained/ram_plus_swin_large_14m.pth \
428
+ --open-set \
429
+ --dataset openimages_rare_200 \
430
+ --output-dir outputs/ram_plus_openset
431
+ ```
432
+
433
+ To evaluate RAM on `OpenImages-common`:
434
+
435
+ ```bash
436
+ python batch_inference.py \
437
+ --model-type ram \
438
+ --checkpoint pretrained/ram_swin_large_14m.pth \
439
+ --dataset openimages_common_214 \
440
+ --output-dir outputs/ram
441
+ ```
442
+
443
+ To evaluate RAM open-set capability on `OpenImages-rare`:
444
+
445
+ ```bash
446
+ python batch_inference.py \
447
+ --model-type ram \
448
+ --checkpoint pretrained/ram_swin_large_14m.pth \
449
+ --open-set \
450
+ --dataset openimages_rare_200 \
451
+ --output-dir outputs/ram_openset
452
+ ```
453
+
454
+ To evaluate Tag2Text on `OpenImages-common`:
455
+
456
+ ```bash
457
+ python batch_inference.py \
458
+ --model-type tag2text \
459
+ --checkpoint pretrained/tag2text_swin_14m.pth \
460
+ --dataset openimages_common_214 \
461
+ --output-dir outputs/tag2text
462
+ ```
463
+
464
+ Please refer to `batch_inference.py` for more options. To get the P/R values in Table 3 of the RAM paper, pass `--threshold=0.86` for RAM and `--threshold=0.68` for Tag2Text.
465
+
466
+ To run batch inference on custom images, you can set up your own dataset following the two given datasets (a layout sketch is given below).
467
+
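+ As a rough guide, the sketch below shows the layout that `load_dataset` in `batch_inference.py` expects for a hypothetical custom dataset named `my_dataset`; the image names and tags are purely illustrative. Note that the `--dataset` choices in `parse_args` would also need to be extended to accept the new dataset name.
+
+ ```python
+ # Illustrative sketch of the files batch_inference.py reads for a custom
+ # RAM/RAM++ dataset called "my_dataset"; all names and tags below are made up.
+ from pathlib import Path
+
+ root = Path("datasets/my_dataset")
+ (root / "imgs").mkdir(parents=True, exist_ok=True)  # place my_image_001.jpg etc. here
+
+ # One tag per line, defining the label space to evaluate.
+ (root / "my_dataset_ram_taglist.txt").write_text("cat\ndog\nsofa\n")
+
+ # One image per line: image name without the ".jpg" suffix, then its ground-truth tags.
+ (root / "my_dataset_ram_annots.txt").write_text(
+     "my_image_001,cat,sofa\n"
+     "my_image_002,dog\n"
+ )
+ ```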
468
+
469
+ ## :golfing: Model Training/Finetuning
470
+
471
+ ### **RAM++** ##
472
+
473
+ 1. Download [RAM training datasets](#open_book-training-datasets), where each json file contains a list. Each item in the list is a dictionary with three key-value pairs: {'image_path': path_of_image, 'caption': text_of_image, 'union_label_id': image tags for tagging, which include parsed tags and pseudo tags}. An illustrative sketch of this format is given after these steps.
474
+
475
+ 2. In ram/configs/pretrain.yaml, set 'train_file' as the paths for the json files.
476
+
477
+ 3. Prepare the [pretrained Swin-Transformer](https://github.com/microsoft/Swin-Transformer), and set 'ckpt' in ram/configs/swin.
478
+
479
+ 4. Download the RAM++ frozen tag embedding file "[ram_plus_tag_embedding_class_4585_des_51.pth](https://huggingface.co/xinyu1205/recognize-anything-plus-model/blob/main/ram_plus_tag_embedding_class_4585_des_51.pth)", and place the file at "ram/data/frozen_tag_embedding/ram_plus_tag_embedding_class_4585_des_51.pth".
480
+
481
+ 5. Pre-train the model using 8 A100 GPUs:
482
+
483
+ ```bash
484
+ python -m torch.distributed.run --nproc_per_node=8 pretrain.py \
485
+ --model-type ram_plus \
486
+ --config ram/configs/pretrain.yaml \
487
+ --output-dir outputs/ram_plus
488
+ ```
489
+
490
+ 6. Fine-tune the pre-trained checkpoint using 8 A100 GPUs:
491
+
492
+ ```bash
493
+ python -m torch.distributed.run --nproc_per_node=8 finetune.py \
494
+ --model-type ram_plus \
495
+ --config ram/configs/finetune.yaml \
496
+ --checkpoint outputs/ram_plus/checkpoint_04.pth \
497
+ --output-dir outputs/ram_plus_ft
498
+ ```
499
+
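+ To make the annotation format from step 1 concrete, the sketch below writes one such json file; the path, caption, and label ids are invented for illustration, and the released annotation files listed in the training-dataset table should be used for real training.
+
+ ```python
+ # Illustrative sketch of the RAM++ training annotation format described in
+ # step 1 above; every value here is made up for demonstration only.
+ import json
+
+ annotations = [
+     {
+         "image_path": "cc3m/images/000000001.jpg",
+         "caption": "a dog lying on a couch in a living room",
+         # tags for the tagging objective (parsed tags + pseudo tags), as label ids
+         "union_label_id": [312, 987, 2045],
+     },
+ ]
+
+ with open("my_train_file.json", "w") as f:
+     json.dump(annotations, f)
+ ```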
500
+
501
+ <details>
502
+ <summary><font size="4" style="font-weight:bold;">
503
+ RAM
504
+ </font></summary>
505
+
506
+ 1. Download [RAM training datasets](#open_book-training-datasets), where each json file contains a list. Each item in the list is a dictionary with four key-value pairs: {'image_path': path_of_image, 'caption': text_of_image, 'union_label_id': image tags for tagging, which include parsed tags and pseudo tags, 'parse_label_id': image tags parsed from caption}.
507
+
508
+ 2. In ram/configs/pretrain.yaml, set 'train_file' as the paths for the json files.
509
+
510
+ 3. Prepare the [pretrained Swin-Transformer](https://github.com/microsoft/Swin-Transformer), and set 'ckpt' in ram/configs/swin.
511
+
512
+ 4. Download the RAM frozen tag embedding file "[ram_tag_embedding_class_4585.pth](https://huggingface.co/xinyu1205/recognize_anything_model/blob/main/ram_tag_embedding_class_4585.pth)", and place the file at "ram/data/frozen_tag_embedding/ram_tag_embedding_class_4585.pth".
513
+
514
+ 5. Pre-train the model using 8 A100 GPUs:
515
+
516
+ ```bash
517
+ python -m torch.distributed.run --nproc_per_node=8 pretrain.py \
518
+ --model-type ram \
519
+ --config ram/configs/pretrain.yaml \
520
+ --output-dir outputs/ram
521
+ ```
522
+
523
+ 6. Fine-tune the pre-trained checkpoint using 8 A100 GPUs:
524
+
525
+ ```bash
526
+ python -m torch.distributed.run --nproc_per_node=8 finetune.py \
527
+ --model-type ram \
528
+ --config ram/configs/finetune.yaml \
529
+ --checkpoint outputs/ram/checkpoint_04.pth \
530
+ --output-dir outputs/ram_ft
531
+ ```
532
+
533
+ </details>
534
+
535
+
536
+ <details>
537
+ <summary><font size="4" style="font-weight:bold;">
538
+ Tag2Text
539
+ </font></summary>
540
+
541
+ 1. Download [RAM training datasets](#open_book-training-datasets), where each json file contains a list. Each item in the list is a dictionary with three key-value pairs: {'image_path': path_of_image, 'caption': text_of_image, 'parse_label_id': image tags parsed from caption}.
542
+
543
+ 2. In ram/configs/pretrain_tag2text.yaml, set 'train_file' as the paths for the json files.
544
+
545
+ 3. Prepare the [pretrained Swin-Transformer](https://github.com/microsoft/Swin-Transformer), and set 'ckpt' in ram/configs/swin.
546
+
547
+ 4. Pre-train the model using 8 A100 GPUs:
548
+
549
+ ```bash
550
+ python -m torch.distributed.run --nproc_per_node=8 pretrain.py \
551
+ --model-type tag2text \
552
+ --config ram/configs/pretrain_tag2text.yaml \
553
+ --output-dir outputs/tag2text
554
+ ```
555
+
556
+ 5. Fine-tune the pre-trained checkpoint using 8 A100 GPUs:
557
+
558
+ ```bash
559
+ python -m torch.distributed.run --nproc_per_node=8 finetune.py \
560
+ --model-type tag2text \
561
+ --config ram/configs/finetune_tag2text.yaml \
562
+ --checkpoint outputs/tag2text/checkpoint_04.pth \
563
+ --output-dir outputs/tag2text_ft
564
+ ```
565
+
566
+ </details>
567
+
568
+
569
+ ## :black_nib: Citation
570
+ If you find our work to be useful for your research, please consider citing.
571
+
572
+ ```
573
+ @article{huang2023open,
574
+ title={Open-Set Image Tagging with Multi-Grained Text Supervision},
575
+ author={Huang, Xinyu and Huang, Yi-Jie and Zhang, Youcai and Tian, Weiwei and Feng, Rui and Zhang, Yuejie and Xie, Yanchun and Li, Yaqian and Zhang, Lei},
576
+ journal={arXiv e-prints},
577
+ pages={arXiv--2310},
578
+ year={2023}
579
+ }
580
+
581
+ @article{zhang2023recognize,
582
+ title={Recognize Anything: A Strong Image Tagging Model},
583
+ author={Zhang, Youcai and Huang, Xinyu and Ma, Jinyu and Li, Zhaoyang and Luo, Zhaochuan and Xie, Yanchun and Qin, Yuzhuo and Luo, Tong and Li, Yaqian and Liu, Shilong and others},
584
+ journal={arXiv preprint arXiv:2306.03514},
585
+ year={2023}
586
+ }
587
+
588
+ @article{huang2023tag2text,
589
+ title={Tag2Text: Guiding Vision-Language Model via Image Tagging},
590
+ author={Huang, Xinyu and Zhang, Youcai and Ma, Jinyu and Tian, Weiwei and Feng, Rui and Zhang, Yuejie and Li, Yaqian and Guo, Yandong and Zhang, Lei},
591
+ journal={arXiv preprint arXiv:2303.05657},
592
+ year={2023}
593
+ }
594
+ ```
595
+
596
+ ## :hearts: Acknowledgements
597
+ This work is done with the help of the amazing code base of [BLIP](https://github.com/salesforce/BLIP), thanks very much!
598
+
599
+ We want to thank @Cheng Rui @Shilong Liu @Ren Tianhe for their help in [marrying RAM/Tag2Text with Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything).
600
+
601
+ We also want to thank [Ask-Anything](https://github.com/OpenGVLab/Ask-Anything), [Prompt-can-anything](https://github.com/positive666/Prompt-Can-Anything) for combining RAM/Tag2Text, which greatly expands the application boundaries of RAM/Tag2Text.
external/Grounded-Segment-Anything/recognize-anything/batch_inference.py ADDED
@@ -0,0 +1,491 @@
1
+ from argparse import ArgumentParser
2
+ from pathlib import Path
3
+ from typing import Dict, List, Optional, TextIO, Tuple
4
+
5
+ import torch
6
+ from PIL import Image, UnidentifiedImageError
7
+ from torch import Tensor
8
+ from torch.nn import Module, Parameter
9
+ from torch.nn.functional import relu, sigmoid
10
+ from torch.utils.data import DataLoader, Dataset
11
+ from tqdm import tqdm
12
+ import torch.nn.functional as F
13
+ import os
14
+ import json
15
+
16
+ from ram import get_transform
17
+ from ram.models import ram_plus, ram, tag2text
18
+ from ram.utils import build_openset_llm_label_embedding, build_openset_label_embedding, get_mAP, get_PR
19
+
20
+ device = "cuda" if torch.cuda.is_available() else "cpu"
21
+
22
+
23
+ class _Dataset(Dataset):
24
+ def __init__(self, imglist, input_size):
25
+ self.imglist = imglist
26
+ self.transform = get_transform(input_size)
27
+
28
+ def __len__(self):
29
+ return len(self.imglist)
30
+
31
+ def __getitem__(self, index):
32
+ try:
33
+ img = Image.open(self.imglist[index]+".jpg")
34
+ except (OSError, FileNotFoundError, UnidentifiedImageError):
35
+ img = Image.new('RGB', (10, 10), 0)
36
+ print("Error loading image:", self.imglist[index])
37
+ return self.transform(img)
38
+
39
+
40
+ def parse_args():
41
+ parser = ArgumentParser()
42
+ # model
43
+ parser.add_argument("--model-type",
44
+ type=str,
45
+ choices=("ram_plus", "ram", "tag2text"),
46
+ required=True)
47
+ parser.add_argument("--checkpoint",
48
+ type=str,
49
+ required=True)
50
+ parser.add_argument("--backbone",
51
+ type=str,
52
+ choices=("swin_l", "swin_b"),
53
+ default=None,
54
+ help="If `None`, will judge from `--model-type`")
55
+ parser.add_argument("--open-set",
56
+ action="store_true",
57
+ help=(
58
+ "Treat all categories in the taglist file as "
59
+ "unseen and perform open-set classification. Only "
60
+ "works with RAM."
61
+ ))
62
+ # data
63
+ parser.add_argument("--dataset",
64
+ type=str,
65
+ choices=(
66
+ "openimages_common_214",
67
+ "openimages_rare_200"
68
+ ),
69
+ required=True)
70
+ parser.add_argument("--input-size",
71
+ type=int,
72
+ default=384)
73
+ # threshold
74
+ group = parser.add_mutually_exclusive_group()
75
+ group.add_argument("--threshold",
76
+ type=float,
77
+ default=None,
78
+ help=(
79
+ "Use custom threshold for all classes. Mutually "
80
+ "exclusive with `--threshold-file`. If both "
81
+ "`--threshold` and `--threshold-file` is `None`, "
82
+ "will use a default threshold setting."
83
+ ))
84
+ group.add_argument("--threshold-file",
85
+ type=str,
86
+ default=None,
87
+ help=(
88
+ "Use custom class-wise thresholds by providing a "
89
+ "text file. Each line is a float-type threshold, "
90
+ "following the order of the tags in taglist file. "
91
+ "See `ram/data/ram_tag_list_threshold.txt` as an "
92
+ "example. Mutually exclusive with `--threshold`. "
93
+ "If both `--threshold` and `--threshold-file` is "
94
+ "`None`, will use default threshold setting."
95
+ ))
96
+ # miscellaneous
97
+ parser.add_argument("--output-dir", type=str, default="./outputs")
98
+ parser.add_argument("--batch-size", type=int, default=128)
99
+ parser.add_argument("--num-workers", type=int, default=4)
100
+
101
+ args = parser.parse_args()
102
+
103
+ # post process and validity check
104
+ args.model_type = args.model_type.lower()
105
+
106
+ assert not (args.model_type == "tag2text" and args.open_set)
107
+
108
+ if args.backbone is None:
109
+ args.backbone = "swin_l" if args.model_type == "ram_plus" or args.model_type == "ram" else "swin_b"
110
+
111
+ return args
112
+
113
+
114
+ def load_dataset(
115
+ dataset: str,
116
+ model_type: str,
117
+ input_size: int,
118
+ batch_size: int,
119
+ num_workers: int
120
+ ) -> Tuple[DataLoader, Dict]:
121
+ dataset_root = str(Path(__file__).resolve().parent / "datasets" / dataset)
122
+ img_root = dataset_root + "/imgs"
123
+ # Label system of tag2text contains duplicate tag texts, like
124
+ # "train" (noun) and "train" (verb). Therefore, for tag2text, we use
125
+ # `tagid` instead of `tag`.
126
+ if model_type == "ram_plus" or model_type == "ram":
127
+ tag_file = dataset_root + f"/{dataset}_ram_taglist.txt"
128
+ annot_file = dataset_root + f"/{dataset}_ram_annots.txt"
129
+ else:
130
+ tag_file = dataset_root + f"/{dataset}_tag2text_tagidlist.txt"
131
+ annot_file = dataset_root + f"/{dataset}_{model_type}_idannots.txt"
132
+
133
+ with open(tag_file, "r", encoding="utf-8") as f:
134
+ taglist = [line.strip() for line in f]
135
+
136
+ with open(annot_file, "r", encoding="utf-8") as f:
137
+ imglist = [img_root + "/" + line.strip().split(",")[0] for line in f]
138
+
139
+ loader = DataLoader(
140
+ dataset=_Dataset(imglist,input_size),
141
+ shuffle=False,
142
+ drop_last=False,
143
+ pin_memory=True,
144
+ batch_size=batch_size,
145
+ num_workers=num_workers
146
+ )
147
+
148
+ open_tag_des = dataset_root + f"/{dataset}_llm_tag_descriptions.json"
149
+ if os.path.exists(open_tag_des):
150
+ with open(open_tag_des, 'rb') as fo:
151
+ tag_des = json.load(fo)
152
+
153
+ else:
154
+ tag_des = None
155
+ info = {
156
+ "taglist": taglist,
157
+ "imglist": imglist,
158
+ "annot_file": annot_file,
159
+ "img_root": img_root,
160
+ "tag_des": tag_des
161
+ }
162
+
163
+ return loader, info
164
+
165
+
166
+ def get_class_idxs(
167
+ model_type: str,
168
+ open_set: bool,
169
+ taglist: List[str]
170
+ ) -> Optional[List[int]]:
171
+ """Get indices of required categories in the label system."""
172
+ if model_type == "ram_plus" or model_type == "ram":
173
+ if not open_set:
174
+ model_taglist_file = "ram/data/ram_tag_list.txt"
175
+ with open(model_taglist_file, "r", encoding="utf-8") as f:
176
+ model_taglist = [line.strip() for line in f]
177
+ return [model_taglist.index(tag) for tag in taglist]
178
+ else:
179
+ return None
180
+ else: # for tag2text, we directly use tagid instead of text-form of tag.
181
+ # here tagid equals to tag index.
182
+ return [int(tag) for tag in taglist]
183
+
184
+
185
+ def load_thresholds(
186
+ threshold: Optional[float],
187
+ threshold_file: Optional[str],
188
+ model_type: str,
189
+ open_set: bool,
190
+ class_idxs: List[int],
191
+ num_classes: int,
192
+ ) -> List[float]:
193
+ """Decide what threshold(s) to use."""
194
+ if not threshold_file and not threshold: # use default
195
+ if model_type == "ram_plus" or model_type == "ram":
196
+ if not open_set: # use class-wise tuned thresholds
197
+ ram_threshold_file = "ram/data/ram_tag_list_threshold.txt"
198
+ with open(ram_threshold_file, "r", encoding="utf-8") as f:
199
+ idx2thre = {
200
+ idx: float(line.strip()) for idx, line in enumerate(f)
201
+ }
202
+ return [idx2thre[idx] for idx in class_idxs]
203
+ else:
204
+ return [0.5] * num_classes
205
+ else:
206
+ return [0.68] * num_classes
207
+ elif threshold_file:
208
+ with open(threshold_file, "r", encoding="utf-8") as f:
209
+ thresholds = [float(line.strip()) for line in f]
210
+ assert len(thresholds) == num_classes
211
+ return thresholds
212
+ else:
213
+ return [threshold] * num_classes
214
+
215
+
216
+ def gen_pred_file(
217
+ imglist: List[str],
218
+ tags: List[List[str]],
219
+ img_root: str,
220
+ pred_file: str
221
+ ) -> None:
222
+ """Generate text file of tag prediction results."""
223
+ with open(pred_file, "w", encoding="utf-8") as f:
224
+ for image, tag in zip(imglist, tags):
225
+ # should be relative to img_root to match the gt file.
226
+ s = str(Path(image).relative_to(img_root))
227
+ if tag:
228
+ s = s + "," + ",".join(tag)
229
+ f.write(s + "\n")
230
+
231
+ def load_ram_plus(
232
+ backbone: str,
233
+ checkpoint: str,
234
+ input_size: int,
235
+ taglist: List[str],
236
+ tag_des: List[str],
237
+ open_set: bool,
238
+ class_idxs: List[int],
239
+ ) -> Module:
240
+ model = ram_plus(pretrained=checkpoint, image_size=input_size, vit=backbone)
241
+ # trim taglist for faster inference
242
+ if open_set:
243
+ print("Building tag embeddings ...")
244
+ label_embed, _ = build_openset_llm_label_embedding(tag_des)
245
+ model.label_embed = Parameter(label_embed.float())
246
+ model.num_class = len(tag_des)
247
+ else:
248
+ model.label_embed = Parameter(model.label_embed.data.reshape(model.num_class,51,512)[class_idxs, :, :].reshape(len(class_idxs)*51, 512))
249
+ model.num_class = len(class_idxs)
250
+ return model.to(device).eval()
251
+
252
+
253
+ def load_ram(
254
+ backbone: str,
255
+ checkpoint: str,
256
+ input_size: int,
257
+ taglist: List[str],
258
+ open_set: bool,
259
+ class_idxs: List[int],
260
+ ) -> Module:
261
+ model = ram(pretrained=checkpoint, image_size=input_size, vit=backbone)
262
+ # trim taglist for faster inference
263
+ if open_set:
264
+ print("Building tag embeddings ...")
265
+ label_embed, _ = build_openset_label_embedding(taglist)
266
+ model.label_embed = Parameter(label_embed.float())
267
+ else:
268
+ model.label_embed = Parameter(model.label_embed[class_idxs, :])
269
+ return model.to(device).eval()
270
+
271
+
272
+ def load_tag2text(
273
+ backbone: str,
274
+ checkpoint: str,
275
+ input_size: int
276
+ ) -> Module:
277
+ model = tag2text(
278
+ pretrained=checkpoint,
279
+ image_size=input_size,
280
+ vit=backbone
281
+ )
282
+ return model.to(device).eval()
283
+
284
+ @torch.no_grad()
285
+ def forward_ram_plus(model: Module, imgs: Tensor) -> Tensor:
286
+ image_embeds = model.image_proj(model.visual_encoder(imgs.to(device)))
287
+ image_atts = torch.ones(
288
+ image_embeds.size()[:-1], dtype=torch.long).to(device)
289
+
290
+ image_cls_embeds = image_embeds[:, 0, :]
291
+ image_spatial_embeds = image_embeds[:, 1:, :]
292
+
293
+ bs = image_spatial_embeds.shape[0]
294
+
295
+ des_per_class = int(model.label_embed.shape[0] / model.num_class)
296
+
297
+ image_cls_embeds = image_cls_embeds / image_cls_embeds.norm(dim=-1, keepdim=True)
298
+ reweight_scale = model.reweight_scale.exp()
299
+ logits_per_image = (reweight_scale * image_cls_embeds @ model.label_embed.t())
300
+ logits_per_image = logits_per_image.view(bs, -1,des_per_class)
301
+
302
+ weight_normalized = F.softmax(logits_per_image, dim=2)
303
+ label_embed_reweight = torch.empty(bs, model.num_class, 512).cuda()
304
+ weight_normalized = F.softmax(logits_per_image, dim=2)
305
+ label_embed_reweight = torch.empty(bs, model.num_class, 512).cuda()
306
+ for i in range(bs):
307
+ reshaped_value = model.label_embed.view(-1, des_per_class, 512)
308
+ product = weight_normalized[i].unsqueeze(-1) * reshaped_value
309
+ label_embed_reweight[i] = product.sum(dim=1)
310
+
311
+ label_embed = relu(model.wordvec_proj(label_embed_reweight))
312
+
313
+ tagging_embed, _ = model.tagging_head(
314
+ encoder_embeds=label_embed,
315
+ encoder_hidden_states=image_embeds,
316
+ encoder_attention_mask=image_atts,
317
+ return_dict=False,
318
+ mode='tagging',
319
+ )
320
+ return sigmoid(model.fc(tagging_embed).squeeze(-1))
321
+
322
+ @torch.no_grad()
323
+ def forward_ram(model: Module, imgs: Tensor) -> Tensor:
324
+ image_embeds = model.image_proj(model.visual_encoder(imgs.to(device)))
325
+ image_atts = torch.ones(
326
+ image_embeds.size()[:-1], dtype=torch.long).to(device)
327
+ label_embed = relu(model.wordvec_proj(model.label_embed)).unsqueeze(0)\
328
+ .repeat(imgs.shape[0], 1, 1)
329
+ tagging_embed, _ = model.tagging_head(
330
+ encoder_embeds=label_embed,
331
+ encoder_hidden_states=image_embeds,
332
+ encoder_attention_mask=image_atts,
333
+ return_dict=False,
334
+ mode='tagging',
335
+ )
336
+ return sigmoid(model.fc(tagging_embed).squeeze(-1))
337
+
338
+
339
+ @torch.no_grad()
340
+ def forward_tag2text(
341
+ model: Module,
342
+ class_idxs: List[int],
343
+ imgs: Tensor
344
+ ) -> Tensor:
345
+ image_embeds = model.visual_encoder(imgs.to(device))
346
+ image_atts = torch.ones(
347
+ image_embeds.size()[:-1], dtype=torch.long).to(device)
348
+ label_embed = model.label_embed.weight.unsqueeze(0)\
349
+ .repeat(imgs.shape[0], 1, 1)
350
+ tagging_embed, _ = model.tagging_head(
351
+ encoder_embeds=label_embed,
352
+ encoder_hidden_states=image_embeds,
353
+ encoder_attention_mask=image_atts,
354
+ return_dict=False,
355
+ mode='tagging',
356
+ )
357
+ return sigmoid(model.fc(tagging_embed))[:, class_idxs]
358
+
359
+
360
+ def print_write(f: TextIO, s: str):
361
+ print(s)
362
+ f.write(s + "\n")
363
+
364
+
365
+ if __name__ == "__main__":
366
+ args = parse_args()
367
+
368
+ # set up output paths
369
+ output_dir = args.output_dir
370
+ Path(output_dir).mkdir(parents=True, exist_ok=True)
371
+ pred_file, pr_file, ap_file, summary_file, logit_file = [
372
+ output_dir + "/" + name for name in
373
+ ("pred.txt", "pr.txt", "ap.txt", "summary.txt", "logits.pth")
374
+ ]
375
+ with open(summary_file, "w", encoding="utf-8") as f:
376
+ print_write(f, "****************")
377
+ for key in (
378
+ "model_type", "backbone", "checkpoint", "open_set",
379
+ "dataset", "input_size",
380
+ "threshold", "threshold_file",
381
+ "output_dir", "batch_size", "num_workers"
382
+ ):
383
+ print_write(f, f"{key}: {getattr(args, key)}")
384
+ print_write(f, "****************")
385
+
386
+ # prepare data
387
+ loader, info = load_dataset(
388
+ dataset=args.dataset,
389
+ model_type=args.model_type,
390
+ input_size=args.input_size,
391
+ batch_size=args.batch_size,
392
+ num_workers=args.num_workers
393
+ )
394
+ taglist, imglist, annot_file, img_root, tag_des = \
395
+ info["taglist"], info["imglist"], info["annot_file"], info["img_root"], info["tag_des"]
396
+
397
+ # get class idxs
398
+ class_idxs = get_class_idxs(
399
+ model_type=args.model_type,
400
+ open_set=args.open_set,
401
+ taglist=taglist
402
+ )
403
+
404
+ # set up threshold(s)
405
+ thresholds = load_thresholds(
406
+ threshold=args.threshold,
407
+ threshold_file=args.threshold_file,
408
+ model_type=args.model_type,
409
+ open_set=args.open_set,
410
+ class_idxs=class_idxs,
411
+ num_classes=len(taglist)
412
+ )
413
+
414
+ # inference
415
+ if Path(logit_file).is_file():
416
+
417
+ logits = torch.load(logit_file)
418
+
419
+ else:
420
+ # load model
421
+ if args.model_type == "ram_plus":
422
+ model = load_ram_plus(
423
+ backbone=args.backbone,
424
+ checkpoint=args.checkpoint,
425
+ input_size=args.input_size,
426
+ taglist=taglist,
427
+ tag_des = tag_des,
428
+ open_set=args.open_set,
429
+ class_idxs=class_idxs
430
+ )
431
+ elif args.model_type == "ram":
432
+ model = load_ram(
433
+ backbone=args.backbone,
434
+ checkpoint=args.checkpoint,
435
+ input_size=args.input_size,
436
+ taglist=taglist,
437
+ open_set=args.open_set,
438
+ class_idxs=class_idxs
439
+ )
440
+ elif args.model_type == "tag2text":
441
+ model = load_tag2text(
442
+ backbone=args.backbone,
443
+ checkpoint=args.checkpoint,
444
+ input_size=args.input_size
445
+ )
446
+
447
+ # inference
448
+ logits = torch.empty(len(imglist), len(taglist))
449
+ pos = 0
450
+ for imgs in tqdm(loader, desc="inference"):
451
+ if args.model_type == "ram_plus":
452
+ out = forward_ram_plus(model, imgs)
453
+ elif args.model_type == "ram":
454
+ out = forward_ram(model, imgs)
455
+ else:
456
+ out = forward_tag2text(model, class_idxs, imgs)
457
+ bs = imgs.shape[0]
458
+ logits[pos:pos+bs, :] = out.cpu()
459
+ pos += bs
460
+
461
+ # cache the logits so thresholds can be re-tuned later without re-running inference
462
+ torch.save(logits, logit_file)
463
+
464
+ # filter with thresholds
465
+ pred_tags = []
466
+ for scores in logits.tolist():
467
+ pred_tags.append([
468
+ taglist[i] for i, s in enumerate(scores) if s >= thresholds[i]
469
+ ])
470
+
471
+ # generate result file
472
+ gen_pred_file(imglist, pred_tags, img_root, pred_file)
473
+
474
+ # evaluate and record
475
+ mAP, APs = get_mAP(logits.numpy(), annot_file, taglist)
476
+ CP, CR, Ps, Rs = get_PR(pred_file, annot_file, taglist)
477
+
478
+ with open(ap_file, "w", encoding="utf-8") as f:
479
+ f.write("Tag,AP\n")
480
+ for tag, AP in zip(taglist, APs):
481
+ f.write(f"{tag},{AP*100.0:.2f}\n")
482
+
483
+ with open(pr_file, "w", encoding="utf-8") as f:
484
+ f.write("Tag,Precision,Recall\n")
485
+ for tag, P, R in zip(taglist, Ps, Rs):
486
+ f.write(f"{tag},{P*100.0:.2f},{R*100.0:.2f}\n")
487
+
488
+ with open(summary_file, "a", encoding="utf-8") as f:  # append, so the config block written above is kept
489
+ print_write(f, f"mAP: {mAP*100.0}")
490
+ print_write(f, f"CP: {CP*100.0}")
491
+ print_write(f, f"CR: {CR*100.0}")
external/Grounded-Segment-Anything/recognize-anything/finetune.py ADDED
@@ -0,0 +1,291 @@
1
+ '''
2
+ * RAM++ & RAM & Tag2Text finetune
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import os
7
+ import ruamel.yaml as yaml
8
+ import numpy as np
9
+ import random
10
+ import time
11
+ import datetime
12
+ import json
13
+ from pathlib import Path
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ import torch.nn.functional as F
18
+ import torch.backends.cudnn as cudnn
19
+ import torch.distributed as dist
20
+ from torch.utils.data import DataLoader
21
+
22
+ from ram.models import ram_plus, ram, tag2text
23
+ import utils
24
+ from utils import cosine_lr_schedule
25
+ from ram.data import create_dataset, create_sampler, create_loader
26
+
27
+ import clip
28
+
29
+ def build_text_embed(model_clip, caption):
30
+ run_on_gpu = torch.cuda.is_available()
31
+ with torch.no_grad():
32
+
33
+ texts = clip.tokenize(caption,truncate = True) # tokenize
34
+ if run_on_gpu:
35
+ texts = texts.cuda()
36
+ model_clip = model_clip.cuda()
37
+ text_embeddings = model_clip.encode_text(texts)
38
+ text_embeddings /= text_embeddings.norm(dim=-1, keepdim=True)
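+ # L2-normalize so the caption embeddings lie on the CLIP unit sphere; they are fed to the model for the image-text alignment loss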
39
+ return text_embeddings
40
+
41
+
42
+
43
+ def train_ram_plus(model, data_loader, optimizer, epoch, device, config, model_clip):
44
+ # train
45
+ model.train()
46
+
47
+ metric_logger = utils.MetricLogger(delimiter=" ")
48
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
49
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
50
+ metric_logger.add_meter('loss_dis', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
51
+ metric_logger.add_meter('loss_alignment', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
52
+
53
+ header = 'Train Epoch: [{}]'.format(epoch)
54
+ print_freq = 50
55
+
56
+ data_loader.sampler.set_epoch(epoch)
57
+
58
+ for i, (image, image_224, caption, image_tag, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
59
+
60
+ optimizer.zero_grad()
61
+
62
+ batch_text_embed = build_text_embed(model_clip,caption)
63
+
64
+ image = image.to(device,non_blocking=True)
65
+ image_224 = image_224.to(device,non_blocking=True)
66
+
67
+ with torch.no_grad():
68
+ clip_image_feature = model_clip.encode_image(image_224)
69
+
70
+ loss_tag, loss_dis, loss_alignment = model(image, caption, image_tag, clip_image_feature, batch_text_embed)
71
+ loss = loss_tag + loss_dis + loss_alignment
72
+
73
+ loss.backward()
74
+ optimizer.step()
75
+
76
+ metric_logger.update(loss_tag=loss_tag.item())
77
+ metric_logger.update(loss_dis=loss_dis.item())
78
+ metric_logger.update(loss_alignment=loss_alignment.item())
79
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
80
+
81
+
82
+ # gather the stats from all processes
83
+ metric_logger.synchronize_between_processes()
84
+ print("Averaged stats:", metric_logger.global_avg())
85
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
86
+
87
+
88
+
89
+ def train_ram(model, data_loader, optimizer, epoch, device, config, model_clip):
90
+ # train
91
+ model.train()
92
+
93
+ metric_logger = utils.MetricLogger(delimiter=" ")
94
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
95
+ metric_logger.add_meter('loss_t2t', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
96
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
97
+ metric_logger.add_meter('loss_dis', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
98
+
99
+ header = 'Train Epoch: [{}]'.format(epoch)
100
+ print_freq = 50
101
+
102
+ data_loader.sampler.set_epoch(epoch)
103
+
104
+ for i, (image, image_224, caption, image_tag, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
105
+
106
+ optimizer.zero_grad()
107
+
108
+ image = image.to(device,non_blocking=True)
109
+ image_224 = image_224.to(device,non_blocking=True)
110
+
111
+ with torch.no_grad():
112
+ clip_image_feature = model_clip.encode_image(image_224)
113
+
114
+ loss_t2t, loss_tag, loss_dis = model(image, caption, image_tag, parse_tag, clip_image_feature)
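+ # the detached ratio rescales loss_tag to the magnitude of loss_t2t without changing where its gradients come from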
115
+ loss = loss_t2t + loss_tag/(loss_tag/loss_t2t).detach() + loss_dis
116
+
117
+ loss.backward()
118
+ optimizer.step()
119
+
120
+ metric_logger.update(loss_t2t=loss_t2t.item())
121
+ metric_logger.update(loss_tag=loss_tag.item())
122
+ metric_logger.update(loss_dis=loss_dis.item())
123
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
124
+
125
+
126
+ # gather the stats from all processes
127
+ metric_logger.synchronize_between_processes()
128
+ print("Averaged stats:", metric_logger.global_avg())
129
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
130
+
131
+
132
+ def train_tag2text(model, data_loader, optimizer, epoch, device, config):
133
+ # train
134
+ model.train()
135
+
136
+ metric_logger = utils.MetricLogger(delimiter=" ")
137
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
138
+ metric_logger.add_meter('loss_t2t', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
139
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
140
+
141
+ header = 'Train Epoch: [{}]'.format(epoch)
142
+ print_freq = 50
143
+
144
+ data_loader.sampler.set_epoch(epoch)
145
+
146
+ for i, (image, _, caption, _, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
147
+
148
+
149
+ optimizer.zero_grad()
150
+
151
+ image = image.to(device,non_blocking=True)
152
+
153
+ loss_t2t, loss_tag = model(image, caption, parse_tag)
154
+ loss = loss_t2t + loss_tag/(loss_tag/loss_t2t).detach()
155
+
156
+ loss.backward()
157
+ optimizer.step()
158
+
159
+ metric_logger.update(loss_t2t=loss_t2t.item())
160
+ metric_logger.update(loss_tag=loss_tag.item())
161
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
162
+
163
+
164
+ # gather the stats from all processes
165
+ metric_logger.synchronize_between_processes()
166
+ print("Averaged stats:", metric_logger.global_avg())
167
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
168
+
169
+
170
+ def main(args, config):
171
+ utils.init_distributed_mode(args)
172
+
173
+ device = torch.device(args.device)
174
+
175
+ # fix the seed for reproducibility
176
+ seed = args.seed + utils.get_rank()
177
+ torch.manual_seed(seed)
178
+ np.random.seed(seed)
179
+ random.seed(seed)
180
+ cudnn.benchmark = True
181
+
182
+ #### Dataset ####
183
+ print("Creating dataset")
184
+ datasets = [create_dataset('finetune', config, min_scale=0.2)]
185
+ print('number of training samples: %d'%len(datasets[0]))
186
+
187
+ num_tasks = utils.get_world_size()
188
+ global_rank = utils.get_rank()
189
+ samplers = create_sampler(datasets, [True], num_tasks, global_rank)
190
+
191
+ data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
192
+
193
+ print("Creating model")
194
+ if args.checkpoint:
195
+ print("load from:", args.checkpoint)
196
+
197
+ #### Model ####
198
+ if args.model_type == 'ram_plus':
199
+ print("Creating pretrained CLIP model")
200
+ model_clip, _ = clip.load("ViT-B/16", device=device)
201
+
202
+ print("Creating RAM model")
203
+ model = ram_plus(pretrained = args.checkpoint,image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
204
+ vit_ckpt_layer=config['vit_ckpt_layer'])
205
+
206
+ elif args.model_type == 'ram':
207
+ print("Creating pretrained CLIP model")
208
+ model_clip, _ = clip.load("ViT-B/16", device=device)
209
+
210
+ print("Creating RAM model")
211
+ model = ram(pretrained = args.checkpoint,image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
212
+ vit_ckpt_layer=config['vit_ckpt_layer'])
213
+
214
+ elif args.model_type == 'tag2text':
215
+ print("Creating Tag2Text model")
216
+ model = tag2text(pretrained = args.checkpoint,image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
217
+ vit_ckpt_layer=config['vit_ckpt_layer'], tag_list='ram/data/ram_tag_list.txt')
218
+ model = model.to(device)
219
+
220
+ ### Frozen CLIP model ###
221
+ model_clip = model_clip.to(device)
222
+ for _, param in model_clip.named_parameters():
223
+ param.requires_grad = False
224
+
225
+ ### Frozen label embedding for open-set recogniztion ###
226
+ model.label_embed.requires_grad = False
227
+ optimizer = torch.optim.AdamW(filter(lambda x: x.requires_grad, model.parameters()), lr=config['init_lr'], weight_decay=config['weight_decay'])
228
+
229
+ start_epoch = 0
230
+
231
+ model_without_ddp = model
232
+ if args.distributed:
233
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
234
+ model_without_ddp = model.module
235
+
236
+ print("Start training")
237
+ start_time = time.time()
238
+ for epoch in range(start_epoch, config['max_epoch']):
239
+
240
+ cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])
241
+
242
+ if args.model_type == 'ram_plus':
243
+ train_stats = train_ram_plus(model, data_loader, optimizer, epoch, device, config, model_clip)
244
+ elif args.model_type == 'ram':
245
+ train_stats = train_ram(model, data_loader, optimizer, epoch, device, config, model_clip)
246
+ elif args.model_type == 'tag2text':
247
+ train_stats = train_tag2text(model, data_loader, optimizer, epoch, device, config)
248
+
249
+ if utils.is_main_process():
250
+ log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
251
+ 'epoch': epoch,
252
+ }
253
+ save_obj = {
254
+ 'model': model_without_ddp.state_dict(),
255
+ 'optimizer': optimizer.state_dict(),
256
+ 'config': config,
257
+ 'epoch': epoch,
258
+ }
259
+ torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
260
+
261
+ with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
262
+ f.write(json.dumps(log_stats) + "\n")
263
+
264
+ dist.barrier()
265
+
266
+ total_time = time.time() - start_time
267
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
268
+ print('Training time {}'.format(total_time_str))
269
+
270
+
271
+ if __name__ == '__main__':
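+ # Typical multi-GPU launch (paths are illustrative):
+ #   torchrun --nproc_per_node=8 finetune.py --model-type ram_plus --config ./configs/pretrain.yaml \
+ #     --checkpoint pretrained/ram_plus_swin_large_14m.pth --output-dir output/finetune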
272
+ parser = argparse.ArgumentParser()
273
+ parser.add_argument('--config', default='./configs/pretrain.yaml')
274
+ parser.add_argument("--model-type",type=str,choices=("ram_plus", "ram", "tag2text"),required=True)
275
+ parser.add_argument('--output-dir', default='output/Pretrain')
276
+ parser.add_argument('--checkpoint', default='')
277
+ parser.add_argument('--evaluate', action='store_true')
278
+ parser.add_argument('--device', default='cuda')
279
+ parser.add_argument('--seed', default=42, type=int)
280
+ parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
281
+ parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
282
+ parser.add_argument('--distributed', default=True, type=bool)
283
+ args = parser.parse_args()
284
+
285
+ config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
286
+
287
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
288
+
289
+ yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
290
+
291
+ main(args, config)
external/Grounded-Segment-Anything/recognize-anything/generate_tag_des_llm.py ADDED
@@ -0,0 +1,68 @@
1
+ import openai
2
+ import json
3
+ from tqdm import tqdm
4
+ import argparse
5
+ from ram.utils.openset_utils import openimages_rare_unseen
6
+
7
+ parser = argparse.ArgumentParser(
8
+ description='Generate LLM tag descriptions for RAM++ open-set recognition')
9
+ parser.add_argument('--openai_api_key',
10
+ default='sk-xxxxx')
11
+ parser.add_argument('--output_file_path',
12
+ help='save path of llm tag descriptions',
13
+ default='datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json')
14
+
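+ # Example run (requires a valid OpenAI API key; the output path is the default above):
+ #   python generate_tag_des_llm.py --openai_api_key sk-... --output_file_path datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json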
15
+
16
+ def analyze_tags(tag):
17
+ # Generate LLM tag descriptions
18
+
19
+ llm_prompts = [ f"Describe concisely what a(n) {tag} looks like:", \
20
+ f"How can you identify a(n) {tag} concisely?", \
21
+ f"What does a(n) {tag} look like concisely?",\
22
+ f"What are the identifying characteristics of a(n) {tag}:", \
23
+ f"Please provide a concise description of the visual characteristics of {tag}:"]
24
+
25
+ results = {}
26
+ result_lines = []
27
+
28
+ result_lines.append(f"a photo of a {tag}.")
29
+
30
+ for llm_prompt in tqdm(llm_prompts):
31
+
32
+ # send message
33
+ response = openai.ChatCompletion.create(
34
+ model="gpt-3.5-turbo",
35
+ messages=[{"role": "assistant", "content": llm_prompt}],
36
+ max_tokens=77,
37
+ temperature=0.99,
38
+ n=10,
39
+ stop=None
40
+ )
41
+
42
+ # parse the response
43
+ for item in response.choices:
44
+ result_lines.append(item.message['content'].strip())
45
+ results[tag] = result_lines
46
+ return results
47
+
48
+
49
+ if __name__ == "__main__":
50
+
51
+ args = parser.parse_args()
52
+
53
+ # set OpenAI API key
54
+ openai.api_key = args.openai_api_key
55
+
56
+ categories = openimages_rare_unseen
57
+
58
+ tag_descriptions = []
59
+
60
+ for tag in categories:
61
+ result = analyze_tags(tag)
62
+ tag_descriptions.append(result)
63
+
64
+ output_file_path = args.output_file_path
65
+
66
+ with open(output_file_path, 'w') as w:
67
+ json.dump(tag_descriptions, w, indent=3)
68
+
external/Grounded-Segment-Anything/recognize-anything/gui_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
external/Grounded-Segment-Anything/recognize-anything/inference_ram.py ADDED
@@ -0,0 +1,54 @@
1
+ '''
2
+ * The Recognize Anything Model (RAM)
3
+ * Written by Xinyu Huang
4
+ '''
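+ # Example: python inference_ram.py --image images/demo/demo1.jpg --pretrained pretrained/ram_swin_large_14m.pth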
5
+ import argparse
6
+ import numpy as np
7
+ import random
8
+
9
+ import torch
10
+
11
+ from PIL import Image
12
+ from ram.models import ram
13
+ from ram import inference_ram as inference
14
+ from ram import get_transform
15
+
16
+
17
+ parser = argparse.ArgumentParser(
18
+ description='RAM inference for image tagging')
19
+ parser.add_argument('--image',
20
+ metavar='DIR',
21
+ help='path to dataset',
22
+ default='images/demo/demo1.jpg')
23
+ parser.add_argument('--pretrained',
24
+ metavar='DIR',
25
+ help='path to pretrained model',
26
+ default='pretrained/ram_swin_large_14m.pth')
27
+ parser.add_argument('--image-size',
28
+ default=384,
29
+ type=int,
30
+ metavar='N',
31
+ help='input image size (default: 384)')
32
+
33
+
34
+ if __name__ == "__main__":
35
+
36
+ args = parser.parse_args()
37
+
38
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
39
+
40
+ transform = get_transform(image_size=args.image_size)
41
+
42
+ #######load model
43
+ model = ram(pretrained=args.pretrained,
44
+ image_size=args.image_size,
45
+ vit='swin_l')
46
+ model.eval()
47
+
48
+ model = model.to(device)
49
+
50
+ image = transform(Image.open(args.image)).unsqueeze(0).to(device)
51
+
52
+ res = inference(image, model)
53
+ print("Image Tags: ", res[0])
54
+ print("图像标签: ", res[1])
external/Grounded-Segment-Anything/recognize-anything/inference_ram_openset.py ADDED
@@ -0,0 +1,68 @@
1
+ '''
2
+ * The Recognize Anything Model (RAM) inference on unseen classes
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import numpy as np
7
+ import random
8
+
9
+ import torch
10
+
11
+ from PIL import Image
12
+ from ram.models import ram
13
+ from ram import inference_ram_openset as inference
14
+ from ram import get_transform
15
+
16
+ from ram.utils import build_openset_label_embedding
17
+ from torch import nn
18
+
19
+ parser = argparse.ArgumentParser(
20
+ description='RAM open-set inference for image tagging')
21
+ parser.add_argument('--image',
22
+ metavar='DIR',
23
+ help='path to dataset',
24
+ default='images/openset_example.jpg')
25
+ parser.add_argument('--pretrained',
26
+ metavar='DIR',
27
+ help='path to pretrained model',
28
+ default='pretrained/ram_swin_large_14m.pth')
29
+ parser.add_argument('--image-size',
30
+ default=384,
31
+ type=int,
32
+ metavar='N',
33
+ help='input image size (default: 384)')
34
+
35
+
36
+ if __name__ == "__main__":
37
+
38
+ args = parser.parse_args()
39
+
40
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
41
+
42
+ transform = get_transform(image_size=args.image_size)
43
+
44
+ #######load model
45
+ model = ram(pretrained=args.pretrained,
46
+ image_size=args.image_size,
47
+ vit='swin_l')
48
+
49
+ #######set openset interference
50
+ openset_label_embedding, openset_categories = build_openset_label_embedding()
51
+
52
+ model.tag_list = np.array(openset_categories)
53
+
54
+ model.label_embed = nn.Parameter(openset_label_embedding.float())
55
+
56
+ model.num_class = len(openset_categories)
57
+ # the threshold for unseen categories is often lower
58
+ model.class_threshold = torch.ones(model.num_class) * 0.5
59
+ #######
60
+
61
+ model.eval()
62
+
63
+ model = model.to(device)
64
+
65
+ image = transform(Image.open(args.image)).unsqueeze(0).to(device)
66
+
67
+ res = inference(image, model)
68
+ print("Image Tags: ", res)
external/Grounded-Segment-Anything/recognize-anything/inference_ram_plus.py ADDED
@@ -0,0 +1,54 @@
1
+ '''
2
+ * The Recognize Anything Plus Model (RAM++)
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import numpy as np
7
+ import random
8
+
9
+ import torch
10
+
11
+ from PIL import Image
12
+ from ram.models import ram_plus
13
+ from ram import inference_ram as inference
14
+ from ram import get_transform
15
+
16
+
17
+ parser = argparse.ArgumentParser(
18
+ description='RAM++ inference for image tagging')
19
+ parser.add_argument('--image',
20
+ metavar='DIR',
21
+ help='path to dataset',
22
+ default='images/demo/demo1.jpg')
23
+ parser.add_argument('--pretrained',
24
+ metavar='DIR',
25
+ help='path to pretrained model',
26
+ default='pretrained/ram_plus_swin_large_14m.pth')
27
+ parser.add_argument('--image-size',
28
+ default=384,
29
+ type=int,
30
+ metavar='N',
31
+ help='input image size (default: 384)')
32
+
33
+
34
+ if __name__ == "__main__":
35
+
36
+ args = parser.parse_args()
37
+
38
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
39
+
40
+ transform = get_transform(image_size=args.image_size)
41
+
42
+ #######load model
43
+ model = ram_plus(pretrained=args.pretrained,
44
+ image_size=args.image_size,
45
+ vit='swin_l')
46
+ model.eval()
47
+
48
+ model = model.to(device)
49
+
50
+ image = transform(Image.open(args.image)).unsqueeze(0).to(device)
51
+
52
+ res = inference(image, model)
53
+ print("Image Tags: ", res[0])
54
+ print("图像标签: ", res[1])
external/Grounded-Segment-Anything/recognize-anything/inference_ram_plus_openset.py ADDED
@@ -0,0 +1,76 @@
1
+ '''
2
+ * The Recognize Anything Plus Model (RAM++) inference on unseen classes
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import numpy as np
7
+ import random
8
+
9
+ import torch
10
+
11
+ from PIL import Image
12
+ from ram.models import ram_plus
13
+ from ram import inference_ram_openset as inference
14
+ from ram import get_transform
15
+
16
+ from ram.utils import build_openset_llm_label_embedding
17
+ from torch import nn
18
+ import json
19
+
20
+ parser = argparse.ArgumentParser(
21
+ description='RAM++ open-set inference for image tagging')
22
+ parser.add_argument('--image',
23
+ metavar='DIR',
24
+ help='path to dataset',
25
+ default='images/openset_example.jpg')
26
+ parser.add_argument('--pretrained',
27
+ metavar='DIR',
28
+ help='path to pretrained model',
29
+ default='pretrained/ram_plus_swin_large_14m.pth')
30
+ parser.add_argument('--image-size',
31
+ default=384,
32
+ type=int,
33
+ metavar='N',
34
+ help='input image size (default: 384)')
35
+ parser.add_argument('--llm_tag_des',
36
+ metavar='DIR',
37
+ help='path to LLM tag descriptions',
38
+ default='datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json')
39
+
40
+ if __name__ == "__main__":
41
+
42
+ args = parser.parse_args()
43
+
44
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
45
+
46
+ transform = get_transform(image_size=args.image_size)
47
+
48
+ #######load model
49
+ model = ram_plus(pretrained=args.pretrained,
50
+ image_size=args.image_size,
51
+ vit='swin_l')
52
+
53
+ ####### set open-set inference
54
+
55
+ print('Building tag embedding:')
56
+ with open(args.llm_tag_des, 'rb') as fo:
57
+ llm_tag_des = json.load(fo)
58
+ openset_label_embedding, openset_categories = build_openset_llm_label_embedding(llm_tag_des)
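+ # swap the model's label space for the open-set categories: their embeddings are built from LLM tag descriptions instead of the training vocabulary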
59
+
60
+ model.tag_list = np.array(openset_categories)
61
+
62
+ model.label_embed = nn.Parameter(openset_label_embedding.float())
63
+
64
+ model.num_class = len(openset_categories)
65
+ # the threshold for unseen categories is often lower
66
+ model.class_threshold = torch.ones(model.num_class) * 0.5
67
+ #######
68
+
69
+ model.eval()
70
+
71
+ model = model.to(device)
72
+
73
+ image = transform(Image.open(args.image)).unsqueeze(0).to(device)
74
+
75
+ res = inference(image, model)
76
+ print("Image Tags: ", res)
external/Grounded-Segment-Anything/recognize-anything/inference_tag2text.py ADDED
@@ -0,0 +1,69 @@
1
+ '''
2
+ * The Tag2Text Model
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import numpy as np
7
+ import random
8
+
9
+ import torch
10
+
11
+ from PIL import Image
12
+ from ram.models import tag2text
13
+ from ram import inference_tag2text as inference
14
+ from ram import get_transform
15
+
16
+
17
+ parser = argparse.ArgumentParser(
18
+ description='Tag2Text inference for tagging and captioning')
19
+ parser.add_argument('--image',
20
+ metavar='DIR',
21
+ help='path to dataset',
22
+ default='images/1641173_2291260800.jpg')
23
+ parser.add_argument('--pretrained',
24
+ metavar='DIR',
25
+ help='path to pretrained model',
26
+ default='pretrained/tag2text_swin_14m.pth')
27
+ parser.add_argument('--image-size',
28
+ default=384,
29
+ type=int,
30
+ metavar='N',
31
+ help='input image size (default: 384)')
32
+ parser.add_argument('--thre',
33
+ default=0.68,
34
+ type=float,
35
+ metavar='N',
36
+ help='threshold value')
37
+ parser.add_argument('--specified-tags',
38
+ default='None',
39
+ help='User input specified tags')
40
+
41
+
42
+ if __name__ == "__main__":
43
+
44
+ args = parser.parse_args()
45
+
46
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
47
+
48
+ transform = get_transform(image_size=args.image_size)
49
+
50
+ # delete some tags that may disturb captioning
51
+ # 127: "quarter"; 2961: "back", 3351: "two"; 3265: "three"; 3338: "four"; 3355: "five"; 3359: "one"
52
+ delete_tag_index = [127,2961, 3351, 3265, 3338, 3355, 3359]
53
+
54
+ #######load model
55
+ model = tag2text(pretrained=args.pretrained,
56
+ image_size=args.image_size,
57
+ vit='swin_b',
58
+ delete_tag_index=delete_tag_index)
59
+ model.threshold = args.thre # threshold for tagging
60
+ model.eval()
61
+
62
+ model = model.to(device)
63
+
64
+ image = transform(Image.open(args.image)).unsqueeze(0).to(device)
65
+
66
+ res = inference(image, model, args.specified_tags)
67
+ print("Model Identified Tags: ", res[0])
68
+ print("User Specified Tags: ", res[1])
69
+ print("Image Caption: ", res[2])
external/Grounded-Segment-Anything/recognize-anything/pretrain.py ADDED
@@ -0,0 +1,303 @@
1
+ '''
2
+ * RAM++ & RAM & Tag2Text pretrain
3
+ * Written by Xinyu Huang
4
+ '''
5
+ import argparse
6
+ import os
7
+ import ruamel.yaml as yaml
8
+ import numpy as np
9
+ import random
10
+ import time
11
+ import datetime
12
+ import json
13
+ from pathlib import Path
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ import torch.nn.functional as F
18
+ import torch.backends.cudnn as cudnn
19
+ import torch.distributed as dist
20
+ from torch.utils.data import DataLoader
21
+
22
+ from ram.models import ram_plus, ram, tag2text
23
+ import utils
24
+ from utils import warmup_lr_schedule, step_lr_schedule
25
+ from ram.data import create_dataset, create_sampler, create_loader
26
+
27
+ import clip
28
+
29
+ def build_text_embed(model_clip, caption):
30
+ run_on_gpu = torch.cuda.is_available()
31
+ with torch.no_grad():
32
+
33
+ texts = clip.tokenize(caption,truncate = True) # tokenize
34
+ if run_on_gpu:
35
+ texts = texts.cuda()
36
+ model_clip = model_clip.cuda()
37
+ text_embeddings = model_clip.encode_text(texts)
38
+ text_embeddings /= text_embeddings.norm(dim=-1, keepdim=True)
39
+ # text_embedding = text_embeddings.mean(dim=0)
40
+ # text_embedding /= text_embedding.norm()
41
+ return text_embeddings
42
+
43
+
44
+
45
+ def train_ram_plus(model, data_loader, optimizer, epoch, device, config, model_clip):
46
+ # train
47
+ model.train()
48
+
49
+ metric_logger = utils.MetricLogger(delimiter=" ")
50
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
51
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
52
+ metric_logger.add_meter('loss_dis', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
53
+ metric_logger.add_meter('loss_alignment', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
54
+
55
+ header = 'Train Epoch: [{}]'.format(epoch)
56
+ print_freq = 50
57
+
58
+ data_loader.sampler.set_epoch(epoch)
59
+
60
+ for i, (image, caption, image_tag, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
61
+
62
+ if epoch==0:
63
+ warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
64
+
65
+ optimizer.zero_grad()
66
+
67
+ batch_text_embed = build_text_embed(model_clip,caption)
68
+
69
+ image = image.to(device,non_blocking=True)
70
+
71
+ with torch.no_grad():
72
+ clip_image_feature = model_clip.encode_image(image)
73
+
74
+ loss_tag, loss_dis, loss_alignment = model(image, caption, image_tag, clip_image_feature, batch_text_embed)
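+ # loss_tag: tagging loss, loss_dis: distillation against the CLIP image feature, loss_alignment: image-text alignment; summed with equal weights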
75
+ loss = loss_tag + loss_dis + loss_alignment
76
+
77
+ loss.backward()
78
+ optimizer.step()
79
+
80
+ metric_logger.update(loss_tag=loss_tag.item())
81
+ metric_logger.update(loss_dis=loss_dis.item())
82
+ metric_logger.update(loss_alignment=loss_alignment.item())
83
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
84
+
85
+
86
+ # gather the stats from all processes
87
+ metric_logger.synchronize_between_processes()
88
+ print("Averaged stats:", metric_logger.global_avg())
89
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
90
+
91
+
92
+
93
+ def train_ram(model, data_loader, optimizer, epoch, device, config, model_clip):
94
+ # train
95
+ model.train()
96
+
97
+ metric_logger = utils.MetricLogger(delimiter=" ")
98
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
99
+ metric_logger.add_meter('loss_t2t', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
100
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
101
+ metric_logger.add_meter('loss_dis', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
102
+
103
+ header = 'Train Epoch: [{}]'.format(epoch)
104
+ print_freq = 50
105
+
106
+ data_loader.sampler.set_epoch(epoch)
107
+
108
+ for i, (image, caption, image_tag, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
109
+
110
+ if epoch==0:
111
+ warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
112
+
113
+ optimizer.zero_grad()
114
+
115
+ image = image.to(device,non_blocking=True)
116
+
117
+ with torch.no_grad():
118
+ clip_image_feature = model_clip.encode_image(image)
119
+
120
+ loss_t2t, loss_tag, loss_dis = model(image, caption, image_tag, parse_tag, clip_image_feature)
121
+ loss = loss_t2t + loss_tag/(loss_tag/loss_t2t).detach() + loss_dis
122
+
123
+ loss.backward()
124
+ optimizer.step()
125
+
126
+ metric_logger.update(loss_t2t=loss_t2t.item())
127
+ metric_logger.update(loss_tag=loss_tag.item())
128
+ metric_logger.update(loss_dis=loss_dis.item())
129
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
130
+
131
+
132
+ # gather the stats from all processes
133
+ metric_logger.synchronize_between_processes()
134
+ print("Averaged stats:", metric_logger.global_avg())
135
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
136
+
137
+
138
+ def train_tag2text(model, data_loader, optimizer, epoch, device, config):
139
+ # train
140
+ model.train()
141
+
142
+ metric_logger = utils.MetricLogger(delimiter=" ")
143
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
144
+ metric_logger.add_meter('loss_t2t', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
145
+ metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
146
+
147
+ header = 'Train Epoch: [{}]'.format(epoch)
148
+ print_freq = 50
149
+
150
+ data_loader.sampler.set_epoch(epoch)
151
+
152
+ for i, (image, caption, _, parse_tag) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
153
+
154
+ if epoch==0:
155
+ warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
156
+
157
+ optimizer.zero_grad()
158
+
159
+ image = image.to(device,non_blocking=True)
160
+
161
+ loss_t2t, loss_tag = model(image, caption, parse_tag)
162
+ loss = loss_t2t + loss_tag/(loss_tag/loss_t2t).detach()
163
+
164
+ loss.backward()
165
+ optimizer.step()
166
+
167
+ metric_logger.update(loss_t2t=loss_t2t.item())
168
+ metric_logger.update(loss_tag=loss_tag.item())
169
+ metric_logger.update(lr=optimizer.param_groups[0]["lr"])
170
+
171
+
172
+ # gather the stats from all processes
173
+ metric_logger.synchronize_between_processes()
174
+ print("Averaged stats:", metric_logger.global_avg())
175
+ return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
176
+
177
+
178
+ def main(args, config):
179
+ utils.init_distributed_mode(args)
180
+
181
+ device = torch.device(args.device)
182
+
183
+ # fix the seed for reproducibility
184
+ seed = args.seed + utils.get_rank()
185
+ torch.manual_seed(seed)
186
+ np.random.seed(seed)
187
+ random.seed(seed)
188
+ cudnn.benchmark = True
189
+
190
+ #### Dataset ####
191
+ print("Creating dataset")
192
+ datasets = [create_dataset('pretrain', config, min_scale=0.2)]
193
+ print('number of training samples: %d'%len(datasets[0]))
194
+
195
+ num_tasks = utils.get_world_size()
196
+ global_rank = utils.get_rank()
197
+ samplers = create_sampler(datasets, [True], num_tasks, global_rank)
198
+
199
+ data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
200
+
201
+ #### Model ####
202
+ if args.model_type == 'ram_plus':
203
+ print("Creating pretrained CLIP model")
204
+ model_clip, _ = clip.load("ViT-B/16", device=device)
205
+
206
+ print("Creating RAM model")
207
+ model = ram_plus(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
208
+ vit_ckpt_layer=config['vit_ckpt_layer'], stage = 'train_from_scratch')
209
+
210
+ elif args.model_type == 'ram':
211
+ print("Creating pretrained CLIP model")
212
+ model_clip, _ = clip.load("ViT-B/16", device=device)
213
+
214
+ print("Creating RAM model")
215
+ model = ram(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
216
+ vit_ckpt_layer=config['vit_ckpt_layer'], stage = 'train_from_scratch')
217
+
218
+ elif args.model_type == 'tag2text':
219
+ print("Creating Tag2Text model")
220
+ model = tag2text(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
221
+ vit_ckpt_layer=config['vit_ckpt_layer'], stage = 'train_from_scratch', tag_list='ram/data/ram_tag_list.txt')
222
+ model = model.to(device)
223
+
224
+ ### Frozen CLIP model ###
225
+ model_clip = model_clip.to(device)
226
+ for _, param in model_clip.named_parameters():
227
+ param.requires_grad = False
228
+
229
+ ### Frozen label embedding for open-set recogniztion ###
230
+ model.label_embed.requires_grad = False
231
+ optimizer = torch.optim.AdamW(filter(lambda x: x.requires_grad, model.parameters()), lr=config['init_lr'], weight_decay=config['weight_decay'])
232
+
233
+ start_epoch = 0
234
+ if args.checkpoint:
235
+ checkpoint = torch.load(args.checkpoint, map_location='cpu')
236
+ state_dict = checkpoint['model']
237
+ model.load_state_dict(state_dict)
238
+
239
+ optimizer.load_state_dict(checkpoint['optimizer'])
240
+ start_epoch = checkpoint['epoch']+1
241
+ print('resume checkpoint from %s'%args.checkpoint)
242
+
243
+ model_without_ddp = model
244
+ if args.distributed:
245
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
246
+ model_without_ddp = model.module
247
+
248
+ print("Start training")
249
+ start_time = time.time()
250
+ for epoch in range(start_epoch, config['max_epoch']):
251
+
252
+ step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
253
+
254
+ if args.model_type == 'ram_plus':
255
+ train_stats = train_ram_plus(model, data_loader, optimizer, epoch, device, config, model_clip)
256
+ elif args.model_type == 'ram':
257
+ train_stats = train_ram(model, data_loader, optimizer, epoch, device, config, model_clip)
258
+ elif args.model_type == 'tag2text':
259
+ train_stats = train_tag2text(model, data_loader, optimizer, epoch, device, config)
260
+
261
+ if utils.is_main_process():
262
+ log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
263
+ 'epoch': epoch,
264
+ }
265
+ save_obj = {
266
+ 'model': model_without_ddp.state_dict(),
267
+ 'optimizer': optimizer.state_dict(),
268
+ 'config': config,
269
+ 'epoch': epoch,
270
+ }
271
+ torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
272
+
273
+ with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
274
+ f.write(json.dumps(log_stats) + "\n")
275
+
276
+ dist.barrier()
277
+
278
+ total_time = time.time() - start_time
279
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
280
+ print('Training time {}'.format(total_time_str))
281
+
282
+
283
+ if __name__ == '__main__':
284
+ parser = argparse.ArgumentParser()
285
+ parser.add_argument('--config', default='./configs/pretrain.yaml')
286
+ parser.add_argument("--model-type",type=str,choices=("ram_plus", "ram", "tag2text"),required=True)
287
+ parser.add_argument('--output-dir', default='output/Pretrain')
288
+ parser.add_argument('--checkpoint', default='')
289
+ parser.add_argument('--evaluate', action='store_true')
290
+ parser.add_argument('--device', default='cuda')
291
+ parser.add_argument('--seed', default=42, type=int)
292
+ parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
293
+ parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
294
+ parser.add_argument('--distributed', default=True, type=bool)
295
+ args = parser.parse_args()
296
+
297
+ config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
298
+
299
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
300
+
301
+ yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
302
+
303
+ main(args, config)
external/Grounded-Segment-Anything/recognize-anything/recognize_anything_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
external/Grounded-Segment-Anything/recognize-anything/requirements.txt ADDED
@@ -0,0 +1,9 @@
1
+ timm==0.4.12
2
+ transformers>=4.25.1
3
+ fairscale==0.4.4
4
+ pycocoevalcap
5
+ torch
6
+ torchvision
7
+ Pillow
8
+ scipy
9
+ clip @ git+https://github.com/openai/CLIP.git
external/Grounded-Segment-Anything/recognize-anything/setup.cfg ADDED
@@ -0,0 +1,15 @@
1
+ [metadata]
2
+ name = ram
3
+ version = 0.0.1
4
+ description = Recognize Anything Plus Model, Recognize Anything Model and Tag2Text Model
5
+
6
+ [options]
7
+ packages = find:
8
+ include_package_data = True
9
+
10
+ [options.packages.find]
11
+ exclude =
12
+ datasets
13
+ images
14
+ outputs
15
+ pretrained
external/Grounded-Segment-Anything/recognize-anything/setup.py ADDED
@@ -0,0 +1,2 @@
1
+ import setuptools
2
+ setuptools.setup()
external/Grounded-Segment-Anything/recognize-anything/utils.py ADDED
@@ -0,0 +1,279 @@
1
+ import math
2
+ def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
3
+ """Decay the learning rate"""
4
+ lr = (init_lr - min_lr) * 0.5 * (1. + math.cos(math.pi * epoch / max_epoch)) + min_lr
5
+ for param_group in optimizer.param_groups:
6
+ param_group['lr'] = lr
7
+
8
+ def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
9
+ """Warmup the learning rate"""
10
+ lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max_step)
11
+ for param_group in optimizer.param_groups:
12
+
13
+ param_group['lr'] = lr
14
+
15
+ def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
16
+ """Decay the learning rate"""
17
+ lr = max(min_lr, init_lr * (decay_rate**epoch))
18
+ for param_group in optimizer.param_groups:
19
+ param_group['lr'] = lr
20
+
21
+ import numpy as np
22
+ import io
23
+ import os
24
+ import time
25
+ from collections import defaultdict, deque
26
+ import datetime
27
+
28
+ import torch
29
+ import torch.distributed as dist
30
+
31
+ class SmoothedValue(object):
32
+ """Track a series of values and provide access to smoothed values over a
33
+ window or the global series average.
34
+ """
35
+
36
+ def __init__(self, window_size=20, fmt=None):
37
+ if fmt is None:
38
+ fmt = "{median:.4f} ({global_avg:.4f})"
39
+ self.deque = deque(maxlen=window_size)
40
+ self.total = 0.0
41
+ self.count = 0
42
+ self.fmt = fmt
43
+
44
+ def update(self, value, n=1):
45
+ self.deque.append(value)
46
+ self.count += n
47
+ self.total += value * n
48
+
49
+ def synchronize_between_processes(self):
50
+ """
51
+ Warning: does not synchronize the deque!
52
+ """
53
+ if not is_dist_avail_and_initialized():
54
+ return
55
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
56
+ dist.barrier()
57
+ dist.all_reduce(t)
58
+ t = t.tolist()
59
+ self.count = int(t[0])
60
+ self.total = t[1]
61
+
62
+ @property
63
+ def median(self):
64
+ d = torch.tensor(list(self.deque))
65
+ return d.median().item()
66
+
67
+ @property
68
+ def avg(self):
69
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
70
+ return d.mean().item()
71
+
72
+ @property
73
+ def global_avg(self):
74
+ return self.total / self.count
75
+
76
+ @property
77
+ def max(self):
78
+ return max(self.deque)
79
+
80
+ @property
81
+ def value(self):
82
+ return self.deque[-1]
83
+
84
+ def __str__(self):
85
+ return self.fmt.format(
86
+ median=self.median,
87
+ avg=self.avg,
88
+ global_avg=self.global_avg,
89
+ max=self.max,
90
+ value=self.value)
91
+
92
+
93
+ class MetricLogger(object):
94
+ def __init__(self, delimiter="\t"):
95
+ self.meters = defaultdict(SmoothedValue)
96
+ self.delimiter = delimiter
97
+
98
+ def update(self, **kwargs):
99
+ for k, v in kwargs.items():
100
+ if isinstance(v, torch.Tensor):
101
+ v = v.item()
102
+ assert isinstance(v, (float, int))
103
+ self.meters[k].update(v)
104
+
105
+ def __getattr__(self, attr):
106
+ if attr in self.meters:
107
+ return self.meters[attr]
108
+ if attr in self.__dict__:
109
+ return self.__dict__[attr]
110
+ raise AttributeError("'{}' object has no attribute '{}'".format(
111
+ type(self).__name__, attr))
112
+
113
+ def __str__(self):
114
+ loss_str = []
115
+ for name, meter in self.meters.items():
116
+ loss_str.append(
117
+ "{}: {}".format(name, str(meter))
118
+ )
119
+ return self.delimiter.join(loss_str)
120
+
121
+ def global_avg(self):
122
+ loss_str = []
123
+ for name, meter in self.meters.items():
124
+ loss_str.append(
125
+ "{}: {:.4f}".format(name, meter.global_avg)
126
+ )
127
+ return self.delimiter.join(loss_str)
128
+
129
+ def synchronize_between_processes(self):
130
+ for meter in self.meters.values():
131
+ meter.synchronize_between_processes()
132
+
133
+ def add_meter(self, name, meter):
134
+ self.meters[name] = meter
135
+
136
+ def log_every(self, iterable, print_freq, header=None):
137
+ i = 0
138
+ if not header:
139
+ header = ''
140
+ start_time = time.time()
141
+ end = time.time()
142
+ iter_time = SmoothedValue(fmt='{avg:.4f}')
143
+ data_time = SmoothedValue(fmt='{avg:.4f}')
144
+ space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
145
+ log_msg = [
146
+ header,
147
+ '[{0' + space_fmt + '}/{1}]',
148
+ 'eta: {eta}',
149
+ '{meters}',
150
+ 'time: {time}',
151
+ 'data: {data}'
152
+ ]
153
+ if torch.cuda.is_available():
154
+ log_msg.append('max mem: {memory:.0f}')
155
+ log_msg = self.delimiter.join(log_msg)
156
+ MB = 1024.0 * 1024.0
157
+ for obj in iterable:
158
+ data_time.update(time.time() - end)
159
+ yield obj
160
+ iter_time.update(time.time() - end)
161
+ if i % print_freq == 0 or i == len(iterable) - 1:
162
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
163
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
164
+ if torch.cuda.is_available():
165
+ print(log_msg.format(
166
+ i, len(iterable), eta=eta_string,
167
+ meters=str(self),
168
+ time=str(iter_time), data=str(data_time),
169
+ memory=torch.cuda.max_memory_allocated() / MB))
170
+ else:
171
+ print(log_msg.format(
172
+ i, len(iterable), eta=eta_string,
173
+ meters=str(self),
174
+ time=str(iter_time), data=str(data_time)))
175
+ i += 1
176
+ end = time.time()
177
+ total_time = time.time() - start_time
178
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
179
+ print('{} Total time: {} ({:.4f} s / it)'.format(
180
+ header, total_time_str, total_time / len(iterable)))
181
+
182
+
183
+ class AttrDict(dict):
184
+ def __init__(self, *args, **kwargs):
185
+ super(AttrDict, self).__init__(*args, **kwargs)
186
+ self.__dict__ = self
187
+
188
+
189
+ def compute_acc(logits, label, reduction='mean'):
190
+ ret = (torch.argmax(logits, dim=1) == label).float()
191
+ if reduction == 'none':
192
+ return ret.detach()
193
+ elif reduction == 'mean':
194
+ return ret.mean().item()
195
+
196
+ def compute_n_params(model, return_str=True):
197
+ tot = 0
198
+ for p in model.parameters():
199
+ w = 1
200
+ for x in p.shape:
201
+ w *= x
202
+ tot += w
203
+ if return_str:
204
+ if tot >= 1e6:
205
+ return '{:.1f}M'.format(tot / 1e6)
206
+ else:
207
+ return '{:.1f}K'.format(tot / 1e3)
208
+ else:
209
+ return tot
210
+
211
+ def setup_for_distributed(is_master):
212
+ """
213
+ This function disables printing when not in master process
214
+ """
215
+ import builtins as __builtin__
216
+ builtin_print = __builtin__.print
217
+
218
+ def print(*args, **kwargs):
219
+ force = kwargs.pop('force', False)
220
+ if is_master or force:
221
+ builtin_print(*args, **kwargs)
222
+
223
+ __builtin__.print = print
224
+
225
+
226
+ def is_dist_avail_and_initialized():
227
+ if not dist.is_available():
228
+ return False
229
+ if not dist.is_initialized():
230
+ return False
231
+ return True
232
+
233
+
234
+ def get_world_size():
235
+ if not is_dist_avail_and_initialized():
236
+ return 1
237
+ return dist.get_world_size()
238
+
239
+
240
+ def get_rank():
241
+ if not is_dist_avail_and_initialized():
242
+ return 0
243
+ return dist.get_rank()
244
+
245
+
246
+ def is_main_process():
247
+ return get_rank() == 0
248
+
249
+
250
+ def save_on_master(*args, **kwargs):
251
+ if is_main_process():
252
+ torch.save(*args, **kwargs)
253
+
254
+
255
+ def init_distributed_mode(args):
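+ # expects RANK / WORLD_SIZE / LOCAL_RANK set by torchrun (or SLURM_PROCID under SLURM); otherwise falls back to single-process mode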
256
+ if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
257
+ args.rank = int(os.environ["RANK"])
258
+ args.world_size = int(os.environ['WORLD_SIZE'])
259
+ args.gpu = int(os.environ['LOCAL_RANK'])
260
+ elif 'SLURM_PROCID' in os.environ:
261
+ args.rank = int(os.environ['SLURM_PROCID'])
262
+ args.gpu = args.rank % torch.cuda.device_count()
263
+ else:
264
+ print('Not using distributed mode')
265
+ args.distributed = False
266
+ return
267
+
268
+ args.distributed = True
269
+
270
+ torch.cuda.set_device(args.gpu)
271
+ args.dist_backend = 'nccl'
272
+ print('| distributed init (rank {}, world size {}): {}'.format(
273
+ args.rank, args.world_size, args.dist_url), flush=True)
274
+ torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
275
+ world_size=args.world_size, rank=args.rank)
276
+ torch.distributed.barrier()
277
+ setup_for_distributed(args.rank == 0)
278
+
279
+
external/Grounded-Segment-Anything/voxelnext_3d_box/README.md ADDED
@@ -0,0 +1,72 @@
1
+ # 3D-Box via Segment Anything
2
+
3
+ We extend [Segment Anything](https://github.com/facebookresearch/segment-anything) to 3D perception by combining it with [VoxelNeXt](https://github.com/dvlab-research/VoxelNeXt). Note that this project is still in progress. We are improving it and developing more examples. Any issue or pull request is welcome!
4
+
5
+ <p align="center"> <img src="images/sam-voxelnext.png" width="100%"> </p>
6
+
7
+ ## Why this project?
8
+ [Segment Anything](https://github.com/facebookresearch/segment-anything) and its follow-up projects
9
+ focus on 2D images. In this project, we extend the scope to the 3D world by combining [Segment Anything](https://github.com/facebookresearch/segment-anything) and [VoxelNeXt](https://github.com/dvlab-research/VoxelNeXt). Given a prompt (e.g., a point or a box), the result is not only a 2D segmentation mask but also a 3D box.
10
+
11
+ The core idea is that [VoxelNeXt](https://github.com/dvlab-research/VoxelNeXt) is a fully sparse 3D detector: it predicts a 3D box from each sparse voxel. We project the 3D sparse voxels onto the 2D image, and 3D boxes can then be generated for the voxels that fall inside the SAM mask (a minimal sketch is given at the end of this section).
12
+
13
+ - This project makes 3D object detection promptable.
14
+ - VoxelNeXt is built on sparse voxels, which are easy to relate to the mask produced by Segment Anything.
15
+ - This project could facilitate 3D box labeling: a 3D box can be obtained with a simple click on the image, which might save substantial human effort, especially in autonomous driving scenes.
16
+
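+ Below is a minimal sketch of that selection step, assuming the voxel-to-pixel projection and the per-voxel box predictions are already available as NumPy arrays. The function and argument names are illustrative only, not the actual API of this repo, and the 7-value box layout (x, y, z, dx, dy, dz, yaw) is an assumption.
+ 
+ ```python
+ import numpy as np
+ 
+ def boxes_from_sam_mask(mask, voxel_uv, voxel_boxes, voxel_scores, score_thresh=0.1):
+     """Pick the 3D box predicted at voxels whose 2D projection falls inside a SAM mask.
+ 
+     mask:         (H, W) boolean SAM segmentation mask
+     voxel_uv:     (N, 2) integer pixel coordinates of the sparse voxels projected onto the image
+     voxel_boxes:  (N, 7) per-voxel 3D boxes predicted by the detector
+     voxel_scores: (N,)   per-voxel confidence scores
+     """
+     h, w = mask.shape
+     u, v = voxel_uv[:, 0], voxel_uv[:, 1]
+     # keep voxels that project inside the image ...
+     in_image = (u >= 0) & (u < w) & (v >= 0) & (v < h)
+     # ... and whose projected pixel lies inside the SAM mask
+     in_mask = np.zeros(len(voxel_uv), dtype=bool)
+     in_mask[in_image] = mask[v[in_image], u[in_image]]
+     keep = in_mask & (voxel_scores > score_thresh)
+     if not keep.any():
+         return np.empty((0, voxel_boxes.shape[1])), np.empty((0,))
+     # report the highest-scoring box among the selected voxels (one object per prompt)
+     best = voxel_scores[keep].argmax()
+     return voxel_boxes[keep][best:best + 1], voxel_scores[keep][best:best + 1]
+ ```
+ 
+ In the notebook the mask comes from `SamPredictor` and the per-voxel boxes from VoxelNeXt; the sketch only shows the glue between the two.
+ 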
17
+ ## Installation
18
+ 1. Basic requirements
19
+ `pip install -r requirements.txt`
21
+ 2. Segment anything
22
+ `pip install git+https://github.com/facebookresearch/segment-anything.git`
24
+ 3. spconv
25
+ `pip install spconv`
27
+ or the CUDA build that matches your CUDA version, e.g. `pip install spconv-cu111`. Please use a spconv 2.2 / 2.3 release, for example `spconv==2.3.5`.
28
+
29
+
30
+ ## Getting Started
31
+ Please try it via [seg_anything_and_3D.ipynb](seg_anything_and_3D.ipynb).
32
+ We provide this example on the nuScenes dataset. You can use other image-point cloud pairs.
33
+
34
+ - The demo point for one frame is provided here [points_demo.npy](https://drive.google.com/file/d/1br0VDamameu7B1G1p4HEjj6LshGs5dHB/view?usp=share_link).
35
+ - The point to image translation infos on nuScenes val can be download [here](https://drive.google.com/file/d/1nJqdfs0gMTIo4fjOwytSbM0fdBOJ4IGb/view?usp=share_link).
36
+ - The weight in the demo is [voxelnext_nuscenes_kernel1.pth](https://drive.google.com/file/d/17mQRXXUsaD0dlRzAKep3MQjfj8ugDsp9/view?usp=share_link).
37
+ - The nuScenes info file is [nuscenes_infos_10sweeps_val.pkl](https://drive.google.com/file/d/1Kaxtubzr1GofcoFz97S6qwAIG2wzhPo_/view?usp=share_link). This is generated from [OpenPCDet](https://github.com/open-mmlab/OpenPCDet) codebase.
38
+
39
+
40
+ <p align="center"> <img src="images/mask_box.png" width="100%"> </p>
41
+ <p align="center"> <img src="images/image_boxes1.png" width="100%"> </p>
42
+ <p align="center"> <img src="images/image_boxes2.png" width="100%"> </p>
43
+ <p align="center"> <img src="images/image_boxes3.png" width="100%"> </p>
44
+
45
+ ## TODO List
46
+ - [ ] Zero-shot version of VoxelNeXt.
47
+ - [ ] Examples on more datasets.
48
+ - [ ] Indoor scenes.
49
+
50
+ ## Citation
51
+ If you find this project useful in your research, please consider citing:
52
+ ```
53
+ @article{kirillov2023segany,
54
+ title={Segment Anything},
55
+ author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross},
56
+ journal={arXiv:2304.02643},
57
+ year={2023}
58
+ }
59
+
60
+ @inproceedings{chen2023voxenext,
61
+ title={VoxelNeXt: Fully Sparse VoxelNet for 3D Object Detection and Tracking},
62
+ author={Yukang Chen and Jianhui Liu and Xiangyu Zhang and Xiaojuan Qi and Jiaya Jia},
63
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
64
+ year={2023}
65
+ }
66
+
67
+ ```
68
+
69
+ ## Acknowledgement
70
+ - [Segment Anything](https://github.com/facebookresearch/segment-anything)
71
+ - [VoxelNeXt](https://github.com/dvlab-research/VoxelNeXt)
72
+ - [UVTR](https://github.com/dvlab-research/UVTR) for 3D to 2D translation.
external/Grounded-Segment-Anything/voxelnext_3d_box/__init__.py ADDED
File without changes
external/Grounded-Segment-Anything/voxelnext_3d_box/config.yaml ADDED
@@ -0,0 +1,56 @@
1
+ SAM_TYPE: "vit_h"
2
+ SAM_CHECKPOINT: "sam_vit_h_4b8939.pth"
3
+
4
+ POINT_CLOUD_RANGE: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
5
+ USED_FEATURE_LIST: ['x', 'y', 'z', 'intensity', 'timestamp']
6
+ DATA_PROCESSOR:
7
+ - NAME: mask_points_and_boxes_outside_range
8
+ REMOVE_OUTSIDE_BOXES: True
9
+
10
+ - NAME: shuffle_points
11
+ SHUFFLE_ENABLED: {
12
+ 'train': True,
13
+ 'test': True
14
+ }
15
+
16
+ - NAME: transform_points_to_voxels
17
+ VOXEL_SIZE: [0.075, 0.075, 0.2]
18
+ MAX_POINTS_PER_VOXEL: 10
19
+ MAX_NUMBER_OF_VOXELS: {
20
+ 'train': 120000,
21
+ 'test': 160000
22
+ }
23
+
24
+ VOXELNEXT_CHECKPOINT: "voxelnext_nuscenes_kernel1.pth"
25
+ INPUT_CHANNELS: 5
26
+ GRID_SIZE: [1440, 1440, 40]
27
+
28
+ CLASS_NAMES: ['car','truck', 'construction_vehicle', 'bus', 'trailer',
29
+ 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']
30
+
31
+ KERNEL_SIZE_HEAD: 1
32
+
33
+ VOXEL_SIZE: [0.075, 0.075, 0.2]
34
+ CLASS_NAMES_EACH_HEAD: [
35
+ ['car'],
36
+ ['truck', 'construction_vehicle'],
37
+ ['bus', 'trailer'],
38
+ ['barrier'],
39
+ ['motorcycle', 'bicycle'],
40
+ ['pedestrian', 'traffic_cone'],
41
+ ]
42
+
43
+ SEPARATE_HEAD_CFG:
44
+ HEAD_ORDER: ['center', 'center_z', 'dim', 'rot', 'vel']
45
+ HEAD_DICT: {
46
+ 'center': {'out_channels': 2, 'num_conv': 2},
47
+ 'center_z': {'out_channels': 1, 'num_conv': 2},
48
+ 'dim': {'out_channels': 3, 'num_conv': 2},
49
+ 'rot': {'out_channels': 2, 'num_conv': 2},
50
+ 'vel': {'out_channels': 2, 'num_conv': 2},
51
+ }
52
+
53
+ POST_PROCESSING:
54
+ SCORE_THRESH: 0
55
+ POST_CENTER_LIMIT_RANGE: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
56
+ MAX_OBJ_PER_SAMPLE: 500
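The config above is consumed by `model.py` (next file) through attribute access and `.get(...)` calls. A minimal loading sketch, assuming `pyyaml` and `easydict` from `requirements.txt`; the original demo may instead rely on OpenPCDet's config utilities:

```python
import yaml
from easydict import EasyDict

# Load config.yaml into an attribute-accessible dict (an assumption: the repo's own
# loader may be OpenPCDet's cfg_from_yaml_file rather than this).
with open("config.yaml", "r") as f:
    model_cfg = EasyDict(yaml.safe_load(f))

print(model_cfg.SAM_TYPE)                  # "vit_h"
print(model_cfg.get("INPUT_CHANNELS", 5))  # 5
print(model_cfg.POINT_CLOUD_RANGE)         # [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
```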
external/Grounded-Segment-Anything/voxelnext_3d_box/model.py ADDED
@@ -0,0 +1,142 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ from .models.data_processor import DataProcessor
5
+ from .models.mean_vfe import MeanVFE
6
+ from .models.spconv_backbone_voxelnext import VoxelResBackBone8xVoxelNeXt
7
+ from .models.voxelnext_head import VoxelNeXtHead
8
+
9
+ from .utils.image_projection import _proj_voxel_image
10
+ from segment_anything import SamPredictor, sam_model_registry
11
+
12
+ class VoxelNeXt(nn.Module):
13
+ def __init__(self, model_cfg):
14
+ super().__init__()
15
+
16
+ point_cloud_range = np.array(model_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
17
+
18
+ self.data_processor = DataProcessor(
19
+ model_cfg.DATA_PROCESSOR, point_cloud_range=point_cloud_range,
20
+ training=False, num_point_features=len(model_cfg.USED_FEATURE_LIST)
21
+ )
22
+
23
+ input_channels = model_cfg.get('INPUT_CHANNELS', 5)
24
+ grid_size = np.array(model_cfg.get('GRID_SIZE', [1440, 1440, 40]))
25
+
26
+ class_names = model_cfg.get('CLASS_NAMES')
27
+ kernel_size_head = model_cfg.get('KERNEL_SIZE_HEAD', 1)
28
+ self.point_cloud_range = torch.Tensor(model_cfg.get('POINT_CLOUD_RANGE', [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]))
29
+ self.voxel_size = torch.Tensor(model_cfg.get('VOXEL_SIZE', [0.075, 0.075, 0.2]))
30
+ CLASS_NAMES_EACH_HEAD = model_cfg.get('CLASS_NAMES_EACH_HEAD')
31
+ SEPARATE_HEAD_CFG = model_cfg.get('SEPARATE_HEAD_CFG')
32
+ POST_PROCESSING = model_cfg.get('POST_PROCESSING')
33
+ self.voxelization = MeanVFE()
34
+ self.backbone_3d = VoxelResBackBone8xVoxelNeXt(input_channels, grid_size)
35
+ self.dense_head = VoxelNeXtHead(class_names, self.point_cloud_range, self.voxel_size, kernel_size_head,
36
+ CLASS_NAMES_EACH_HEAD, SEPARATE_HEAD_CFG, POST_PROCESSING)
37
+
38
+
39
+ class Model(nn.Module):
40
+ def __init__(self, model_cfg, device="cuda"):
41
+ super().__init__()
42
+
43
+ sam_type = model_cfg.get('SAM_TYPE', "vit_b")
44
+ sam_checkpoint = model_cfg.get('SAM_CHECKPOINT', "/data/sam_vit_b_01ec64.pth")
45
+
46
+ sam = sam_model_registry[sam_type](checkpoint=sam_checkpoint).to(device=device)
47
+ self.sam_predictor = SamPredictor(sam)
48
+
49
+ voxelnext_checkpoint = model_cfg.get('VOXELNEXT_CHECKPOINT', "/data/voxelnext_nuscenes_kernel1.pth")
50
+ model_dict = torch.load(voxelnext_checkpoint)
51
+ self.voxelnext = VoxelNeXt(model_cfg).to(device=device)
52
+ self.voxelnext.load_state_dict(model_dict)
53
+ self.point_features = {}
54
+ self.device = device
55
+
56
+ def image_embedding(self, image):
57
+ self.sam_predictor.set_image(image)
58
+
59
+ def point_embedding(self, data_dict, image_id):
60
+ data_dict = self.voxelnext.data_processor.forward(
61
+ data_dict=data_dict
62
+ )
63
+ data_dict['voxels'] = torch.Tensor(data_dict['voxels']).to(self.device)
64
+ data_dict['voxel_num_points'] = torch.Tensor(data_dict['voxel_num_points']).to(self.device)
65
+ data_dict['voxel_coords'] = torch.Tensor(data_dict['voxel_coords']).to(self.device)
66
+
67
+ data_dict = self.voxelnext.voxelization(data_dict)
68
+ n_voxels = data_dict['voxel_coords'].shape[0]
69
+ device = data_dict['voxel_coords'].device
70
+ dtype = data_dict['voxel_coords'].dtype
71
+ data_dict['voxel_coords'] = torch.cat([torch.zeros((n_voxels, 1), device=device, dtype=dtype), data_dict['voxel_coords']], dim=1)
72
+ data_dict['batch_size'] = 1
73
+
74
+ if image_id not in self.point_features:
75
+ data_dict = self.voxelnext.backbone_3d(data_dict)
76
+ self.point_features[image_id] = data_dict
77
+ else:
78
+ data_dict = self.point_features[image_id]
79
+ pred_dicts = self.voxelnext.dense_head(data_dict)
80
+
81
+ voxel_coords = data_dict['out_voxels'][pred_dicts[0]['voxel_ids'].squeeze(-1)] * self.voxelnext.dense_head.feature_map_stride
82
+
83
+ return pred_dicts, voxel_coords
84
+
85
+ def generate_3D_box(self, lidar2img_rt, mask, voxel_coords, pred_dicts, quality_score=0.1):
86
+ device = voxel_coords.device
87
+ points_image, depth = _proj_voxel_image(voxel_coords, lidar2img_rt, self.voxelnext.voxel_size.to(device), self.voxelnext.point_cloud_range.to(device))
88
+ points = points_image.permute(1, 0).int().cpu().numpy()
89
+ selected_voxels = torch.zeros_like(depth).squeeze(0)
90
+
91
+ for i in range(points.shape[0]):
92
+ point = points[i]
93
+ if point[0] < 0 or point[1] < 0 or point[0] >= mask.shape[1] or point[1] >= mask.shape[0]:
94
+ continue
95
+ if mask[point[1], point[0]]:
96
+ selected_voxels[i] = 1
97
+
98
+ mask_extra = (pred_dicts[0]['pred_scores'] > quality_score)
99
+ if mask_extra.sum() == 0:
100
+ print("no high quality 3D box related.")
101
+ return None
102
+
103
+ selected_voxels *= mask_extra
104
+ if selected_voxels.sum() > 0:
105
+ selected_box_id = pred_dicts[0]['pred_scores'][selected_voxels.bool()].argmax()
106
+ selected_box = pred_dicts[0]['pred_boxes'][selected_voxels.bool()][selected_box_id]
107
+ else:
108
+ grid_x, grid_y = torch.meshgrid(torch.arange(mask.shape[0]), torch.arange(mask.shape[1]))
109
+ mask_x, mask_y = grid_x[mask], grid_y[mask]
110
+ mask_center = torch.Tensor([mask_y.float().mean(), mask_x.float().mean()]).to(
111
+ pred_dicts[0]['pred_boxes'].device).unsqueeze(1)
112
+
113
+ dist = ((points_image - mask_center) ** 2).sum(0)
114
+ selected_id = dist[mask_extra].argmin()
115
+ selected_box = pred_dicts[0]['pred_boxes'][mask_extra][selected_id]
116
+ return selected_box
117
+
118
+ def forward(self, image, point_dict, prompt_point, lidar2img_rt, image_id, quality_score=0.1):
119
+ self.image_embedding(image)
120
+ pred_dicts, voxel_coords = self.point_embedding(point_dict, image_id)
121
+
122
+ masks, scores, _ = self.sam_predictor.predict(point_coords=prompt_point, point_labels=np.array([1]))
123
+ mask = masks[0]
124
+
125
+ box3d = self.generate_3D_box(lidar2img_rt, mask, voxel_coords, pred_dicts, quality_score=quality_score)
126
+ return mask, box3d
127
+
128
+
129
+ if __name__ == '__main__':
130
+ cfg_dataset = 'nuscenes_dataset.yaml'
131
+ cfg_model = 'config.yaml'
132
+
133
+ # NOTE: `cfg` and `cfg_from_yaml_file` are assumed to come from the OpenPCDet codebase
+ # (pcdet.config), and `NuScenesDataset` from a nuScenes dataset module; neither is imported in this file.
+ dataset_cfg = cfg_from_yaml_file(cfg_dataset, cfg)
134
+ model_cfg = cfg_from_yaml_file(cfg_model, cfg)
135
+
136
+ nuscenes_dataset = NuScenesDataset(dataset_cfg)
137
+ model = Model(model_cfg)
138
+
139
+ index = 0
140
+ data_dict = nuscenes_dataset._get_points(index)
141
+ model.point_embedding(data_dict, image_id=index)
142
+
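A hedged sketch of how the `Model` class above might be driven end to end. The input layout (an RGB camera image, a dict with raw LiDAR `points`, a single (x, y) pixel prompt for SAM, and a 4x4 lidar-to-image matrix) is inferred from the signatures in this file and the OpenPCDet-style data processor; the placeholder arrays are illustrative only:

```python
import numpy as np
# assuming Model has been imported from voxelnext_3d_box.model and model_cfg has been
# loaded from config.yaml (see the sketch after that file); both checkpoints referenced
# in the config must be present on disk.
model = Model(model_cfg)

image = np.zeros((900, 1600, 3), dtype=np.uint8)           # nuScenes-sized camera image (RGB)
point_dict = {"points": np.zeros((30000, 5), np.float32)}  # x, y, z, intensity, timestamp (assumed key)
prompt_point = np.array([[800, 450]])                      # one (x, y) pixel clicked on the object
lidar2img_rt = np.eye(4, dtype=np.float32)                 # lidar-to-image projection (placeholder)

mask, box3d = model(image, point_dict, prompt_point, lidar2img_rt,
                    image_id=0, quality_score=0.1)
```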
external/Grounded-Segment-Anything/voxelnext_3d_box/requirements.txt ADDED
@@ -0,0 +1,10 @@
1
+ numpy
2
+ torch
3
+ torchvision
4
+ easydict
5
+ pyyaml
6
+ opencv-python
7
+ pycocotools
8
+ matplotlib
9
+ onnxruntime
10
+ onnx
external/PerspectiveFields/.gitattributes ADDED
@@ -0,0 +1,6 @@
1
+ models/paramnet_360cities_edina_rpf.pth filter=lfs diff=lfs merge=lfs -text
2
+ models/paramnet_gsv_rpfpp.pth filter=lfs diff=lfs merge=lfs -text
3
+ models/paramnet_gsv_rpf.pth filter=lfs diff=lfs merge=lfs -text
4
+ assets/imgs/ filter=lfs diff=lfs merge=lfs -text
5
+ models/cvpr2023.pth filter=lfs diff=lfs merge=lfs -text
6
+ models/paramnet_360cities_edina_rpfpp.pth filter=lfs diff=lfs merge=lfs -text
external/PerspectiveFields/.gitignore ADDED
@@ -0,0 +1,10 @@
1
+ .DS_Store
2
+ */__pycache__/*
3
+ .python-version
4
+ *.so
5
+ *.pyc
6
+ *.egg-info/
7
+ *.pth
8
+ */.ipynb_checkpoints/*
9
+ .ipynb_checkpoints/
10
+ flagged/
external/PerspectiveFields/LICENSE ADDED
@@ -0,0 +1,15 @@
1
+ Adobe Research License Terms
2
+
3
+ 1. You may use, reproduce, modify, and display the research materials provided under this license (the “Research
4
+ Materials”) solely for noncommercial purposes. Noncommercial purposes include academic research, teaching, and
5
+ testing, but do not include commercial licensing or distribution, development of commercial products, or any other
6
+ activity which results in commercial gain. You may not redistribute the Research Materials.
7
+
8
+ 2. You agree to (a) comply with all laws and regulations applicable to your use of the Research Materials under this license,
9
+ including but not limited to any import or export laws; (b) preserve any copyright or other notices from the Research
10
+ Materials; and (c) for any Research Materials in object code, not attempt to modify, reverse engineer, or decompile
11
+ such Research Materials except as permitted by applicable law.
12
+
13
+ 3. THE RESEARCH MATERIALS ARE PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, AND YOU ASSUME ALL RISKS
14
+ ASSOCIATED WITH THEIR USE. IN NO EVENT WILL ANYONE BE LIABLE TO YOU FOR ANY ACTUAL, INCIDENTAL, SPECIAL,
15
+ OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION WITH USE OF THE RESEARCH MATERIALS.
external/PerspectiveFields/README.md ADDED
@@ -0,0 +1,220 @@
1
+ <!-- omit in toc -->
2
+ Perspective Fields for Single Image Camera Calibration
3
+ ================================================================
4
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/jinlinyi/PerspectiveFields)
5
+
6
+ ### [Project Page](https://jinlinyi.github.io/PerspectiveFields/) | [Paper](https://arxiv.org/abs/2212.03239) | [Live Demo 🤗](https://huggingface.co/spaces/jinlinyi/PerspectiveFields)
7
+
8
+ CVPR 2023 (✨Highlight)
9
+ <h4>
10
+
11
+ [Linyi Jin](https://jinlinyi.github.io/)<sup>1</sup>, [Jianming Zhang](https://jimmie33.github.io/)<sup>2</sup>, [Yannick Hold-Geoffroy](https://yannickhold.com/)<sup>2</sup>, [Oliver Wang](http://www.oliverwang.info/)<sup>2</sup>, [Kevin Matzen](http://kmatzen.com/)<sup>2</sup>, [Matthew Sticha](https://www.linkedin.com/in/matthew-sticha-746325202/)<sup>1</sup>, [David Fouhey](https://web.eecs.umich.edu/~fouhey/)<sup>1</sup>
12
+
13
+ <span style="font-size: 14pt; color: #555555">
14
+ <sup>1</sup>University of Michigan, <sup>2</sup>Adobe Research
15
+ </span>
16
+ </h4>
17
+ <hr>
18
+
19
+ <p align="center">
20
+
21
+ ![alt text](assets/teaser-field.jpg)
22
+ </p>
23
+ We propose Perspective Fields as a representation that models the local perspective properties of an image. Perspective Fields contain per-pixel information about the camera view, parameterized as an up vector and a latitude value.
24
+
25
+ <p align="center">
26
+ <img height="100" alt="swiping-1" src="assets/swiping-1.gif"> <img height="100" alt="swiping-2" src="assets/swiping-2.gif"> <img height="100" alt="swiping-3" src="assets/swiping-3.gif"> <img height="100" alt="swiping-4" src="assets/swiping-4.gif">
27
+ </p>
28
+
29
+ 📷 From Perspective Fields, you can also get camera parameters if you assume certain camera models. We provide models to recover camera roll, pitch, field of view, and principal point location.
30
+
31
+ <p align="center">
32
+ <img src="assets/vancouver/IMG_2481.jpg" alt="Image 1" height="200px" style="margin-right:10px;">
33
+ <img src="assets/vancouver/pred_pers.png" alt="Image 2" height="200px" style="margin-center:10px;">
34
+ <img src="assets/vancouver/pred_param.png" alt="Image 2" height="200px" style="margin-left:10px;">
35
+ </p>
36
+
37
+ <!-- omit in toc -->
38
+ Updates
39
+ ------------------
40
+ - [April 2024]: 🚀 We've launched an inference version (`main` branch) with minimal dependencies. For training and evaluation, please check out the [`train_eval` branch](https://github.com/jinlinyi/PerspectiveFields/tree/train_eval).
41
+ - [July 2023]: We released a new model trained on [360cities](https://www.360cities.net/) and [EDINA](https://github.com/tien-d/EgoDepthNormal/blob/main/README_dataset.md) dataset, consisting of indoor🏠, outdoor🏙️, natural🌳, and egocentric👋 data!
42
+ - [May 2023]: Live demo released 🤗. https://huggingface.co/spaces/jinlinyi/PerspectiveFields. Thanks Huggingface for funding this demo!
43
+
44
+ <!-- omit in toc -->
45
+ Table of Contents
46
+ ------------------
47
+ - [Environment Setup](#environment-setup)
48
+ - [Inference](#inference)
49
+ - [Train / Eval](#train--eval)
50
+ - [Demo](#demo)
51
+ - [Model Zoo](#model-zoo)
52
+ - [Coordinate Frame](#coordinate-frame)
53
+ - [Camera Parameters to Perspective Fields](#camera-parameters-to-perspective-fields)
54
+ - [Visualize Perspective Fields](#visualize-perspective-fields)
55
+ - [Citation](#citation)
56
+ - [Acknowledgment](#acknowledgment)
57
+
58
+
59
+ [1]: ./docs/environment.md
60
+ [2]: ./jupyter-notebooks/camera2perspective.ipynb
61
+ [3]: ./jupyter-notebooks/predict_perspective_fields.ipynb
62
+ [4]: ./jupyter-notebooks/perspective_paramnet.ipynb
63
+ [5]: ./docs/train.md
64
+ [6]: ./docs/test.md
65
+ [7]: ./docs/models.md
66
+
67
+
68
+
69
+ ## Environment Setup
70
+ ### Inference
71
+ PerspectiveFields requires python >= 3.8 and [PyTorch](https://pytorch.org/).
72
+ | ***Pro tip:*** *use [mamba](https://github.com/conda-forge/miniforge) in place of conda for much faster installs.*
73
+ ```bash
74
+ # install pytorch compatible to your system https://pytorch.org/get-started/previous-versions/
75
+ conda install pytorch=1.10.0 torchvision cudatoolkit=11.3 -c pytorch
76
+ pip install git+https://github.com/jinlinyi/PerspectiveFields.git
77
+ ```
78
+ Alternatively, install the package locally,
79
+ ```bash
80
+ git clone git@github.com:jinlinyi/PerspectiveFields.git
81
+ # create virtual env
82
+ conda create -n perspective python=3.9
83
+ conda activate perspective
84
+ # install pytorch compatible to your system https://pytorch.org/get-started/previous-versions/
85
+ # conda install pytorch torchvision cudatoolkit -c pytorch
86
+ conda install pytorch=1.10.0 torchvision cudatoolkit=11.3 -c pytorch
87
+ # install Perspective Fields.
88
+ cd PerspectiveFields
89
+ pip install -e .
90
+ ```
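To sanity-check the install, the package exposes a helper that lists the available pretrained model versions (it is also called at the top of `demo/demo.py` in this commit):

```python
from perspective2d import PerspectiveFields

# Lists the available model versions, e.g. 'Paramnet-360Cities-edina-centered';
# exact print/return behavior is not documented here.
PerspectiveFields.versions()
```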
91
+
92
+ ### Train / Eval
93
+ For training and evaluation, please check out the [`train_eval` branch](https://github.com/jinlinyi/PerspectiveFields/tree/train_eval).
94
+
95
+
96
+ ## Demo
97
+ Here is a minimal script to run on a single image, see [`demo/demo.py`](demo/demo.py):
98
+ ```python
99
+ import cv2
100
+ from perspective2d import PerspectiveFields
101
+ # specify model version
102
+ version = 'Paramnet-360Cities-edina-centered'
103
+ # load model
104
+ pf_model = PerspectiveFields(version).eval().cuda()
105
+ # load image
106
+ img_bgr = cv2.imread('assets/imgs/cityscape.jpg')
107
+ # inference
108
+ predictions = pf_model.inference(img_bgr=img_bgr)
109
+
110
+ # alternatively, run inference on a batch of images
111
+ predictions = pf_model.inference_batch(img_bgr_list=[img_bgr_0, img_bgr_1, img_bgr_2])
112
+ ```
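The returned `predictions` is a dict of tensors. The key names below are taken from `demo/demo.py` in this same repository (the camera-parameter keys exist only for the ParamNet model versions), so treat this as a sketch rather than a documented API:

```python
# Per-pixel Perspective Fields, at the original image resolution.
up = predictions["pred_gravity_original"].cpu().detach()     # up-vector field (assumed 2 x H x W)
lati = predictions["pred_latitude_original"].cpu().detach()  # latitude map in degrees (H x W)

# Camera parameters, present only when the chosen version has ParamNet enabled.
if pf_model.param_on:
    print(predictions["pred_roll"].item(),
          predictions["pred_pitch"].item(),
          predictions["pred_general_vfov"].item())
```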
113
+ - Or check out the [Live Demo 🤗](https://huggingface.co/spaces/jinlinyi/PerspectiveFields).
114
+ - Notebook to [Predict Perspective Fields](./notebooks/predict_perspective_fields.ipynb).
115
+
116
+
117
+ ## Model Zoo
118
+ | Model Name and Weights | Training Dataset | Config File | Outputs | Expected input |
119
+ | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------- | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
120
+ | [NEW][Paramnet-360Cities-edina-centered](https://www.dropbox.com/s/z2dja70bgy007su/paramnet_360cities_edina_rpf.pth) | [360cities](https://www.360cities.net/) and [EDINA](https://github.com/tien-d/EgoDepthNormal/blob/main/README_dataset.md) | [paramnet_360cities_edina_rpf.yaml](models/paramnet_360cities_edina_rpf.yaml) | Perspective Field + camera parameters (roll, pitch, vfov) | Uncropped, indoor🏠, outdoor🏙️, natural🌳, and egocentric👋 data |
121
+ | [NEW][Paramnet-360Cities-edina-uncentered](https://www.dropbox.com/s/nt29e1pi83mm1va/paramnet_360cities_edina_rpfpp.pth) | [360cities](https://www.360cities.net/) and [EDINA](https://github.com/tien-d/EgoDepthNormal/blob/main/README_dataset.md) | [paramnet_360cities_edina_rpfpp.yaml](models/paramnet_360cities_edina_rpfpp.yaml) | Perspective Field + camera parameters (roll, pitch, vfov, cx, cy) | Cropped, indoor🏠, outdoor🏙️, natural🌳, and egocentric👋 data |
122
+ | [PersNet-360Cities](https://www.dropbox.com/s/czqrepqe7x70b7y/cvpr2023.pth) | [360cities](https://www.360cities.net) | [cvpr2023.yaml](models/cvpr2023.yaml) | Perspective Field | Indoor🏠, outdoor🏙️, and natural🌳 data. |
123
+ | [PersNet_paramnet-GSV-centered](https://www.dropbox.com/s/g6xwbgnkggapyeu/paramnet_gsv_rpf.pth) | [GSV](https://research.google/pubs/pub36899/) | [paramnet_gsv_rpf.yaml](models/paramnet_gsv_rpf.yaml) | Perspective Field + camera parameters (roll, pitch, vfov) | Uncropped, street view🏙️ data. |
124
+ | [PersNet_Paramnet-GSV-uncentered](https://www.dropbox.com/s/ufdadxigewakzlz/paramnet_gsv_rpfpp.pth) | [GSV](https://research.google/pubs/pub36899/) | [paramnet_gsv_rpfpp.yaml](models/paramnet_gsv_rpfpp.yaml) | Perspective Field + camera parameters (roll, pitch, vfov, cx, cy) | Cropped, street view🏙️ data. |
125
+
126
+ ## Coordinate Frame
127
+
128
+ <p align="center">
129
+
130
+ ![alt text](assets/coordinate.png)
131
+
132
+ `yaw / azimuth`: camera rotation about the y-axis
133
+ `pitch / elevation`: camera rotation about the x-axis
134
+ `roll`: camera rotation about the z-axis
135
+
136
+ Extrinsics: `rotz(roll).dot(rotx(elevation)).dot(roty(azimuth))`
137
+
138
+ </p>
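As a concrete (hedged) reading of the extrinsics formula above, with standard right-handed rotation matrices; the sign conventions are an assumption and should be checked against the repo's `panocam` utilities:

```python
import numpy as np

def rotx(t):  # rotation about the x-axis (elevation / pitch)
    c, s = np.cos(t), np.sin(t)
    return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])

def roty(t):  # rotation about the y-axis (azimuth / yaw)
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

def rotz(t):  # rotation about the z-axis (roll)
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

# Extrinsics as stated above: R = rotz(roll) . rotx(elevation) . roty(azimuth)
roll, elevation, azimuth = np.radians([5.0, 20.0, 30.0])
R = rotz(roll).dot(rotx(elevation)).dot(roty(azimuth))
print(R)
```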
139
+
140
+
141
+ ## Camera Parameters to Perspective Fields
142
+ Check out the [Jupyter Notebook](./notebooks/camera2perspective.ipynb).
143
+ Perspective Fields can be calculated from camera parameters. If you prefer, you can also manually calculate the corresponding Up-vector and Latitude map by following Equations 1 and 2 in our paper.
144
+ Our code currently supports:
145
+ 1) [Pinhole model](https://hedivision.github.io/Pinhole.html) [Hartley and Zisserman 2004] (Perspective Projection)
146
+ ```python
147
+ import numpy as np
+ from perspective2d.utils.panocam import PanoCam
148
+ # define parameters
149
+ roll = 0
150
+ pitch = 20
151
+ vfov = 70
152
+ width = 640
153
+ height = 480
154
+ # get Up-vectors.
155
+ up = PanoCam.get_up(np.radians(vfov), width, height, np.radians(pitch), np.radians(roll))
156
+ # get Latitude.
157
+ lati = PanoCam.get_lat(np.radians(vfov), width, height, np.radians(pitch), np.radians(roll))
158
+ ```
159
+ 2) [Unified Spherical Model](https://drive.google.com/file/d/1pZgR3wNS6Mvb87W0ixOHmEVV6tcI8d50/view) [Barreto 2006; Mei and Rives 2007] (Distortion).
160
+ ```python
161
+ # reuses vfov, width, height, pitch, roll from the pinhole example above;
+ # equi_img is an equirectangular panorama image and yaw is in degrees.
+ xi = 0.5 # distortion parameter from Unified Spherical Model
162
+
163
+ x = -np.sin(np.radians(vfov/2))
164
+ z = np.sqrt(1 - x**2)
165
+ f_px_effective = -0.5*(width/2)*(xi+z)/x
166
+ crop, _, _, _, up, lat, xy_map = PanoCam.crop_distortion(equi_img,
167
+ f=f_px_effective,
168
+ xi=xi,
169
+ H=height,
170
+ W=width,
171
+ az=yaw, # degrees
172
+ el=-pitch,
173
+ roll=-roll)
174
+ ```
175
+
176
+ ## Visualize Perspective Fields
177
+ We provide one-line code to blend Perspective Fields onto the input image.
178
+ ```python
179
+ import matplotlib.pyplot as plt
180
+ from perspective2d.utils import draw_perspective_fields
181
+ # Draw up and lati on img. lati is in radians.
182
+ blend = draw_perspective_fields(img, up, lati)
183
+ # visualize with matplotlib
184
+ plt.imshow(blend)
185
+ plt.show()
186
+ ```
187
+ Perspective Fields can serve as an easy visual check for correctness of the camera parameters.
188
+
189
+ - For example, we can visualize the Perspective Fields based on calibration results from this awesome [repo](https://github.com/dompm/spherical-distortion-dataset).
190
+
191
+
192
+ <p align="center">
193
+
194
+ ![alt text](assets/distortion_vis.png)
195
+
196
+ - Left: We plot the Perspective Fields based on the numbers printed on the image; they look accurate 😊;
197
+
198
+ - Mid: If we try a number that is 10% off (0.72*0.9=0.648), we see a mismatch in the Up directions at the top-right corner;
199
+
200
+ - Right: If distortion is 20% off (0.72*0.8=0.576), the mismatch becomes more obvious.
201
+ </p>
202
+
203
+
204
+ Citation
205
+ --------
206
+ If you find this code useful, please consider citing:
207
+
208
+ ```text
209
+ @inproceedings{jin2023perspective,
210
+ title={Perspective Fields for Single Image Camera Calibration},
211
+ author={Linyi Jin and Jianming Zhang and Yannick Hold-Geoffroy and Oliver Wang and Kevin Matzen and Matthew Sticha and David F. Fouhey},
212
+ booktitle = {CVPR},
213
+ year={2023}
214
+ }
215
+ ```
216
+
217
+ Acknowledgment
218
+ --------------
219
+ This work was partially funded by the DARPA Machine Common Sense Program.
220
+ We thank the authors of [A Deep Perceptual Measure for Lens and Camera Calibration](https://github.com/dompm/spherical-distortion-dataset) for releasing their code on the Unified Spherical Model.
external/PerspectiveFields/demo/demo.py ADDED
@@ -0,0 +1,165 @@
1
+ import cv2
2
+ import torch
3
+ import os
4
+ import numpy as np
5
+ from perspective2d import PerspectiveFields
6
+ from perspective2d.utils import draw_perspective_fields, draw_from_r_p_f_cx_cy
7
+
8
+
9
+
10
+ def log_results(img_rgb, pred, output_folder, param_on):
11
+ """
12
+ Save perspective field prediction visualizations.
13
+
14
+ Args:
15
+ img_rgb (np.ndarray): The input image in RGB format.
16
+ pred (dict): The model predictions.
17
+ output_folder (str): The path to save the visualizations to.
18
+ param_on (bool): A flag indicating whether to include parameter predictions.
19
+
20
+ Returns:
21
+ None
22
+ """
23
+ def resize_fix_aspect_ratio(img, field, target_width=None, target_height=None):
24
+ """
25
+ Resize image and perspective field to target width or height while maintaining aspect ratio.
26
+ """
27
+ height = img.shape[0]
28
+ width = img.shape[1]
29
+ if target_height is None:
30
+ factor = target_width / width
31
+ elif target_width is None:
32
+ factor = target_height / height
33
+ else:
34
+ factor = max(target_width / width, target_height / height)
35
+ if target_width is None:
+ target_width = int(width * factor)
+ elif target_height is None or factor == target_width / width:
+ target_height = int(height * factor)
+ else:
+ target_width = int(width * factor)
39
+
40
+ img = cv2.resize(img, (target_width, target_height))
41
+ for key in field:
42
+ if key not in ["up", "lati"]:
43
+ continue
44
+ tmp = field[key].numpy()
45
+ transpose = len(tmp.shape) == 3
46
+ if transpose:
47
+ tmp = tmp.transpose(1, 2, 0)
48
+ tmp = cv2.resize(tmp, (target_width, target_height))
49
+ if transpose:
50
+ tmp = tmp.transpose(2, 0, 1)
51
+ field[key] = torch.tensor(tmp)
52
+ return img, field
53
+
54
+ os.makedirs(output_folder, exist_ok=True)
55
+ field = {
56
+ "up": pred["pred_gravity_original"].cpu().detach(),
57
+ "lati": pred["pred_latitude_original"].cpu().detach(),
58
+ }
59
+ img_rgb, field = resize_fix_aspect_ratio(img_rgb, field, 640)
60
+ pred_vis = draw_perspective_fields(
61
+ img_rgb, field["up"], torch.deg2rad(field["lati"]), color=(0,1,0), return_img=False
62
+ )
63
+ pred_vis.save(os.path.join(output_folder, "perspective_pred"))
64
+
65
+ if not param_on:
66
+ return
67
+
68
+ # Draw perspective field from ParamNet predictions
69
+ param_vis = draw_from_r_p_f_cx_cy(
70
+ img_rgb,
71
+ pred["pred_roll"].item(),
72
+ pred["pred_pitch"].item(),
73
+ pred["pred_general_vfov"].item(),
74
+ pred["pred_rel_cx"].item(),
75
+ pred["pred_rel_cy"].item(),
76
+ "deg",
77
+ up_color=(0, 1, 0),
78
+ ).astype(np.uint8)
79
+
80
+ param_vis = cv2.cvtColor(param_vis, cv2.COLOR_RGB2BGR)
81
+ pred_roll = f"roll: {pred['pred_roll'].item() :.2f}"
82
+ pred_pitch = f"pitch: {pred['pred_pitch'].item() :.2f}"
83
+ pred_vfov = f"vfov: {pred['pred_general_vfov'].item() :.2f}"
84
+ pred_cx = f"cx: {pred['pred_rel_cx'].item() :.2f}"
85
+ pred_cy = f"cy: {pred['pred_rel_cy'].item() :.2f}"
86
+
87
+ print(pred_roll)
88
+ print(pred_pitch)
89
+ print(pred_vfov)
90
+ print(pred_cx)
91
+ print(pred_cy)
92
+ # Write parameter predictions on the visualization
93
+ font = cv2.FONT_HERSHEY_SIMPLEX
94
+ font_scale = 0.75
95
+ param_vis = cv2.putText(
96
+ param_vis,
97
+ pred_roll,
98
+ (int(param_vis.shape[1] * 0.6) - 2, int(param_vis.shape[0] * 0.1)),
99
+ font,
100
+ font_scale,
101
+ (0, 0, 255),
102
+ 2,
103
+ )
104
+ param_vis = cv2.putText(
105
+ param_vis,
106
+ pred_pitch,
107
+ (int(param_vis.shape[1] * 0.6) - 2, int(param_vis.shape[0] * 0.1) + 25),
108
+ font,
109
+ font_scale,
110
+ (0, 0, 255),
111
+ 2,
112
+ )
113
+ param_vis = cv2.putText(
114
+ param_vis,
115
+ pred_vfov,
116
+ (int(param_vis.shape[1] * 0.6) - 2, int(param_vis.shape[0] * 0.1) + 50),
117
+ font,
118
+ font_scale,
119
+ (0, 0, 255),
120
+ 2,
121
+ )
122
+ param_vis = cv2.putText(
123
+ param_vis,
124
+ pred_cx,
125
+ (int(param_vis.shape[1] * 0.6) - 2, int(param_vis.shape[0] * 0.1) + 75),
126
+ font,
127
+ font_scale,
128
+ (0, 0, 255),
129
+ 2,
130
+ )
131
+ param_vis = cv2.putText(
132
+ param_vis,
133
+ pred_cy,
134
+ (int(param_vis.shape[1] * 0.6) - 2, int(param_vis.shape[0] * 0.1) + 100),
135
+ font,
136
+ font_scale,
137
+ (0, 0, 255),
138
+ 2,
139
+ )
140
+ cv2.imwrite(os.path.join(output_folder, "param_pred.png"), param_vis)
141
+
142
+
143
+ PerspectiveFields.versions()
144
+
145
+ version = 'Paramnet-360Cities-edina-centered'
146
+ # version = 'Paramnet-360Cities-edina-uncentered'
147
+ # version = 'PersNet_Paramnet-GSV-centered'
148
+ # version = 'PersNet_Paramnet-GSV-uncentered'
149
+ # version = 'PersNet-360Cities'
150
+ pf_model = PerspectiveFields(version).eval().cuda()
151
+ img_bgr = cv2.imread('assets/imgs/cityscape.jpg')
152
+ predictions = pf_model.inference(img_bgr=img_bgr)
153
+
154
+ log_results(img_bgr[..., ::-1], predictions, output_folder="debug", param_on=pf_model.param_on)
155
+
156
+ print("\nexpected output: ")
157
+ print("""roll: 4.54
158
+ pitch: 48.88
159
+ vfov: 52.82
160
+ cx: 0.00
161
+ cy: 0.00""")
162
+
163
+ print("Alternatively, inference a batch of images")
164
+ predictions = pf_model.inference_batch(img_bgr_list=[img_bgr, img_bgr, img_bgr])
165
+ breakpoint()
external/PerspectiveFields/notebooks/camera2perspective.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
external/PerspectiveFields/notebooks/predict_perspective_fields.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
external/PerspectiveFields/perspective2d/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .perspectivefields import PerspectiveFields
2
+
external/PerspectiveFields/perspective2d/config/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .config import get_perspective2d_cfg_defaults
external/PerspectiveFields/perspective2d/config/config.py ADDED
@@ -0,0 +1,137 @@
1
+ from yacs.config import CfgNode as CN
2
+
3
+
4
+ def get_perspective2d_cfg_defaults():
5
+ """
6
+ PerspectiveNet and ParamNet configs.
7
+ """
8
+ cfg = CN()
9
+ cfg.VIS_PERIOD = 100
10
+ cfg.INPUT = CN()
11
+ cfg.INPUT.ONLINE_CROP = False
12
+ cfg.INPUT.FORMAT = "BGR"
13
+ cfg.DATASETS = CN()
14
+ cfg.DATASETS.TRAIN = []
15
+ cfg.DATASETS.TEST = []
16
+
17
+ cfg.DATALOADER = CN()
18
+ cfg.DATALOADER.AUGMENTATION = False
19
+ cfg.DATALOADER.AUGMENTATION_TYPE = "geometry"
20
+ cfg.DATALOADER.RESIZE = [320, 320] # Height, Width
21
+ cfg.DATALOADER.AUGMENTATION_FUN = "default"
22
+ cfg.DATALOADER.NO_GEOMETRY_AUG = False # requested by R3 cvpr2023
23
+
24
+ cfg.MODEL = CN()
25
+ cfg.MODEL.GRAVITY_ON = False
26
+ cfg.MODEL.LATITUDE_ON = False
27
+ cfg.MODEL.RECOVER_RPF = False
28
+ cfg.MODEL.RECOVER_PP = False
29
+
30
+ cfg.MODEL.BACKBONE = CN()
31
+ cfg.MODEL.BACKBONE.NAME = "mitb3"
32
+
33
+ cfg.MODEL.PERSFORMER_HEADS = CN()
34
+ cfg.MODEL.WEIGHTS = ""
35
+ cfg.MODEL.PERSFORMER_HEADS.NAME = "StandardPersformerHeads"
36
+ cfg.MODEL.LATITUDE_DECODER = CN()
37
+ cfg.MODEL.LATITUDE_DECODER.NAME = "LatitudeDecoder"
38
+ cfg.MODEL.LATITUDE_DECODER.LOSS_WEIGHT = 1.0
39
+ cfg.MODEL.LATITUDE_DECODER.LOSS_TYPE = "regression"
40
+ cfg.MODEL.LATITUDE_DECODER.NUM_CLASSES = 1
41
+ cfg.MODEL.LATITUDE_DECODER.IGNORE_VALUE = -1
42
+ cfg.MODEL.GRAVITY_DECODER = CN()
43
+ cfg.MODEL.GRAVITY_DECODER.NAME = "GravityDecoder"
44
+ cfg.MODEL.GRAVITY_DECODER.LOSS_WEIGHT = 1.0
45
+ cfg.MODEL.GRAVITY_DECODER.LOSS_TYPE = "classification"
46
+ cfg.MODEL.GRAVITY_DECODER.NUM_CLASSES = 73
47
+ cfg.MODEL.GRAVITY_DECODER.IGNORE_VALUE = 72
48
+ cfg.MODEL.HEIGHT_DECODER = CN()
49
+ cfg.MODEL.HEIGHT_DECODER.NAME = "HeightDecoder"
50
+ cfg.MODEL.HEIGHT_DECODER.LOSS_WEIGHT = 1.0
51
+
52
+ cfg.MODEL.PARAM_DECODER = CN()
53
+ cfg.MODEL.PARAM_DECODER.NAME = "ParamNet"
54
+ cfg.MODEL.PARAM_DECODER.LOSS_TYPE = "regression"
55
+ cfg.MODEL.PARAM_DECODER.LOSS_WEIGHT = 1.0
56
+ cfg.MODEL.PARAM_DECODER.PREDICT_PARAMS = [
57
+ "roll",
58
+ "pitch",
59
+ "rel_focal",
60
+ "rel_cx",
61
+ "rel_cy",
62
+ ]
63
+ cfg.MODEL.PARAM_DECODER.SYNTHETIC_PRETRAIN = False
64
+ cfg.MODEL.PARAM_DECODER.INPUT_SIZE = 320
65
+ cfg.MODEL.PARAM_DECODER.DEBUG_LAT = False
66
+ cfg.MODEL.PARAM_DECODER.DEBUG_UP = False
67
+
68
+ cfg.MODEL.FREEZE = []
69
+ cfg.DEBUG_ON = False
70
+ cfg.OVERFIT_ON = False
71
+
72
+ """
73
+ The configs below are not used.
74
+ """
75
+ cfg.MODEL.CENTER_ON = False
76
+ cfg.MODEL.HEIGHT_ON = False
77
+ cfg.MODEL.PIXEL_MEAN = [103.53, 116.28, 123.675]
78
+ cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
79
+
80
+ cfg.MODEL.FPN_HEADS = CN()
81
+ cfg.MODEL.FPN_HEADS.NAME = "StandardFPNHeads"
82
+ # Gravity
83
+
84
+ cfg.MODEL.FPN_GRAVITY_HEAD = CN()
85
+ cfg.MODEL.FPN_GRAVITY_HEAD.NAME = "GravityFPNHead"
86
+ cfg.MODEL.FPN_GRAVITY_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
87
+ # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
88
+ # the correposnding pixel.
89
+ cfg.MODEL.FPN_GRAVITY_HEAD.IGNORE_VALUE = 360
90
+ # Number of classes in the semantic segmentation head
91
+ cfg.MODEL.FPN_GRAVITY_HEAD.NUM_CLASSES = 361
92
+ # Number of channels in the 3x3 convs inside semantic-FPN heads.
93
+ cfg.MODEL.FPN_GRAVITY_HEAD.CONVS_DIM = 128
94
+ # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
95
+ cfg.MODEL.FPN_GRAVITY_HEAD.COMMON_STRIDE = 4
96
+ # Normalization method for the convolution layers. Options: "" (no norm), "GN".
97
+ cfg.MODEL.FPN_GRAVITY_HEAD.NORM = "GN"
98
+ cfg.MODEL.FPN_GRAVITY_HEAD.LOSS_WEIGHT = 1.0
99
+
100
+ # Latitude
101
+
102
+ cfg.MODEL.FPN_LATITUDE_HEAD = CN()
103
+ cfg.MODEL.FPN_LATITUDE_HEAD.NAME = "LatitudeFPNHead"
104
+ cfg.MODEL.FPN_LATITUDE_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
105
+ # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
106
+ # the correposnding pixel.
107
+ cfg.MODEL.FPN_LATITUDE_HEAD.IGNORE_VALUE = -1
108
+ # Number of classes in the semantic segmentation head
109
+ cfg.MODEL.FPN_LATITUDE_HEAD.NUM_CLASSES = 9
110
+ # Number of channels in the 3x3 convs inside semantic-FPN heads.
111
+ cfg.MODEL.FPN_LATITUDE_HEAD.CONVS_DIM = 128
112
+ # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
113
+ cfg.MODEL.FPN_LATITUDE_HEAD.COMMON_STRIDE = 4
114
+ # Normalization method for the convolution layers. Options: "" (no norm), "GN".
115
+ cfg.MODEL.FPN_LATITUDE_HEAD.NORM = "GN"
116
+ cfg.MODEL.FPN_LATITUDE_HEAD.LOSS_WEIGHT = 1.0
117
+ # Center
118
+
119
+ cfg.MODEL.FPN_CENTER_HEAD = CN()
120
+ cfg.MODEL.FPN_CENTER_HEAD.NAME = "CenterFPNHead"
121
+ cfg.MODEL.FPN_CENTER_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
122
+ # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
123
+ # the correposnding pixel.
124
+ cfg.MODEL.FPN_CENTER_HEAD.IGNORE_VALUE = 360
125
+ # Number of classes in the semantic segmentation head
126
+ cfg.MODEL.FPN_CENTER_HEAD.NUM_CLASSES = 30
127
+ # Number of channels in the 3x3 convs inside semantic-FPN heads.
128
+ cfg.MODEL.FPN_CENTER_HEAD.CONVS_DIM = 128
129
+ # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
130
+ cfg.MODEL.FPN_CENTER_HEAD.COMMON_STRIDE = 4
131
+ # Normalization method for the convolution layers. Options: "" (no norm), "GN".
132
+ cfg.MODEL.FPN_CENTER_HEAD.NORM = "GN"
133
+ cfg.MODEL.FPN_CENTER_HEAD.LOSS_WEIGHT = 1.0
134
+
135
+ ############################################################
136
+
137
+ return cfg
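A hedged sketch of how these defaults might be consumed. `merge_from_file`, `freeze`, and attribute access are standard yacs `CfgNode` behavior, but whether the released model YAMLs (e.g. `models/cvpr2023.yaml` from the Model Zoo) merge cleanly onto these defaults is an assumption:

```python
from perspective2d.config import get_perspective2d_cfg_defaults

cfg = get_perspective2d_cfg_defaults()
# Hypothetical: overlay one of the released model configs onto the defaults.
cfg.merge_from_file("models/cvpr2023.yaml")
cfg.freeze()

print(cfg.MODEL.BACKBONE.NAME)  # "mitb3" by default
print(cfg.DATALOADER.RESIZE)    # [Height, Width] used by the dataloader
```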