Spaces: xinwei89 (Runtime error)
Commit 8ae7071 · xinwei89 committed
Parent(s): 8fc220a

test

Files changed:
- app.py +19 -59
- app_old.py +65 -0
- backend.py +93 -0
- {model_weights → building_model_weight}/README.md +0 -0
- {model_weights → building_model_weight}/buildings_poc_cfg.yml +0 -0
- building_model_weight/buildingsv1_best.pth +3 -0
- building_model_weight/buildingsv1_cfg.yaml +325 -0
- {model_weights → building_model_weight}/model_final.pth +0 -0
- {model_weights → building_model_weight}/tree_cfg.yml +0 -0
- {model_weights → building_model_weight}/tree_model.pth +0 -0
- tree_model_weights/README.md +3 -0
- tree_model_weights/buildings_poc_cfg.yml +325 -0
- tree_model_weights/model_final.pth +3 -0
- tree_model_weights/tree_cfg.yml +325 -0
- tree_model_weights/tree_model.pth +3 -0
- {model_weights → tree_model_weights}/treev1_best.pth +0 -0
- {model_weights → tree_model_weights}/treev1_cfg.yaml +0 -0
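Taken together, the changes below split the original single-file tree demo into a UI layer (app.py) and an inference layer (backend.py), with separate weight folders per model. As a rough smoke test of the new layout, assuming the weight folders and the visualize_image signature shown further down are in place, one could exercise the backend without launching Gradio (the image path here is a placeholder, not part of the commit):

# Hypothetical local smoke test for the refactored backend (not part of the commit).
from PIL import Image
import numpy as np

from backend import visualize_image

# Any small aerial image will do; the filename is a placeholder.
im = np.array(Image.open("sample_aerial.png").convert("RGB"))

# mode is "Trees", "Buildings", or "Both"; thresholds are in [0, 1];
# the color mode strings match the Radio choices in app.py.
out = visualize_image(im, "Both", tree_threshold=0.7, building_threshold=0.7,
                      color_mode="Segmentation")
out.save("sample_aerial_segmented.png")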
app.py
CHANGED
@@ -1,65 +1,25 @@
-"""
-tree-segmentation
-Proof of concept showing effectiveness of a fine tuned instance segmentation model for detecting trees.
-"""
-import os
-import cv2
-os.system("pip install 'git+https://github.com/facebookresearch/detectron2.git'")
-from transformers import DetrFeatureExtractor, DetrForSegmentation
-from PIL import Image
 import gradio as gr
-import numpy as np
-import torch
-import torchvision
-import detectron2
+from backend import visualize_image
 
-# import some common detectron2 utilities
-import itertools
-import seaborn as sns
-from detectron2 import model_zoo
-from detectron2.engine import DefaultPredictor
-from detectron2.config import get_cfg
-from detectron2.utils.visualizer import Visualizer
-from detectron2.utils.visualizer import ColorMode
-from detectron2.data import MetadataCatalog, DatasetCatalog
-from detectron2.checkpoint import DetectionCheckpointer
+# gradio inputs
+image_input = gr.inputs.Image(type="pil", label="Input Image")
+mode_dropdown = gr.inputs.Dropdown(["Trees", "Buildings", "Both"])
+tree_threshold_slider = gr.inputs.Slider(0, 1, 0.1, 0.7, label='Set confidence threshold % for trees')
+building_threshold_slider = gr.inputs.Slider(0, 1, 0.1, 0.7, label='Set confidence threshold % for buildings')
+color_mode_select = gr.inputs.Radio(["Black/white", "Random", "Segmentation"])
 
-cfg = get_cfg()
-cfg.merge_from_file("model_weights/treev1_cfg.yaml")
-cfg.MODEL.DEVICE='cpu'
-cfg.MODEL.WEIGHTS = "model_weights/treev1_best.pth"
-cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
-
-def segment_image(im):
-
-    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.25
-    predictor = DefaultPredictor(cfg)
-    im = np.array(im)
-    outputs = predictor(im)
-    v = Visualizer(im[:, :, ::-1],
-                   scale=0.5,
-                   instance_mode=ColorMode.SEGMENTATION
-                   )
-    print(len(outputs["instances"])," trees detected.")
-    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
-
-    return Image.fromarray(out.get_image()[:, :, ::-1])
-
-# gradio components
-"""
-gr_slider_confidence = gr.inputs.Slider(0,1,.1,.7,
-                                        label='Set confidence threshold % for masks')
-"""
 # gradio outputs
-inputs = gr.inputs.Image(type="pil", label="Input Image")
-outputs = gr.outputs.Image(type="pil", label="Output Image")
+output_image = gr.outputs.Image(type="pil", label="Output Image")
+title = "Building Segmentation"
+description = "An instance segmentation demo for identifying boundaries of buildings in aerial images using DETR (End-to-End Object Detection) model with MaskRCNN-101 backbone"
 
-title = "Tree Segmentation"
-description = "An instance segmentation demo for identifying trees in aerial images using DETR (End-to-End Object Detection) model with MaskRCNN-101 backbone"
+# gradio interface
+interface = gr.Interface(
+    fn=visualize_image,
+    inputs=[image_input, mode_dropdown, tree_threshold_slider, building_threshold_slider, color_mode_select],
+    outputs=output_image,
+    title=title,
+    description=description
+)
 
-# Create user interface and launch
-gr.Interface(segment_image,
-             inputs = inputs,
-             outputs = outputs,
-             title = title,
-             description = description).launch(debug=True)
+interface.launch(debug=True)
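The new UI keeps the legacy gr.inputs.* / gr.outputs.* component namespace, where Slider takes minimum, maximum, step, and default in that positional order. If the Space runs Gradio 3.x or later those namespaces no longer exist, which would be one plausible cause of the "Runtime error" status above. The following is a sketch of the same interface with the post-3.0 API; it assumes gradio>=3 is installed and is not part of the commit:

# Sketch of the equivalent interface with the post-3.0 Gradio API (assumption, not part of the commit).
import gradio as gr
from backend import visualize_image

interface = gr.Interface(
    fn=visualize_image,
    inputs=[
        gr.Image(type="pil", label="Input Image"),
        gr.Dropdown(["Trees", "Buildings", "Both"], label="Mode"),
        gr.Slider(minimum=0, maximum=1, step=0.1, value=0.7, label="Set confidence threshold % for trees"),
        gr.Slider(minimum=0, maximum=1, step=0.1, value=0.7, label="Set confidence threshold % for buildings"),
        gr.Radio(["Black/white", "Random", "Segmentation"], label="Color mode"),
    ],
    outputs=gr.Image(type="pil", label="Output Image"),
    title="Building Segmentation",
    description="An instance segmentation demo for identifying boundaries of buildings in aerial images",
)

interface.launch(debug=True)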
app_old.py
ADDED
@@ -0,0 +1,65 @@
"""
tree-segmentation
Proof of concept showing effectiveness of a fine tuned instance segmentation model for detecting trees.
"""
import os
import cv2
os.system("pip install 'git+https://github.com/facebookresearch/detectron2.git'")
from transformers import DetrFeatureExtractor, DetrForSegmentation
from PIL import Image
import gradio as gr
import numpy as np
import torch
import torchvision
import detectron2

# import some common detectron2 utilities
import itertools
import seaborn as sns
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.checkpoint import DetectionCheckpointer

cfg = get_cfg()
cfg.merge_from_file("model_weights/treev1_cfg.yaml")
cfg.MODEL.DEVICE='cpu'
cfg.MODEL.WEIGHTS = "model_weights/treev1_best.pth"
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2

def segment_image(im):

    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.25
    predictor = DefaultPredictor(cfg)
    im = np.array(im)
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1],
                   scale=0.5,
                   instance_mode=ColorMode.SEGMENTATION
                   )
    print(len(outputs["instances"])," trees detected.")
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    return Image.fromarray(out.get_image()[:, :, ::-1])

# gradio components
"""
gr_slider_confidence = gr.inputs.Slider(0,1,.1,.7,
                                        label='Set confidence threshold % for masks')
"""
# gradio outputs
inputs = gr.inputs.Image(type="pil", label="Input Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

title = "Tree Segmentation"
description = "An instance segmentation demo for identifying trees in aerial images using DETR (End-to-End Object Detection) model with MaskRCNN-101 backbone"

# Create user interface and launch
gr.Interface(segment_image,
             inputs = inputs,
             outputs = outputs,
             title = title,
             description = description).launch(debug=True)
backend.py
ADDED
@@ -0,0 +1,93 @@
"""
tree-segmentation
Proof of concept showing effectiveness of a fine tuned instance segmentation model for detecting trees.
"""
import os
import cv2
os.system("pip install 'git+https://github.com/facebookresearch/detectron2.git'")
from transformers import DetrFeatureExtractor, DetrForSegmentation
from PIL import Image
import gradio as gr
import numpy as np
import torch
import torchvision
import detectron2

# import some common detectron2 utilities
import itertools
import seaborn as sns
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.visualizer import ColorMode
from detectron2.structures import Instances


# Model for trees
tree_cfg = get_cfg()
tree_cfg.merge_from_file("tree_model_weights/tree_cfg.yml")
tree_cfg.MODEL.DEVICE='cpu'
tree_cfg.MODEL.WEIGHTS = "tree_model_weights/treev1_best.pth"
tree_cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
tree_predictor = DefaultPredictor(tree_cfg)

# Model for buildings
building_cfg = get_cfg()
building_cfg.merge_from_file("building_model_weight/building_poc_cfg.yml")
building_cfg.MODEL.DEVICE='cpu'
building_cfg.MODEL.WEIGHTS = "building_model_weight/model_final.pth"
building_cfg.MODEL.ROI_HEADS.NUM_CLASSES = 8
building_predictor = DefaultPredictor(building_cfg)

# A function that runs the buildings model on a given image and confidence threshold
def segment_building(im, confidence_threshold):
    building_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold
    im = np.array(im)
    outputs = building_predictor(im)
    building_instances = outputs["instances"].to("cpu")

    return building_instances

# A function that runs the trees model on a given image and confidence threshold
def segment_tree(im, confidence_threshold):
    tree_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold
    im = np.array(im)
    outputs = tree_predictor(im)
    tree_instances = outputs["instances"].to("cpu")

    return tree_instances

# Function to map strings to color mode
def map_color_mode(color_mode):
    if color_mode == "Black/white":
        return ColorMode.IMAGE_BW
    elif color_mode == "Random":
        return ColorMode.IMAGE
    elif color_mode == "Segmentation":
        return ColorMode.SEGMENTATION

def visualize_image(im, mode, tree_threshold, building_threshold, color_mode=ColorMode.SEGMENTATION):
    color_mode = map_color_mode(color_mode)

    if mode == "Trees":
        instances = segment_tree(im, tree_threshold)
    elif mode == "Buildings":
        instances = segment_building(im, building_threshold)
    elif mode == "Both":
        tree_instances = segment_tree(im, tree_threshold)
        building_instances = segment_building(im, building_threshold)
        instances = Instances.cat([tree_instances, building_instances])

    visualizer = Visualizer(im[:, :, ::-1],
                            scale=0.5,
                            instance_mode=color_mode)

    output_image = visualizer.draw_instance_predictions(instances)

    return Image.fromarray(output_image.get_image()[:, :, ::-1])
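In the "Both" branch, detectron2's Instances.cat merges the two models' predictions. It requires that all concatenated Instances share the same image size and the same field names, and after merging the pred_classes indices still refer to two different label spaces (2 tree classes vs. 8 building classes), so one Visualizer will colour them from a single palette. A minimal sketch of merging with an offset applied to the building class ids; this helper is an illustration, not part of the commit:

# Hypothetical helper (not in the commit): merge tree and building predictions
# while keeping their class ids distinct, assuming both models ran on the same image.
from detectron2.structures import Instances

def merge_predictions(tree_instances, building_instances, num_tree_classes=2):
    # Instances.cat requires identical image sizes and identical field names.
    assert tree_instances.image_size == building_instances.image_size
    # Shift the building class ids so they do not collide with the tree class ids.
    building_instances.pred_classes = building_instances.pred_classes + num_tree_classes
    return Instances.cat([tree_instances, building_instances])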
{model_weights → building_model_weight}/README.md
RENAMED
File without changes
{model_weights → building_model_weight}/buildings_poc_cfg.yml
RENAMED
File without changes
building_model_weight/buildingsv1_best.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:061e1e613db507f4023fb91731cde779240793414d7a6477604ca9473bfa05c2
size 254201166
building_model_weight/buildingsv1_cfg.yaml
ADDED
@@ -0,0 +1,325 @@
CUDNN_BENCHMARK: false
DATALOADER:
  ASPECT_RATIO_GROUPING: true
  FILTER_EMPTY_ANNOTATIONS: true
  NUM_WORKERS: 2
  REPEAT_THRESHOLD: 0.0
  SAMPLER_TRAIN: TrainingSampler
DATASETS:
  PRECOMPUTED_PROPOSAL_TOPK_TEST: 1000
  PRECOMPUTED_PROPOSAL_TOPK_TRAIN: 2000
  PROPOSAL_FILES_TEST: []
  PROPOSAL_FILES_TRAIN: []
  TEST:
  - trees_test
  TRAIN:
  - trees_train
GLOBAL:
  HACK: 1.0
INPUT:
  CROP:
    ENABLED: false
    SIZE:
    - 0.9
    - 0.9
    TYPE: relative_range
  FORMAT: BGR
  MASK_FORMAT: polygon
  MAX_SIZE_TEST: 1333
  MAX_SIZE_TRAIN: 1333
  MIN_SIZE_TEST: 800
  MIN_SIZE_TRAIN:
  - 640
  - 672
  - 704
  - 736
  - 768
  - 800
  MIN_SIZE_TRAIN_SAMPLING: choice
  RANDOM_FLIP: horizontal
MODEL:
  ANCHOR_GENERATOR:
    ANGLES:
    - - -90
      - 0
      - 90
    ASPECT_RATIOS:
    - - 0.5
      - 1.0
      - 2.0
    NAME: DefaultAnchorGenerator
    OFFSET: 0.0
    SIZES:
    - - 32
    - - 64
    - - 128
    - - 256
    - - 512
  BACKBONE:
    FREEZE_AT: 2
    NAME: build_resnet_fpn_backbone
  DEVICE: cuda
  FPN:
    FUSE_TYPE: sum
    IN_FEATURES:
    - res2
    - res3
    - res4
    - res5
    NORM: ''
    OUT_CHANNELS: 256
  KEYPOINT_ON: false
  LOAD_PROPOSALS: false
  MASK_ON: true
  META_ARCHITECTURE: GeneralizedRCNN
  PANOPTIC_FPN:
    COMBINE:
      ENABLED: true
      INSTANCES_CONFIDENCE_THRESH: 0.5
      OVERLAP_THRESH: 0.5
      STUFF_AREA_LIMIT: 4096
    INSTANCE_LOSS_WEIGHT: 1.0
  PIXEL_MEAN:
  - 103.53
  - 116.28
  - 123.675
  PIXEL_STD:
  - 1.0
  - 1.0
  - 1.0
  PROPOSAL_GENERATOR:
    MIN_SIZE: 0
    NAME: RPN
  RESNETS:
    DEFORM_MODULATED: false
    DEFORM_NUM_GROUPS: 1
    DEFORM_ON_PER_STAGE:
    - false
    - false
    - false
    - false
    DEPTH: 101
    NORM: FrozenBN
    NUM_GROUPS: 1
    OUT_FEATURES:
    - res2
    - res3
    - res4
    - res5
    RES2_OUT_CHANNELS: 256
    RES5_DILATION: 1
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: true
    WIDTH_PER_GROUP: 64
  RETINANET:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_WEIGHTS: &id002
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    FOCAL_LOSS_ALPHA: 0.25
    FOCAL_LOSS_GAMMA: 2.0
    IN_FEATURES:
    - p3
    - p4
    - p5
    - p6
    - p7
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.4
    - 0.5
    NMS_THRESH_TEST: 0.5
    NORM: ''
    NUM_CLASSES: 80
    NUM_CONVS: 4
    PRIOR_PROB: 0.01
    SCORE_THRESH_TEST: 0.05
    SMOOTH_L1_LOSS_BETA: 0.1
    TOPK_CANDIDATES_TEST: 1000
  ROI_BOX_CASCADE_HEAD:
    BBOX_REG_WEIGHTS:
    - &id001
      - 10.0
      - 10.0
      - 5.0
      - 5.0
    - - 20.0
      - 20.0
      - 10.0
      - 10.0
    - - 30.0
      - 30.0
      - 15.0
      - 15.0
    IOUS:
    - 0.5
    - 0.6
    - 0.7
  ROI_BOX_HEAD:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id001
    CLS_AGNOSTIC_BBOX_REG: false
    CONV_DIM: 256
    FC_DIM: 1024
    FED_LOSS_FREQ_WEIGHT_POWER: 0.5
    FED_LOSS_NUM_CLASSES: 50
    NAME: FastRCNNConvFCHead
    NORM: ''
    NUM_CONV: 0
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
    SMOOTH_L1_BETA: 0.0
    TRAIN_ON_PRED_BOXES: false
    USE_FED_LOSS: false
    USE_SIGMOID_CE: false
  ROI_HEADS:
    BATCH_SIZE_PER_IMAGE: 512
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    IOU_LABELS:
    - 0
    - 1
    IOU_THRESHOLDS:
    - 0.5
    NAME: StandardROIHeads
    NMS_THRESH_TEST: 0.5
    NUM_CLASSES: 81
    POSITIVE_FRACTION: 0.25
    PROPOSAL_APPEND_GT: true
    SCORE_THRESH_TEST: 0.7
  ROI_KEYPOINT_HEAD:
    CONV_DIMS:
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    LOSS_WEIGHT: 1.0
    MIN_KEYPOINTS_PER_IMAGE: 1
    NAME: KRCNNConvDeconvUpsampleHead
    NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: true
    NUM_KEYPOINTS: 17
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  ROI_MASK_HEAD:
    CLS_AGNOSTIC_MASK: false
    CONV_DIM: 256
    NAME: MaskRCNNConvUpsampleHead
    NORM: ''
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  RPN:
    BATCH_SIZE_PER_IMAGE: 256
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id002
    BOUNDARY_THRESH: -1
    CONV_DIMS:
    - -1
    HEAD_NAME: StandardRPNHead
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    - p6
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.3
    - 0.7
    LOSS_WEIGHT: 1.0
    NMS_THRESH: 0.7
    POSITIVE_FRACTION: 0.5
    POST_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TRAIN: 1000
    PRE_NMS_TOPK_TEST: 1000
    PRE_NMS_TOPK_TRAIN: 2000
    SMOOTH_L1_BETA: 0.0
  SEM_SEG_HEAD:
    COMMON_STRIDE: 4
    CONVS_DIM: 128
    IGNORE_VALUE: 255
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    LOSS_WEIGHT: 1.0
    NAME: SemSegFPNHead
    NORM: GN
    NUM_CLASSES: 54
  WEIGHTS: /home/ubuntu/ssd/building_out_0311_10000/model_final.pth
OUTPUT_DIR: /home/ubuntu/ssd/building_out_0311_10000
SEED: -1
SOLVER:
  AMP:
    ENABLED: false
  BASE_LR: 0.00025
  BASE_LR_END: 0.0
  BIAS_LR_FACTOR: 1.0
  CHECKPOINT_PERIOD: 5000
  CLIP_GRADIENTS:
    CLIP_TYPE: value
    CLIP_VALUE: 1.0
    ENABLED: false
    NORM_TYPE: 2.0
  GAMMA: 0.1
  IMS_PER_BATCH: 8
  LR_SCHEDULER_NAME: WarmupMultiStepLR
  MAX_ITER: 10000
  MOMENTUM: 0.9
  NESTEROV: false
  NUM_DECAYS: 3
  REFERENCE_WORLD_SIZE: 0
  RESCALE_INTERVAL: false
  STEPS: []
  WARMUP_FACTOR: 0.001
  WARMUP_ITERS: 1000
  WARMUP_METHOD: linear
  WEIGHT_DECAY: 0.0001
  WEIGHT_DECAY_BIAS: null
  WEIGHT_DECAY_NORM: 0.0
TEST:
  AUG:
    ENABLED: false
    FLIP: true
    MAX_SIZE: 4000
    MIN_SIZES:
    - 400
    - 500
    - 600
    - 700
    - 800
    - 900
    - 1000
    - 1100
    - 1200
  DETECTIONS_PER_IMAGE: 100
  EVAL_PERIOD: 0
  EXPECTED_RESULTS: []
  KEYPOINT_OKS_SIGMAS: []
  PRECISE_BN:
    ENABLED: false
    NUM_ITER: 200
VERSION: 2
VIS_PERIOD: 0
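The three YAML files added in this commit are full detectron2 config dumps that differ mainly in dataset names, ROI_HEADS.NUM_CLASSES and SCORE_THRESH_TEST, solver settings, and output paths; backend.py reloads them with merge_from_file and then overrides the fields that change at serving time. A minimal sketch of that load-and-override pattern, using paths from this commit (running it still requires detectron2 and the weight files, and the override values here are illustrative):

# Minimal sketch of reusing a dumped config like the one above for CPU inference.
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file("building_model_weight/buildingsv1_cfg.yaml")

# Override training-time values that do not apply when serving the model.
cfg.MODEL.DEVICE = "cpu"                       # the dump says cuda
cfg.MODEL.WEIGHTS = "building_model_weight/buildingsv1_best.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7    # confidence threshold for predictions

predictor = DefaultPredictor(cfg)              # called with a BGR numpy image (INPUT.FORMAT: BGR)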
{model_weights → building_model_weight}/model_final.pth
RENAMED
File without changes
{model_weights → building_model_weight}/tree_cfg.yml
RENAMED
File without changes
{model_weights → building_model_weight}/tree_model.pth
RENAMED
File without changes
tree_model_weights/README.md
ADDED
@@ -0,0 +1,3 @@
# Model Weights

This is where pretrained model weights are stored.
tree_model_weights/buildings_poc_cfg.yml
ADDED
@@ -0,0 +1,325 @@
CUDNN_BENCHMARK: false
DATALOADER:
  ASPECT_RATIO_GROUPING: true
  FILTER_EMPTY_ANNOTATIONS: true
  NUM_WORKERS: 2
  REPEAT_THRESHOLD: 0.0
  SAMPLER_TRAIN: TrainingSampler
DATASETS:
  PRECOMPUTED_PROPOSAL_TOPK_TEST: 1000
  PRECOMPUTED_PROPOSAL_TOPK_TRAIN: 2000
  PROPOSAL_FILES_TEST: []
  PROPOSAL_FILES_TRAIN: []
  TEST:
  - urban-small_test
  TRAIN:
  - urban-small_train
GLOBAL:
  HACK: 1.0
INPUT:
  CROP:
    ENABLED: false
    SIZE:
    - 0.9
    - 0.9
    TYPE: relative_range
  FORMAT: BGR
  MASK_FORMAT: polygon
  MAX_SIZE_TEST: 1333
  MAX_SIZE_TRAIN: 1333
  MIN_SIZE_TEST: 800
  MIN_SIZE_TRAIN:
  - 640
  - 672
  - 704
  - 736
  - 768
  - 800
  MIN_SIZE_TRAIN_SAMPLING: choice
  RANDOM_FLIP: horizontal
MODEL:
  ANCHOR_GENERATOR:
    ANGLES:
    - - -90
      - 0
      - 90
    ASPECT_RATIOS:
    - - 0.5
      - 1.0
      - 2.0
    NAME: DefaultAnchorGenerator
    OFFSET: 0.0
    SIZES:
    - - 32
    - - 64
    - - 128
    - - 256
    - - 512
  BACKBONE:
    FREEZE_AT: 2
    NAME: build_resnet_fpn_backbone
  DEVICE: cuda
  FPN:
    FUSE_TYPE: sum
    IN_FEATURES:
    - res2
    - res3
    - res4
    - res5
    NORM: ''
    OUT_CHANNELS: 256
  KEYPOINT_ON: false
  LOAD_PROPOSALS: false
  MASK_ON: true
  META_ARCHITECTURE: GeneralizedRCNN
  PANOPTIC_FPN:
    COMBINE:
      ENABLED: true
      INSTANCES_CONFIDENCE_THRESH: 0.5
      OVERLAP_THRESH: 0.5
      STUFF_AREA_LIMIT: 4096
    INSTANCE_LOSS_WEIGHT: 1.0
  PIXEL_MEAN:
  - 103.53
  - 116.28
  - 123.675
  PIXEL_STD:
  - 1.0
  - 1.0
  - 1.0
  PROPOSAL_GENERATOR:
    MIN_SIZE: 0
    NAME: RPN
  RESNETS:
    DEFORM_MODULATED: false
    DEFORM_NUM_GROUPS: 1
    DEFORM_ON_PER_STAGE:
    - false
    - false
    - false
    - false
    DEPTH: 101
    NORM: FrozenBN
    NUM_GROUPS: 1
    OUT_FEATURES:
    - res2
    - res3
    - res4
    - res5
    RES2_OUT_CHANNELS: 256
    RES5_DILATION: 1
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: true
    WIDTH_PER_GROUP: 64
  RETINANET:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_WEIGHTS: &id002
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    FOCAL_LOSS_ALPHA: 0.25
    FOCAL_LOSS_GAMMA: 2.0
    IN_FEATURES:
    - p3
    - p4
    - p5
    - p6
    - p7
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.4
    - 0.5
    NMS_THRESH_TEST: 0.5
    NORM: ''
    NUM_CLASSES: 80
    NUM_CONVS: 4
    PRIOR_PROB: 0.01
    SCORE_THRESH_TEST: 0.05
    SMOOTH_L1_LOSS_BETA: 0.1
    TOPK_CANDIDATES_TEST: 1000
  ROI_BOX_CASCADE_HEAD:
    BBOX_REG_WEIGHTS:
    - &id001
      - 10.0
      - 10.0
      - 5.0
      - 5.0
    - - 20.0
      - 20.0
      - 10.0
      - 10.0
    - - 30.0
      - 30.0
      - 15.0
      - 15.0
    IOUS:
    - 0.5
    - 0.6
    - 0.7
  ROI_BOX_HEAD:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id001
    CLS_AGNOSTIC_BBOX_REG: false
    CONV_DIM: 256
    FC_DIM: 1024
    FED_LOSS_FREQ_WEIGHT_POWER: 0.5
    FED_LOSS_NUM_CLASSES: 50
    NAME: FastRCNNConvFCHead
    NORM: ''
    NUM_CONV: 0
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
    SMOOTH_L1_BETA: 0.0
    TRAIN_ON_PRED_BOXES: false
    USE_FED_LOSS: false
    USE_SIGMOID_CE: false
  ROI_HEADS:
    BATCH_SIZE_PER_IMAGE: 512
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    IOU_LABELS:
    - 0
    - 1
    IOU_THRESHOLDS:
    - 0.5
    NAME: StandardROIHeads
    NMS_THRESH_TEST: 0.5
    NUM_CLASSES: 8
    POSITIVE_FRACTION: 0.25
    PROPOSAL_APPEND_GT: true
    SCORE_THRESH_TEST: 0.25
  ROI_KEYPOINT_HEAD:
    CONV_DIMS:
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    LOSS_WEIGHT: 1.0
    MIN_KEYPOINTS_PER_IMAGE: 1
    NAME: KRCNNConvDeconvUpsampleHead
    NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: true
    NUM_KEYPOINTS: 17
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  ROI_MASK_HEAD:
    CLS_AGNOSTIC_MASK: false
    CONV_DIM: 256
    NAME: MaskRCNNConvUpsampleHead
    NORM: ''
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  RPN:
    BATCH_SIZE_PER_IMAGE: 256
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id002
    BOUNDARY_THRESH: -1
    CONV_DIMS:
    - -1
    HEAD_NAME: StandardRPNHead
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    - p6
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.3
    - 0.7
    LOSS_WEIGHT: 1.0
    NMS_THRESH: 0.7
    POSITIVE_FRACTION: 0.5
    POST_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TRAIN: 1000
    PRE_NMS_TOPK_TEST: 1000
    PRE_NMS_TOPK_TRAIN: 2000
    SMOOTH_L1_BETA: 0.0
  SEM_SEG_HEAD:
    COMMON_STRIDE: 4
    CONVS_DIM: 128
    IGNORE_VALUE: 255
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    LOSS_WEIGHT: 1.0
    NAME: SemSegFPNHead
    NORM: GN
    NUM_CLASSES: 54
  WEIGHTS: ./output/model_final.pth
OUTPUT_DIR: ./output
SEED: -1
SOLVER:
  AMP:
    ENABLED: false
  BASE_LR: 0.00025
  BASE_LR_END: 0.0
  BIAS_LR_FACTOR: 1.0
  CHECKPOINT_PERIOD: 5000
  CLIP_GRADIENTS:
    CLIP_TYPE: value
    CLIP_VALUE: 1.0
    ENABLED: false
    NORM_TYPE: 2.0
  GAMMA: 0.1
  IMS_PER_BATCH: 2
  LR_SCHEDULER_NAME: WarmupMultiStepLR
  MAX_ITER: 3000
  MOMENTUM: 0.9
  NESTEROV: false
  NUM_DECAYS: 3
  REFERENCE_WORLD_SIZE: 0
  RESCALE_INTERVAL: false
  STEPS: []
  WARMUP_FACTOR: 0.001
  WARMUP_ITERS: 1000
  WARMUP_METHOD: linear
  WEIGHT_DECAY: 0.0001
  WEIGHT_DECAY_BIAS: null
  WEIGHT_DECAY_NORM: 0.0
TEST:
  AUG:
    ENABLED: false
    FLIP: true
    MAX_SIZE: 4000
    MIN_SIZES:
    - 400
    - 500
    - 600
    - 700
    - 800
    - 900
    - 1000
    - 1100
    - 1200
  DETECTIONS_PER_IMAGE: 100
  EVAL_PERIOD: 0
  EXPECTED_RESULTS: []
  KEYPOINT_OKS_SIGMAS: []
  PRECISE_BN:
    ENABLED: false
    NUM_ITER: 200
VERSION: 2
VIS_PERIOD: 0
tree_model_weights/model_final.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42b87453ef364938795263c24d9be8cbc4cd7e01c3390690a6c4cfb71a6ce23e
size 503364111
tree_model_weights/tree_cfg.yml
ADDED
@@ -0,0 +1,325 @@
CUDNN_BENCHMARK: false
DATALOADER:
  ASPECT_RATIO_GROUPING: true
  FILTER_EMPTY_ANNOTATIONS: true
  NUM_WORKERS: 2
  REPEAT_THRESHOLD: 0.0
  SAMPLER_TRAIN: TrainingSampler
DATASETS:
  PRECOMPUTED_PROPOSAL_TOPK_TEST: 1000
  PRECOMPUTED_PROPOSAL_TOPK_TRAIN: 2000
  PROPOSAL_FILES_TEST: []
  PROPOSAL_FILES_TRAIN: []
  TEST:
  - urban-trees-fdokv_test
  TRAIN:
  - urban-trees-fdokv_train
GLOBAL:
  HACK: 1.0
INPUT:
  CROP:
    ENABLED: false
    SIZE:
    - 0.9
    - 0.9
    TYPE: relative_range
  FORMAT: BGR
  MASK_FORMAT: polygon
  MAX_SIZE_TEST: 1333
  MAX_SIZE_TRAIN: 1333
  MIN_SIZE_TEST: 800
  MIN_SIZE_TRAIN:
  - 640
  - 672
  - 704
  - 736
  - 768
  - 800
  MIN_SIZE_TRAIN_SAMPLING: choice
  RANDOM_FLIP: horizontal
MODEL:
  ANCHOR_GENERATOR:
    ANGLES:
    - - -90
      - 0
      - 90
    ASPECT_RATIOS:
    - - 0.5
      - 1.0
      - 2.0
    NAME: DefaultAnchorGenerator
    OFFSET: 0.0
    SIZES:
    - - 32
    - - 64
    - - 128
    - - 256
    - - 512
  BACKBONE:
    FREEZE_AT: 2
    NAME: build_resnet_fpn_backbone
  DEVICE: cuda
  FPN:
    FUSE_TYPE: sum
    IN_FEATURES:
    - res2
    - res3
    - res4
    - res5
    NORM: ''
    OUT_CHANNELS: 256
  KEYPOINT_ON: false
  LOAD_PROPOSALS: false
  MASK_ON: true
  META_ARCHITECTURE: GeneralizedRCNN
  PANOPTIC_FPN:
    COMBINE:
      ENABLED: true
      INSTANCES_CONFIDENCE_THRESH: 0.5
      OVERLAP_THRESH: 0.5
      STUFF_AREA_LIMIT: 4096
    INSTANCE_LOSS_WEIGHT: 1.0
  PIXEL_MEAN:
  - 103.53
  - 116.28
  - 123.675
  PIXEL_STD:
  - 1.0
  - 1.0
  - 1.0
  PROPOSAL_GENERATOR:
    MIN_SIZE: 0
    NAME: RPN
  RESNETS:
    DEFORM_MODULATED: false
    DEFORM_NUM_GROUPS: 1
    DEFORM_ON_PER_STAGE:
    - false
    - false
    - false
    - false
    DEPTH: 101
    NORM: FrozenBN
    NUM_GROUPS: 1
    OUT_FEATURES:
    - res2
    - res3
    - res4
    - res5
    RES2_OUT_CHANNELS: 256
    RES5_DILATION: 1
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: true
    WIDTH_PER_GROUP: 64
  RETINANET:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_WEIGHTS: &id002
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    FOCAL_LOSS_ALPHA: 0.25
    FOCAL_LOSS_GAMMA: 2.0
    IN_FEATURES:
    - p3
    - p4
    - p5
    - p6
    - p7
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.4
    - 0.5
    NMS_THRESH_TEST: 0.5
    NORM: ''
    NUM_CLASSES: 80
    NUM_CONVS: 4
    PRIOR_PROB: 0.01
    SCORE_THRESH_TEST: 0.05
    SMOOTH_L1_LOSS_BETA: 0.1
    TOPK_CANDIDATES_TEST: 1000
  ROI_BOX_CASCADE_HEAD:
    BBOX_REG_WEIGHTS:
    - &id001
      - 10.0
      - 10.0
      - 5.0
      - 5.0
    - - 20.0
      - 20.0
      - 10.0
      - 10.0
    - - 30.0
      - 30.0
      - 15.0
      - 15.0
    IOUS:
    - 0.5
    - 0.6
    - 0.7
  ROI_BOX_HEAD:
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id001
    CLS_AGNOSTIC_BBOX_REG: false
    CONV_DIM: 256
    FC_DIM: 1024
    FED_LOSS_FREQ_WEIGHT_POWER: 0.5
    FED_LOSS_NUM_CLASSES: 50
    NAME: FastRCNNConvFCHead
    NORM: ''
    NUM_CONV: 0
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
    SMOOTH_L1_BETA: 0.0
    TRAIN_ON_PRED_BOXES: false
    USE_FED_LOSS: false
    USE_SIGMOID_CE: false
  ROI_HEADS:
    BATCH_SIZE_PER_IMAGE: 512
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    IOU_LABELS:
    - 0
    - 1
    IOU_THRESHOLDS:
    - 0.5
    NAME: StandardROIHeads
    NMS_THRESH_TEST: 0.5
    NUM_CLASSES: 2
    POSITIVE_FRACTION: 0.25
    PROPOSAL_APPEND_GT: true
    SCORE_THRESH_TEST: 0.25
  ROI_KEYPOINT_HEAD:
    CONV_DIMS:
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    - 512
    LOSS_WEIGHT: 1.0
    MIN_KEYPOINTS_PER_IMAGE: 1
    NAME: KRCNNConvDeconvUpsampleHead
    NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: true
    NUM_KEYPOINTS: 17
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  ROI_MASK_HEAD:
    CLS_AGNOSTIC_MASK: false
    CONV_DIM: 256
    NAME: MaskRCNNConvUpsampleHead
    NORM: ''
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    POOLER_SAMPLING_RATIO: 0
    POOLER_TYPE: ROIAlignV2
  RPN:
    BATCH_SIZE_PER_IMAGE: 256
    BBOX_REG_LOSS_TYPE: smooth_l1
    BBOX_REG_LOSS_WEIGHT: 1.0
    BBOX_REG_WEIGHTS: *id002
    BOUNDARY_THRESH: -1
    CONV_DIMS:
    - -1
    HEAD_NAME: StandardRPNHead
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    - p6
    IOU_LABELS:
    - 0
    - -1
    - 1
    IOU_THRESHOLDS:
    - 0.3
    - 0.7
    LOSS_WEIGHT: 1.0
    NMS_THRESH: 0.7
    POSITIVE_FRACTION: 0.5
    POST_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TRAIN: 1000
    PRE_NMS_TOPK_TEST: 1000
    PRE_NMS_TOPK_TRAIN: 2000
    SMOOTH_L1_BETA: 0.0
  SEM_SEG_HEAD:
    COMMON_STRIDE: 4
    CONVS_DIM: 128
    IGNORE_VALUE: 255
    IN_FEATURES:
    - p2
    - p3
    - p4
    - p5
    LOSS_WEIGHT: 1.0
    NAME: SemSegFPNHead
    NORM: GN
    NUM_CLASSES: 54
  WEIGHTS: ./output/model_final.pth
OUTPUT_DIR: ./output
SEED: -1
SOLVER:
  AMP:
    ENABLED: false
  BASE_LR: 0.00025
  BASE_LR_END: 0.0
  BIAS_LR_FACTOR: 1.0
  CHECKPOINT_PERIOD: 5000
  CLIP_GRADIENTS:
    CLIP_TYPE: value
    CLIP_VALUE: 1.0
    ENABLED: false
    NORM_TYPE: 2.0
  GAMMA: 0.1
  IMS_PER_BATCH: 2
  LR_SCHEDULER_NAME: WarmupMultiStepLR
  MAX_ITER: 3000
  MOMENTUM: 0.9
  NESTEROV: false
  NUM_DECAYS: 3
  REFERENCE_WORLD_SIZE: 0
  RESCALE_INTERVAL: false
  STEPS: []
  WARMUP_FACTOR: 0.001
  WARMUP_ITERS: 1000
  WARMUP_METHOD: linear
  WEIGHT_DECAY: 0.0001
  WEIGHT_DECAY_BIAS: null
  WEIGHT_DECAY_NORM: 0.0
TEST:
  AUG:
    ENABLED: false
    FLIP: true
    MAX_SIZE: 4000
    MIN_SIZES:
    - 400
    - 500
    - 600
    - 700
    - 800
    - 900
    - 1000
    - 1100
    - 1200
  DETECTIONS_PER_IMAGE: 100
  EVAL_PERIOD: 0
  EXPECTED_RESULTS: []
  KEYPOINT_OKS_SIGMAS: []
  PRECISE_BN:
    ENABLED: false
    NUM_ITER: 200
VERSION: 2
VIS_PERIOD: 0
tree_model_weights/tree_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:552742a48b35b5d30104379da031963eb696758794d6c23c36b24dd35c32cc8b
size 503106240
{model_weights → tree_model_weights}/treev1_best.pth
RENAMED
File without changes
{model_weights → tree_model_weights}/treev1_cfg.yaml
RENAMED
File without changes