Commit f418a1f9 authored by Micaela Verucchi

Add new BDD tests, add BDD100K_val download, fix yolo3_512 link



Signed-off-by: Micaela Verucchi <micaelaverucchi@gmail.com>
parent 2fbac770
@@ -115,6 +115,9 @@ target_link_libraries(test_yolo3_flir tkDNN)
add_executable(test_mobilenetv2ssd tests/mobilenetv2ssd/mobilenetv2ssd.cpp)
target_link_libraries(test_mobilenetv2ssd tkDNN)
add_executable(test_bdd-mobilenetv2ssd tests/bdd-mobilenetv2ssd/bdd-mobilenetv2ssd.cpp)
target_link_libraries(test_bdd-mobilenetv2ssd tkDNN)
add_executable(test_mobilenetv2ssd512 tests/mobilenetv2ssd512/mobilenetv2ssd512.cpp)
target_link_libraries(test_mobilenetv2ssd512 tkDNN)
@@ -124,6 +127,9 @@ target_link_libraries(test_resnet101 tkDNN)
add_executable(test_csresnext50-panet-spp tests/csresnext50-panet-spp/csresnext50-panet-spp.cpp)
target_link_libraries(test_csresnext50-panet-spp tkDNN)
add_executable(test_bdd-csresnext50-panet-spp tests/bdd-csresnext50-panet-spp/bdd-csresnext50-panet-spp.cpp)
target_link_libraries(test_bdd-csresnext50-panet-spp tkDNN)
add_executable(test_resnet101_cnet tests/resnet101_cnet/resnet101_cnet.cpp)
target_link_libraries(test_resnet101_cnet tkDNN)
@@ -167,10 +167,16 @@ N.b. INT8 calibration requires TensorRT version greater than or equal to 6.0
To compute mAP, precision, recall and F1 score, run the map_demo.
A validation set is needed.
To download COCO_val2017 (80 classes) run (from the root folder):
```
bash scripts/download_validation.sh COCO
```
To download BDD100K_val (10 classes) run (from the root folder):
```
bash scripts/download_validation.sh BDD
```
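The script loops over all of its arguments, so both validation sets can be fetched in one call:
```
bash scripts/download_validation.sh COCO BDD
```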
To compute the mAP, the following parameters are needed:
```
./map_demo <network rt> <network type [y|c|m]> <labels file path> <config file path>
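# network type: y = YOLO-based, c = CenterNet-based, m = MobileNet-SSD-based
# Illustrative invocation (file names here are hypothetical), using the labels
# list generated by scripts/download_validation.sh:
#   ./map_demo yolo3_berkeley_FP32.rt y ../demo/BDD100K_val/all_labels.txt config.yaml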
@@ -199,7 +205,7 @@ cd build
| yolo_tiny | YOLO v2 tiny<sup>1</sup> | [COCO 2014](http://cocodataset.org/) | 80 | 416x416 | [weights](https://cloud.hipert.unimore.it/s/m3orfJr8pGrN5mQ/download) |
| yolo_voc | YOLO v2<sup>1</sup> | [VOC](http://host.robots.ox.ac.uk/pascal/VOC/) | 21 | 416x416 | [weights](https://cloud.hipert.unimore.it/s/DJC5Fi2pEjfNDP9/download) |
| yolo3 | YOLO v3<sup>2</sup> | [COCO 2014](http://cocodataset.org/) | 80 | 416x416 | [weights](https://cloud.hipert.unimore.it/s/jPXmHyptpLoNdNR/download) |
| yolo3_512 | YOLO v3<sup>2</sup> | [COCO 2017](http://cocodataset.org/) | 80 | 512x512 | [weights](https://cloud.hipert.unimore.it/s/RGecMeGLD4cXEWL/download) |
| yolo3_berkeley | YOLO v3<sup>2</sup> | [BDD100K](https://bair.berkeley.edu/blog/2018/05/30/bdd/) | 10 | 320x544 | [weights](https://cloud.hipert.unimore.it/s/o5cHa4AjTKS64oD/download) |
| yolo3_coco4 | YOLO v3<sup>2</sup> | [COCO 2014](http://cocodataset.org/) | 4 | 416x416 | [weights](https://cloud.hipert.unimore.it/s/o27NDzSAartbyc4/download) |
| yolo3_flir | YOLO v3<sup>2</sup> | [FREE FLIR](https://www.flir.com/oem/adas/adas-dataset-form/) | 3 | 320x544 | [weights](https://cloud.hipert.unimore.it/s/62DECncmF6bMMiH/download) |
#!/bin/bash
function elaborate_testset {
    # $1 = download URL, $2 = validation set name (also used as folder and zip name)
    wget "$1" -O "$2".zip
    unzip -d "$2" "$2".zip
    rm "$2".zip
    cd "$2"/
    # generate absolute-path lists of the annotations and images
    realpath labels/* > all_labels.txt
    realpath images/* > all_images.txt
    cd ..
}
cd demo
for valset in "$@"
do
    if [ "$valset" = "COCO" ]; then
        echo "Downloading $valset validation set in demo"
        elaborate_testset "https://cloud.hipert.unimore.it/s/LNxBDk4wzqXPL8c/download" "COCO_val2017"
    elif [ "$valset" = "BDD" ]; then
        echo "Downloading $valset validation set in demo"
        elaborate_testset "https://cloud.hipert.unimore.it/s/bikqk3FzCq2tg4D/download" "BDD100K_val"
    fi
done
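# Note: the all_labels.txt generated in each set folder provides the
# <labels file path> argument expected by the map_demo.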
@@ -172,7 +172,12 @@ bool MobilenetDetection::init(const std::string& tensor_path, const int n_classe
    colors[c] = cv::Scalar(int(255.0 * b), int(255.0 * g), int(255.0 * r));
}
if(classes == 11){ // BDD (the class count includes the background class)
    const char *classes_names_[] = {
        "person","car","truck","bus","motor","bike","rider","traffic light","traffic sign","train"};
    classesNames = std::vector<std::string>(classes_names_, std::end(classes_names_));
}
else if(classes == 21){ // VOC
    const char *classes_names_[] = {
        "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
@@ -180,7 +185,7 @@ bool MobilenetDetection::init(const std::string& tensor_path, const int n_classe
    classesNames = std::vector<std::string>(classes_names_, std::end(classes_names_));
}
else if (classes == 81){ // COCO
    const char *classes_names_[] = {
        "person" , "bicycle" , "car" , "motorbike" , "aeroplane" , "bus" ,
        "train" , "truck" , "boat" , "traffic light" , "fire hydrant" , "stop sign" ,
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=32
subdivisions=16
width=544
height=320
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1
#19:104x104 38:52x52 65:26x26 80:13x13 for 416
[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
# 1-1
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 1-2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 1-3
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 1-T
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
# 2-1
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 2-2
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 2-3
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 2-T
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-16
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[route]
layers = -2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
# 3-1
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 3-2
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 3-3
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 3-4
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 3-5
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# 3-T
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[route]
layers = -1,-24
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3