Commit fa2b3d26 authored by micaela

Fix typos (#107)



Signed-off-by: micaela <micaelaverucchi@gmail.com>
parent 38106a94
@@ -15,7 +15,7 @@ M. Verucchi, G. Brilli, D. Sapienza, M. Verasani, M. Arena, F. Gatti, A. Capoton
```
## FPS Results
-Inference FPS of yolov4 with tkDNN, average of 1200 images with the same dimesion as the input size, on
+Inference FPS of yolov4 with tkDNN, average of 1200 images with the same dimension as the input size, on
* RTX 2080Ti (CUDA 10.2, TensorRT 7.0.0, Cudnn 7.6.5);
* Xavier AGX, Jetpack 4.3 (CUDA 10.0, CUDNN 7.6.3, tensorrt 6.0.1 );
* Tx2, Jetpack 4.2 (CUDA 10.0, CUDNN 7.3.1, tensorrt 5.0.6 );
@@ -169,7 +169,7 @@ tkDNN implement and easy parser for darknet cfg files, a network can be converte
tk::dnn::Network *net = tk::dnn::darknetParser("yolov4.cfg", "yolov4/layers", "coco.names");
net->print();
```
-All models from darknet are now parsed directly from cfg, you still need to export the weights with the descripted tools in the previus section.
+All models from darknet are now parsed directly from cfg, you still need to export the weights with the described tools in the previous section.
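For context, a minimal sketch of what typically follows the parsing step documented here: serializing the parsed network into a TensorRT engine. The darknetParser and NetworkRT constructor signatures appear elsewhere in this commit; the include path and the output file name are assumptions.
```
#include <tkdnn.h>   // assumed include path; adjust to your install layout

int main() {
    // parse the darknet cfg and the exported weights, as in the README snippet above
    tk::dnn::Network *net = tk::dnn::darknetParser("yolov4.cfg", "yolov4/layers", "coco.names");
    net->print();

    // build (or load) the TensorRT engine; constructor signature as seen in NetworkRT.cpp
    tk::dnn::NetworkRT netRT(net, "yolo4_fp32.rt");

    net->releaseLayers();
    return 0;
}
```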
<details>
<summary>Supported layers</summary>
convolutional
@@ -203,7 +203,7 @@ cmake .. -DDEBUG=True
make
```
-Once you have succesfully created your rt file, run the demo:
+Once you have successfully created your rt file, run the demo:
```
./demo yolo4_fp32.rt ../demo/yolo_test.mp4 y
```
@@ -247,7 +247,7 @@ You should provide image_list.txt and label_list.txt, using training images. How
```
bash scripts/download_validation.sh COCO
```
-to automatically download COCO2017 validation (inside demo folder) and create those needed file. Use BDD insted of COCO to download BDD validation.
+to automatically download COCO2017 validation (inside demo folder) and create those needed file. Use BDD instead of COCO to download BDD validation.
Then a complete example using yolo3 and COCO dataset would be:
```
@@ -269,8 +269,8 @@ N.B.
export TKDNN_BATCHSIZE=2
# build tensorRT files
```
-This will create a TensorRT file with the desidered **max** batch size.
-The test will still run with a batch of 1, but the created tensorRT can manage the desidered batch size.
+This will create a TensorRT file with the desired **max** batch size.
+The test will still run with a batch of 1, but the created tensorRT can manage the desired batch size.
### Test batch Inference
This will test the network with random input and check if the output of each batch is the same.
@@ -316,7 +316,7 @@ cd build
./map_demo dla34_cnet_FP32.rt c ../demo/COCO_val2017/all_labels.txt ../demo/config.yaml
```
-This demo also creates a json file named ```net_name_COCO_res.json``` containing all the detections computed. The detections are in COCO format, the correct format to subit the results to [CodaLab COCO detection challenge](https://competitions.codalab.org/competitions/20794#participate).
+This demo also creates a json file named ```net_name_COCO_res.json``` containing all the detections computed. The detections are in COCO format, the correct format to submit the results to [CodaLab COCO detection challenge](https://competitions.codalab.org/competitions/20794#participate).
## Existing tests and supported networks
......
@@ -76,10 +76,10 @@ class DetectionNN {
~DetectionNN(){};
/**
-* Method used to inialize the class, allocate memory and compute
+* Method used to initialize the class, allocate memory and compute
* needed data.
*
-* @param tensor_path path to the rt file og the NN.
+* @param tensor_path path to the rt file of the NN.
* @param n_classes number of classes for the given dataset.
* @param n_batches maximum number of batches to use in inference
* @return true if everything is correct, false otherwise.
@@ -141,9 +141,9 @@ class DetectionNN {
}
/**
-* Method to draw boundixg boxes and labels on a frame.
+* Method to draw bounding boxes and labels on a frame.
*
-* @param frames orginal frame to draw bounding box on.
+* @param frames original frame to draw bounding box on.
*/
void draw(std::vector<cv::Mat>& frames) {
tk::dnn::box b;
......
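As a usage note for the DetectionNN documentation touched above, here is a hedged sketch of how the class is typically driven. The init() and draw() signatures match this header; the concrete subclass (tk::dnn::Yolo3Detection), the header name, and the update() call are assumptions based on the demo code.
```
#include <opencv2/opencv.hpp>
#include "Yolo3Detection.h"   // assumed header name

int main() {
    tk::dnn::Yolo3Detection detNN;          // assumed concrete detector
    detNN.init("yolo4_fp32.rt", 80, 1);     // rt file, n_classes, n_batches

    std::vector<cv::Mat> frames{ cv::imread("frame.jpg") };   // example input image
    detNN.update(frames, 1);                // assumed inference entry point
    detNN.draw(frames);                     // draws bounding boxes and labels in place

    cv::imwrite("detection.jpg", frames[0]);
    return 0;
}
```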
@@ -44,7 +44,7 @@ class ImuOdom {
virtual ~ImuOdom() {}
/**
-* Method used for inizialize the class
+* Method used for initialize the class
*
* @return Success of the initialization
*/
@@ -141,7 +141,7 @@ class ImuOdom {
//odomPOS = odomPOS + deltaP.cast<double>(); // V2
odomROT = odomROT * q.normalized().toRotationMatrix();
-// compute euler
+// compute Euler
auto newEULER = odomROT.eulerAngles(0, 1, 2);
for(int i=0; i<3; i++) {
while( fabs(newEULER(i) - odomEULER(i)) > M_PI_2 ) {
......
@@ -171,7 +171,7 @@ public:
/**
-Input layer (it doesnt need weigths)
+Input layer (it doesn't need weights)
*/
class Input : public Layer {
@@ -207,7 +207,7 @@ public:
/**
-Avaible activation functions
+Available activation functions
*/
typedef enum {
ACTIVATION_ELU = 100,
@@ -216,7 +216,7 @@ typedef enum {
} tkdnnActivationMode_t;
/**
-Activation layer (it doesnt need weigths)
+Activation layer (it doesn't need weights)
*/
class Activation : public Layer {
@@ -318,9 +318,9 @@ public:
virtual dnnType* infer(dataDim_t &dim, dnnType* srcData);
const bool bidirectional = true; /**> is the net bidir */
-bool returnSeq = false; /**> if false return only the result of last timestep */
+bool returnSeq = false; /**> if false return only the result of last timestamp */
int stateSize = 0; /**> number of hidden states */
-int seqLen = 0; /**> number of timesteps */
+int seqLen = 0; /**> number of timestamp */
int numLayers = 1; /**> number of internal layers */
protected:
@@ -367,7 +367,7 @@ public:
/**
-Deformable Convolutionl 2d layer
+Deformable Convolutional 2d layer
*/
class DeformConv2d : public LayerWgs {
@@ -449,7 +449,7 @@ protected:
/**
-Avaible pooling functions (padding on tkDNN is not supported)
+Available pooling functions (padding on tkDNN is not supported)
*/
typedef enum {
POOLING_MAX = 0,
@@ -460,7 +460,7 @@ typedef enum {
/**
Pooling layer
-currenty supported only 2d pooing (also on 3d input)
+currently supported only 2d pooing (also on 3d input)
*/
class Pooling : public Layer {
@@ -526,7 +526,7 @@ public:
/**
Reorg layer
-Mantain same dimension but change C*H*W distribution
+Maintains same dimension but change C*H*W distribution
*/
class Reorg : public Layer {
@@ -559,7 +559,7 @@ public:
/**
Upsample layer
-Mantain same dimension but change C*H*W distribution
+Maintains same dimension but change C*H*W distribution
*/
class Upsample : public Layer {
......
@@ -7,12 +7,12 @@
namespace tk { namespace dnn {
/**
-Data rapresentation beetween layers
+Data representation between layers
n = batch size
c = channels
-h = heigth (lines)
+h = height (lines)
w = width (rows)
-l = lenght (3rd dimension)
+l = length (3rd dimension)
*/
struct dataDim_t {
@@ -43,7 +43,7 @@ public:
void releaseLayers();
/**
-Do inferece for every added layer
+Do inference for every added layer
*/
dnnType* infer(dataDim_t &dim, dnnType* data);
......
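A small illustration of the dataDim_t fields and the Network::infer() call documented in the two hunks above, continuing from a parsed `net` as in the earlier parser sketch. It assumes dnnType is float, that checkCuda() is available as in other snippets in this diff, and that the sizes shown are just an example.
```
// dims follow the documented layout: n (batch), c, h, w, l
tk::dnn::dataDim_t dim;
dim.n = 1; dim.c = 3; dim.h = 416; dim.w = 416; dim.l = 1;

dnnType *input_d;
checkCuda(cudaMalloc(&input_d, dim.tot() * sizeof(dnnType)));  // tot() = n*c*h*w*l
// ... copy the preprocessed image into input_d ...

// run inference for every added layer (declaration shown above); dim is passed by reference
dnnType *output_d = net->infer(dim, input_d);
```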
@@ -91,7 +91,7 @@ public:
}
/**
-Do inferece
+Do inference
*/
dnnType* infer(dataDim_t &dim, dnnType* data);
void enqueue(int batchSize = 1);
......
@@ -73,12 +73,12 @@ double computeMap( std::vector<Frame> &images,const int classes,
* all the recall levels are evaluated, otherwise only
* map_point recall levels are used. For COCO evaluation
* 101 points are used.
-* @param map_step step used to increment IoU theshold
+* @param map_step step used to increment IoU threshold
* @param map_levels number of IoU step to perform
* @param verbose is set to true, prints on screen additional info
* @param write_on_file if set to true, the results produced by this function
* are written on file
-* @param net name of the considerd neural network
+* @param net name of the considered neural network
*
* @return mAP IoU_tresh:IoU_tresh+map_step*map_levels (e.g. mAP 0.5:0.95 when
* map_step=0.05 and map_levels=10)
@@ -89,7 +89,7 @@ double computeMapNIoULevels(std::vector<Frame> &images,const int classes,
const int map_levels=10, const bool verbose=false,
const bool write_on_file = false, std::string net = "");
/**
-* This method computes the numper of True Positive (TP), False Positive (FP),
+* This method computes the number of True Positive (TP), False Positive (FP),
* False Negative (FN), precision, recall and f1-score.
* Those values are computer over all the detections, over all the classes.
*
@@ -101,7 +101,7 @@ double computeMapNIoULevels(std::vector<Frame> &images,const int classes,
* @param verbose is set to true, prints on screen additional info
* @param write_on_file if set to true, the results produced by this function
* are written on file
-* @param net name of the considerd neural network
+* @param net name of the considered neural network
*/
void computeTPFPFN( std::vector<Frame> &images,const int classes,
const float IoU_thresh=0.5, const float conf_thresh=0.3,
......
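To make the mAP parameters documented above concrete, here is a hedged call sketch. The parameter meanings come from the documentation in this diff; the namespace qualification and the exact order of the defaulted leading arguments are assumptions, and filling `images` with detections and ground truth is assumed to happen elsewhere (e.g. in the map_demo).
```
using namespace tk::dnn;   // assumed namespace for Frame and the evaluation helpers

std::vector<Frame> images;                 // one entry per evaluated image
// ... fill images with detections and ground-truth labels ...

// COCO-style mAP 0.5:0.95: start at IoU 0.5, conf 0.3, 101 recall points,
// IoU step 0.05, 10 IoU levels, no verbose output, no file dump
double map = computeMapNIoULevels(images, 80, 0.5f, 0.3f, 101, 0.05f, 10,
                                  false, false, "yolo4");

// TP/FP/FN, precision, recall and F1 at a single IoU threshold
computeTPFPFN(images, 80, 0.5f, 0.3f);
```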
@@ -89,7 +89,7 @@ public:
for(int b=0; b<batchSize; b++) {
checkCuda(cudaMemcpy(offset, output_conv + b * 3 * chunk_dim, 2*chunk_dim*sizeof(dnnType), cudaMemcpyDeviceToDevice));
checkCuda(cudaMemcpy(mask, output_conv + b * 3 * chunk_dim + 2*chunk_dim, chunk_dim*sizeof(dnnType), cudaMemcpyDeviceToDevice));
-// kernel sigmoide
+// kernel sigmoid
activationSIGMOIDForward(mask, mask, chunk_dim);
// deformable convolution
dcnV2CudaForward(stat, handle,
......
@@ -20,7 +20,7 @@ int testInference(std::vector<std::string> input_bins, std::vector<std::string>
}
if(output_bins.size() != outputs.size()) {
std::cout<<output_bins.size()<<" "<<outputs.size()<<"\n";
-FatalError("outputs size missmatch");
+FatalError("outputs size mismatch");
}
// Load input
......
@@ -201,7 +201,7 @@ namespace tk { namespace dnn {
tk::dnn::Network *net = nullptr;
-// layers without activations to retrive correct id number
+// layers without activations to retrieve correct id number
std::vector<tk::dnn::Layer*> netLayers;
std::ifstream if_cfg(cfg_file);
......
@@ -95,7 +95,7 @@ dnnType* DeformConv2d::infer(dataDim_t &dim, dnnType* srcData) {
// split conv2d outputs into offset and mask
checkCuda(cudaMemcpy(offset, output_conv, 2*chunk_dim*sizeof(dnnType), cudaMemcpyDeviceToDevice));
checkCuda(cudaMemcpy(mask, output_conv + 2*chunk_dim, chunk_dim*sizeof(dnnType), cudaMemcpyDeviceToDevice));
-// kernel sigmoide
+// kernel sigmoid
activationSIGMOIDForward(mask, mask, chunk_dim);
// deformable convolution
......
@@ -37,7 +37,7 @@ dnnType* Dense::infer(dataDim_t &dim, dnnType* srcData) {
// place bias into dstData
checkCuda( cudaMemcpy(dstData, bias_d, dim_y*sizeof(dnnType), cudaMemcpyDeviceToDevice) );
-//do matrix moltiplication
+//do matrix multiplication
checkERROR( cublasSgemv(net->cublasHandle, CUBLAS_OP_T,
dim_x, dim_y,
&alpha,
......
@@ -133,7 +133,7 @@ LSTM::LSTM( Network *net, int hiddensize, bool returnSeq, std::string fname_weig
output_dim = input_dim;
output_dim.c = stateSize*(bidirectional ? 2 : 1);
-// if retunseq is disabled only the last timestep is returned
+// if retunseq is disabled only the last timestamp is returned
if(!returnSeq) {
output_dim.h = 1;
output_dim.w = 1;
@@ -254,7 +254,7 @@ dnnType* LSTM::infer(dataDim_t &dim, dnnType* srcData) {
rnnDesc,
seqLen, // number of time steps (nT)
x_desc_vec_.data(), // input array of desc (nT*nC_in)
-srcF, // input pointer
+srcF, // input pointer
hx_desc_, // initial hidden state desc
hx_ptr, // initial hidden state pointer
cx_desc_, // initial cell state desc
@@ -281,7 +281,7 @@ dnnType* LSTM::infer(dataDim_t &dim, dnnType* srcData) {
rnnDesc,
seqLen, // number of time steps (nT)
x_desc_vec_.data(), // input array of desc (nT*nC_in)
-srcB, // input pointer
+srcB, // input pointer
hx_desc_, // initial hidden state desc
hx_ptr, // initial hidden state pointer
cx_desc_, // initial cell state desc
@@ -289,7 +289,7 @@ dnnType* LSTM::infer(dataDim_t &dim, dnnType* srcData) {
w_desc_, // weights desc
wb_ptr, // weights pointer
y_desc_vec_.data(), // output desc (nT*nC_out)
-dstB_NR, // output pointer
+dstB_NR, // output pointer
hy_desc_, // final hidden state desc
hy_ptr, // final hidden state pointer
cy_desc_, // final cell state desc
@@ -307,7 +307,7 @@ dnnType* LSTM::infer(dataDim_t &dim, dnnType* srcData) {
one_output_dim.c*sizeof(dnnType), cudaMemcpyDeviceToDevice));
}
-// if retunseq is disabled only the last timestep is returned
+// if retunseq is disabled only the last timestamp is returned
if(returnSeq) {
// forward transpose
matrixTranspose(net->cublasHandle, dstF, dstData,
......
@@ -105,7 +105,7 @@ LayerWgs::LayerWgs(Network *net, int inputs, int outputs,
float2half(tmp_d, variance16_d, b_size);
cudaMemcpy(variance16_h, variance16_d, b_size*sizeof(__half), cudaMemcpyDeviceToHost);
-//conver scales
+//convert scales
float2half(scales_d, scales16_d, b_size);
cudaMemcpy(scales16_h, scales16_d, b_size*sizeof(__half), cudaMemcpyDeviceToHost);
......
@@ -12,7 +12,7 @@ MulAdd::MulAdd(Network *net, dnnType mul, dnnType add) : Layer(net) {
int size = input_dim.tot();
-// create a vector with all value setted to add
+// create a vector with all value set to add
dnnType *add_vector_h = new dnnType[size];
for(int i=0; i<size; i++)
add_vector_h[i] = add;
......
@@ -163,7 +163,7 @@ NetworkRT::NetworkRT(Network *net, const char *name) {
// note that indices are guaranteed to be less than IEngine::getNbBindings()
buf_input_idx = engineRT->getBindingIndex("data");
buf_output_idx = engineRT->getBindingIndex("out");
-std::cout<<"input idex = "<<buf_input_idx<<" -> output index = "<<buf_output_idx<<"\n";
+std::cout<<"input index = "<<buf_input_idx<<" -> output index = "<<buf_output_idx<<"\n";
Dims iDim = engineRT->getBindingDimensions(buf_input_idx);
......
@@ -63,7 +63,7 @@ dnnType* Region::infer(dataDim_t &dim, dnnType* srcData) {
}
-/* Intepret class */
+/* Interpret class */
RegionInterpret::RegionInterpret(dataDim_t input_dim, dataDim_t output_dim,
int classes, int coords, int num, float thresh, std::string fname_weights) {
......
@@ -13,7 +13,7 @@ Shortcut::Shortcut(Network *net, Layer *backLayer) : Layer(net) {
if( /*backLayer->output_dim.c != input_dim.c ||*/
backLayer->output_dim.w != input_dim.w ||
backLayer->output_dim.h != input_dim.h )
-FatalError("Shortcut dim missmatch");
+FatalError("Shortcut dim mismatch");
}
Shortcut::~Shortcut() {
......
@@ -63,7 +63,7 @@ double computeMap( std::vector<Frame> &images,const int classes,
int gt_checked = 0;
-// for each detection comput IoU with groundtruth and match detetcion and
+// for each detection compute IoU with groundtruth and match detetcion and
// groundtruth with IoU greater than IoU_thresh
for(auto &img:images){
for(size_t i=0; i<img.det.size(); i++){
@@ -153,7 +153,7 @@ double computeMap( std::vector<Frame> &images,const int classes,
}
}
-//compute average precision for each class. Two methods are avaible,
+//compute average precision for each class. Two methods are available,
//based on map_points required
double mean_average_precision = 0;
double last_recall, last_precision, delta_recall;
@@ -287,7 +287,7 @@ void computeTPFPFN( std::vector<Frame> &images,const int classes,
}
}
-//count all TP, FP, FN and compute precsion, recall and f1-score
+//count all TP, FP, FN and compute precision, recall and f1-score
double avg_precision = 0, avg_recall = 0, f1_score = 0;
int TP = 0, FP = 0, FN = 0;
for(size_t i=0; i<classes; i++){
......
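For reference, the standard definitions behind the precision/recall/F1 summary mentioned in the last hunk; this mirrors what computeTPFPFN reports rather than its exact implementation, and the totals are made-up example numbers.
```
int TP = 75, FP = 10, FN = 15;   // example totals accumulated over all classes

double precision = (TP + FP) ? double(TP) / (TP + FP) : 0.0;  // correct detections / all detections
double recall    = (TP + FN) ? double(TP) / (TP + FN) : 0.0;  // correct detections / all ground truths
double f1_score  = (precision + recall > 0)
                 ? 2.0 * precision * recall / (precision + recall)
                 : 0.0;                                        // harmonic mean of precision and recall
```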