diff --git a/src/papillon.cpp b/src/papillon.cpp
index c517c1b..f3e1ab0 100644
--- a/src/papillon.cpp
+++ b/src/papillon.cpp
@@ -56,17 +56,16 @@ class Traite_image {
             return;
         }
 
-        //cv::Mat& input = const_cast<cv::Mat&>(bridge_input->image);
         const cv::Mat& input = bridge_input->image;
         cv::Mat next;
         resize(input, next, cv::Size(input.size().width/resize_f, input.size().height/resize_f));
-        cv::Mat output;// = input.clone(); // (input.rows, input.cols, CV_32FC2);
+        cv::Mat output;
         if (first) {
             for (int i = 0; i < NB_FRAME_DROP; ++i) {
                 prevs.push_back(next.clone());
             }
             first = false;
-            ROS_INFO("first done");
+            ROS_INFO("Ready");
         }
 
         cv::Mat next_stab;
@@ -85,22 +84,11 @@ class Traite_image {
 
         pub_img.publish(cv_bridge::CvImage(msg->header, "rgb8", output).toImageMsg());
         pub_thres.publish(cv_bridge::CvImage(msg->header, "mono8", closed_thres).toImageMsg());
 
-        // bridge_input is handled by a smart-pointer. No explicit delete needed.
         prevs.pop_back();
         prevs.insert(prevs.begin(), next.clone());
     }
-
-
-    //int to string helper function
-    string intToString(int number){
-        //this function has a number input and string output
-        std::stringstream ss;
-        ss << number;
-        return ss.str();
-    }
-
-
+
     void stabiliseImg(cv::Mat prev, cv::Mat cur, cv::Mat &output){
         cv::Mat cur_grey, prev_grey;
         cv::cvtColor(cur, cur_grey, cv::COLOR_BGR2GRAY);
@@ -115,7 +103,7 @@ class Traite_image {
 
         cv::goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
         cv::calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err);
-        // weed out bad cv::Matches
+        // weed out bad matches
        for(size_t i=0; i < status.size(); i++) {
             if(status[i]) {
                 prev_corner2.push_back(prev_corner[i]);
@@ -144,64 +132,40 @@ class Traite_image {
         cv::cvtColor(cur, cur_grey, cv::COLOR_BGR2GRAY);
         cv::GaussianBlur(prev_grey, prev_grey, cv::Size(BLUR_Size,BLUR_Size), 3.0);
         cv::GaussianBlur(cur_grey, cur_grey, cv::Size(BLUR_Size,BLUR_Size), 3.0);
-        //blur(prev_grey, prev_grey, cv::Size(BLUR_Size, BLUR_Size));
-        //blur(cur_grey, cur_grey, cv::Size(BLUR_Size, BLUR_Size));
 
         // Subtract the 2 last frames and threshold them
         cv::Mat thres;
         cv::absdiff(prev_grey,cur_grey,thres);
-        // threshold(thres, thres, SENSITIVITY_VALUE, 255, THRESH_BINARY);
-        // // Blur to eliminate noise
-        // blur(thres, thres, cv::Size(BLUR_Size, BLUR_Size));
 
         cv::Mat element = getStructuringElement( cv::MORPH_ELLIPSE, cv::Size( 2*ERODE_SIZE + 1, 2*ERODE_SIZE+1 ), cv::Point( ERODE_SIZE, ERODE_SIZE ) );
-        // Apply the dilation operation
+        // Apply the erode operation
         cv::erode(thres, thres, element );
-        thres.copyTo(out2);
-
-        cv::threshold(thres, thres, SENSITIVITY_VALUE, 255, cv::THRESH_BINARY);
+
+        // Intermediate output
+        thres.copyTo(out2);
 
         cv::Mat closed_thres;
         cv::Mat structuringElement = getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(CLOSE_SIZE, CLOSE_SIZE));
         cv::morphologyEx( thres, closed_thres, cv::MORPH_CLOSE, structuringElement );
-        // dilated_thres.copyTo(output);
-
-        //closed_thres.copyTo(output);
-
-        //notice how we use the '&' operator for objectDetected and output. This is because we wish
-        //to take the values passed into the function and manipulate them, rather than just working with a copy.
-        //eg. we draw to the output to be displayed in the main() function.
         bool objectDetected = false;
         cv::Mat temp;
         closed_thres.copyTo(temp);
-        //these two vectors needed for output of findContours
         vector< vector<cv::Point> > contours;
         vector<cv::Vec4i> hierarchy;
 
         //find contours of filtered image using openCV findContours function
-        //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
         cv::findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours
 
         //if contours vector is not empty, we have found some objects
-        if(contours.size()>0)objectDetected=true;
-        else objectDetected = false;
-
-        if(objectDetected){
-            //the largest contour is found at the end of the contours vector
-            //we will simply assume that the biggest contour is the object we are looking for.
-            //vector< vector<cv::Point> > largestContourVec;
-            //largestContourVec.push_back(contours.at(contours.size()-1));
-            //make a bounding rectangle around the largest contour then find its centroid
-            //this will be the object's final esticv::Mated position.
+        if(contours.size()>0){
             vector<cv::Rect> nc_rects; // Non connected rectangles
             for(size_t i=0; i<contours.size(); i++) nc_rects.push_back(cv::boundingRect(contours[i]));
             vector<cv::Rect> c_rects; // Connected rectangles
@@ -211,7 +175,6 @@ class Traite_image {
 
                 cv::rectangle(output, rect, cv::Scalar(0, 255, 0), 2);
             cv::Rect objBRect = c_rects.front();
-            //cv::rectangle(output, objBRect, cv::Scalar(0, 255, 0), 2);
             papillon::BoundingBox bbox = papillon::BoundingBox();
             bbox.x = objBRect.x / (float)cur.size().width;
             bbox.y = objBRect.y / (float)cur.size().height;
@@ -220,20 +183,8 @@ class Traite_image {
                 pub_cmd.publish(bbox);
             }
         }
 
-        //make some temp x and y variables so we dont have to type out so much
-        //~ int x = objectBoundingRectangle.x;
-        //~ int y = objectBoundingRectangle.y;
-        //~ int width = objectBoundingRectangle.width;
-        //~ int height = objectBoundingRectangle.height;
-
-        //draw a rectangle around the object
-        //rectangle(output, Point(x,y), Point(x+width, y+height), cv::Scalar(0, 255, 0), 2);
-
-        //write the position of the object to the screen
-        //putText(output,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,cv::Scalar(255,0,0),2);
     }
-
     void cleanBBoxes(vector<cv::Rect> nc_rects, cv::Size img, vector<cv::Rect> &c_rects) {
         int max = 0;
         for (auto r : nc_rects) {
@@ -272,4 +223,4 @@ int main(int argc, char **argv)
     ros::spin();
 
     return 0;
-}
+}
\ No newline at end of file
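
Reviewer note, not part of the patch: the detection path kept by the second large hunk is frame differencing followed by morphology and contour extraction. The standalone sketch below reproduces those steps outside the ROS node so they can be tried on two still frames. It only mirrors the OpenCV calls visible in the diff; the concrete ERODE_SIZE/CLOSE_SIZE values and the input file names are illustrative assumptions.

// Sketch only: absdiff -> erode -> close -> findContours -> boundingRect,
// mirroring the patched pipeline on two already-stabilised frames.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main() {
    const int ERODE_SIZE = 2;   // assumed value; the node uses its own constant
    const int CLOSE_SIZE = 30;  // assumed value; the node uses its own constant

    // Hypothetical inputs: two consecutive frames loaded as grayscale.
    cv::Mat prev = cv::imread("prev.png", cv::IMREAD_GRAYSCALE);
    cv::Mat cur  = cv::imread("cur.png",  cv::IMREAD_GRAYSCALE);
    if (prev.empty() || cur.empty() || prev.size() != cur.size()) return 1;

    cv::GaussianBlur(prev, prev, cv::Size(5, 5), 3.0);
    cv::GaussianBlur(cur,  cur,  cv::Size(5, 5), 3.0);

    // Pixel-wise difference of the two frames, then erode away isolated noise.
    cv::Mat thres;
    cv::absdiff(prev, cur, thres);
    cv::Mat element = cv::getStructuringElement(
        cv::MORPH_ELLIPSE,
        cv::Size(2 * ERODE_SIZE + 1, 2 * ERODE_SIZE + 1),
        cv::Point(ERODE_SIZE, ERODE_SIZE));
    cv::erode(thres, thres, element);

    // Morphological close so one moving object becomes one solid blob.
    cv::Mat closed;
    cv::Mat closeElement = cv::getStructuringElement(
        cv::MORPH_ELLIPSE, cv::Size(CLOSE_SIZE, CLOSE_SIZE));
    cv::morphologyEx(thres, closed, cv::MORPH_CLOSE, closeElement);

    // External contours of the blobs, one bounding rectangle per contour
    // (the node then passes these rectangles through cleanBBoxes before publishing).
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(closed, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (const std::vector<cv::Point>& c : contours) {
        cv::Rect r = cv::boundingRect(c);
        std::cout << r.x << " " << r.y << " " << r.width << " " << r.height << "\n";
    }
    return 0;
}

Note that, as in the patch, the difference image is not binarised with a fixed SENSITIVITY_VALUE any more; findContours treats any non-zero pixel as foreground, so the erode and close steps carry the noise rejection.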
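A second note on the published message: the hunk around the old line 211 divides the box coordinates by the frame dimensions, so subscribers of pub_cmd receive fractions of the image rather than pixel positions. Below is a minimal illustration of that conversion, using a plain struct as a stand-in for the generated papillon::BoundingBox message; the struct, the width/height handling, and the example numbers are assumptions, only the x/y normalisation is taken from the diff.

// Stand-in for papillon::BoundingBox so the snippet compiles on its own.
#include <opencv2/core.hpp>
#include <iostream>

struct NormBox { float x, y, width, height; };

static NormBox toNormBox(const cv::Rect& r, const cv::Size& frame) {
    NormBox b;
    b.x = r.x / (float)frame.width;             // as in the patch
    b.y = r.y / (float)frame.height;            // as in the patch
    b.width  = r.width  / (float)frame.width;   // assumed to follow the same pattern
    b.height = r.height / (float)frame.height;  // assumed to follow the same pattern
    return b;
}

int main() {
    // A 32x24 box at (64, 48) in a 640x480 frame -> 0.1 0.1 0.05 0.05.
    NormBox b = toNormBox(cv::Rect(64, 48, 32, 24), cv::Size(640, 480));
    std::cout << b.x << " " << b.y << " " << b.width << " " << b.height << "\n";
    return 0;
}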