#include "ros/ros.h" #include #include #include #include #include using namespace cv; using namespace std; class Traite_image { public: const static int SENSITIVITY_VALUE = 30; const static int BLUR_SIZE = 10; Mat prev; Mat last_T; bool first = true; int resize_f = 4; int theObject[2] = {0,0}; Rect objectBoundingRectangle = Rect(0,0,0,0); ros::NodeHandle n; image_transport::ImageTransport it; image_transport::Publisher pub; image_transport::Subscriber sub; Traite_image() : n("~"),it(n) { pub = it.advertise("/image_out", 1); sub = it.subscribe("/usb_cam/image_raw", 1, [this](const sensor_msgs::ImageConstPtr& img) -> void { this->on_image(img);},ros::VoidPtr(),image_transport::TransportHints("compressed")); } // This processes an image and publishes the result. void on_image(const sensor_msgs::ImageConstPtr& msg) { cv_bridge::CvImageConstPtr bridge_input; try { bridge_input = cv_bridge::toCvShare(msg,sensor_msgs::image_encodings::RGB8); } catch (Exception& e) { std::ostringstream errstr; errstr << "cv_bridge exception caught: " << e.what(); return; } //Mat& input = const_cast(bridge_input->image); const Mat& input = bridge_input->image; Mat next; Mat next_grey; resize(input, next, Size(input.size().width/resize_f, input.size().height/resize_f)); cvtColor(next, next_grey, CV_BGR2GRAY); Mat output;// = input.clone(); // (input.rows, input.cols, CV_32FC2); //ROS_INFO("got input"); if (first) { prev = next.clone(); first = false; ROS_INFO("first done"); } stabiliseImg(prev, next, output); // Subtract the 2 last frames and threshold them //Mat thres; //absdiff(prev,next,thres); //threshold(thres, thres, SENSITIVITY_VALUE, 255, THRESH_BINARY); // Blur to eliminate noise //blur(thres, thres, Size(BLUR_SIZE, BLUR_SIZE)); //threshold(thres, output, SENSITIVITY_VALUE, 255, THRESH_BINARY); //searchForMovement(thres, output); pub.publish(cv_bridge::CvImage(msg->header, "rgb8", output).toImageMsg()); // bridge_input is handled by a smart-pointer. No explicit delete needed. //ROS_INFO("pub"); prev = next.clone(); } //int to string helper function string intToString(int number){ //this function has a number input and string output std::stringstream ss; ss << number; return ss.str(); } void stabiliseImg(Mat prev, Mat cur, Mat &output){ Mat prev_grey, cur_grey; cvtColor(cur, cur_grey, COLOR_BGR2GRAY); cvtColor(prev, prev_grey, COLOR_BGR2GRAY); Point2f srcTri[3]; Point2f dstTri[3]; Mat warp_mat( 2, 3, CV_32FC1 ); // vector from prev to cur vector prev_corner, cur_corner; vector prev_corner2, cur_corner2; vector status; vector err; goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30); calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err); // weed out bad matches for(size_t i=0; i < status.size(); i++) { if(status[i]) { prev_corner2.push_back(prev_corner[i]); cur_corner2.push_back(cur_corner[i]); } } // translation + rotation only Mat T(2, 3, CV_32FC1); T = estimateRigidTransform(prev_corner2, cur_corner2, false); // false = rigid transform, no scaling/shearing double dx = T.at(0,2); double dy = T.at(1,2); double da = atan2(T.at(1,0), T.at(0,0)); // cv::Mat H = cv::Mat(3,3,T.type()); // H.at(0,0) = T.at(0,0); // H.at(0,1) = T.at(0,1); // H.at(0,2) = T.at(0,2); // H.at(1,0) = T.at(1,0); // H.at(1,1) = T.at(1,1); // H.at(1,2) = T.at(1,2); // H.at(2,0) = 0.0; // H.at(2,1) = 0.0; // H.at(2,2) = 1.0; T.at(0,0) = cos(da); T.at(0,1) = -sin(da); T.at(1,0) = sin(da); T.at(1,1) = cos(da); T.at(0,2) = dx; T.at(1,2) = dy; // in rare cases no transform is found. We'll just use the last known good transform. if(T.data == NULL) { last_T.copyTo(T); } T.copyTo(last_T); /// Set your 3 points to calculate the Affine Transform srcTri[0] = Point2f( 0,0 ); srcTri[1] = Point2f( prev.cols, 0 ); srcTri[2] = Point2f( 0, prev.rows ); dstTri[0] = Point2f( prev.cols / 2, prev.rows / 2 ); dstTri[1] = Point2f( prev.cols * 3 / 2, prev.rows / 2); dstTri[2] = Point2f( prev.cols / 2, prev.rows * 3 / 2 ); /// Get the Affine Transform warp_mat = getAffineTransform( srcTri, dstTri ); warpAffine(prev,output,T,cv::Size(prev.cols+cur.cols,prev.rows+cur.rows)); warpAffine(output,output,warp_mat,cv::Size(prev.cols+cur.cols,prev.rows+cur.rows)); warpAffine(cur,cur,warp_mat,cv::Size(prev.cols+cur.cols,prev.rows+cur.rows)); //Mat half(output, cv::Rect(0, 0,cur.cols,cur.rows)); cur.copyTo(output, cur); } void searchForMovement(Mat thresholdImage, Mat &cameraFeed){ //notice how we use the '&' operator for objectDetected and cameraFeed. This is because we wish //to take the values passed into the function and manipulate them, rather than just working with a copy. //eg. we draw to the cameraFeed to be displayed in the main() function. bool objectDetected = false; Mat temp; thresholdImage.copyTo(temp); //these two vectors needed for output of findContours vector< vector > contours; vector hierarchy; //find contours of filtered image using openCV findContours function //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours //if contours vector is not empty, we have found some objects if(contours.size()>0)objectDetected=true; else objectDetected = false; if(objectDetected){ //the largest contour is found at the end of the contours vector //we will simply assume that the biggest contour is the object we are looking for. vector< vector > largestContourVec; largestContourVec.push_back(contours.at(contours.size()-1)); //make a bounding rectangle around the largest contour then find its centroid //this will be the object's final estimated position. objectBoundingRectangle = boundingRect(largestContourVec.at(0)); int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2; int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2; //update the objects positions by changing the 'theObject' array values theObject[0] = xpos , theObject[1] = ypos; } //make some temp x and y variables so we dont have to type out so much int x = theObject[0]; int y = theObject[1]; //draw some crosshairs around the object circle(cameraFeed,Point(x,y),20,Scalar(0,255,0),2); line(cameraFeed,Point(x,y),Point(x,y-25),Scalar(0,255,0),2); line(cameraFeed,Point(x,y),Point(x,y+25),Scalar(0,255,0),2); line(cameraFeed,Point(x,y),Point(x-25,y),Scalar(0,255,0),2); line(cameraFeed,Point(x,y),Point(x+25,y),Scalar(0,255,0),2); //write the position of the object to the screen putText(cameraFeed,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2); } inline bool isFlowCorrect(Point2f u) { return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9; } Vec3b computeColor(float fx, float fy) { static bool first = true; // relative lengths of color transitions: // these are chosen based on perceptual similarity // (e.g. one can distinguish more shades between red and yellow // than between yellow and green) const int RY = 15; const int YG = 6; const int GC = 4; const int CB = 11; const int BM = 13; const int MR = 6; const int NCOLS = RY + YG + GC + CB + BM + MR; static Vec3i colorWheel[NCOLS]; if (first) { int k = 0; for (int i = 0; i < RY; ++i, ++k) colorWheel[k] = Vec3i(255, 255 * i / RY, 0); for (int i = 0; i < YG; ++i, ++k) colorWheel[k] = Vec3i(255 - 255 * i / YG, 255, 0); for (int i = 0; i < GC; ++i, ++k) colorWheel[k] = Vec3i(0, 255, 255 * i / GC); for (int i = 0; i < CB; ++i, ++k) colorWheel[k] = Vec3i(0, 255 - 255 * i / CB, 255); for (int i = 0; i < BM; ++i, ++k) colorWheel[k] = Vec3i(255 * i / BM, 0, 255); for (int i = 0; i < MR; ++i, ++k) colorWheel[k] = Vec3i(255, 0, 255 - 255 * i / MR); first = false; } const float rad = sqrt(fx * fx + fy * fy); const float a = atan2(-fy, -fx) / (float)CV_PI; const float fk = (a + 1.0f) / 2.0f * (NCOLS - 1); const int k0 = static_cast(fk); const int k1 = (k0 + 1) % NCOLS; const float f = fk - k0; Vec3b pix; for (int b = 0; b < 3; b++) { const float col0 = colorWheel[k0][b] / 255.f; const float col1 = colorWheel[k1][b] / 255.f; float col = (1 - f) * col0 + f * col1; if (rad <= 1) col = 1 - rad * (1 - col); // increase saturation with radius else col *= .75; // out of range pix[2 - b] = static_cast(255.f * col); } return pix; } void drawOpticalFlow(const Mat_& flow, Mat& dst, float maxmotion = -1) { dst.create(flow.size(), CV_8UC3); dst.setTo(Scalar::all(0)); // determine motion range: float maxrad = maxmotion; if (maxmotion <= 0) { maxrad = 1; for (int y = 0; y < flow.rows; ++y) { for (int x = 0; x < flow.cols; ++x) { Point2f u = flow(y, x); if (!isFlowCorrect(u)) continue; maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y)); } } } for (int y = 0; y < flow.rows; ++y) { for (int x = 0; x < flow.cols; ++x) { Point2f u = flow(y, x); if (isFlowCorrect(u)) dst.at(y, x) = computeColor(u.x / maxrad, u.y / maxrad); } } } }; int main(int argc, char **argv) { ros::init(argc, argv, "test_opencv"); Traite_image dataset=Traite_image(); ros::spin(); return 0; }