// papillon/src/papillon.cpp
#include "ros/ros.h"
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <geometry_msgs/Twist.h>
#include <typeinfo>
#include <opencv/cv.h>
#include <sstream>
using namespace cv;
using namespace std;
class Traite_image {
public:
    // Detection / control tuning constants.
    const static int THRESHOLD_DETECT_SENSITIVITY = 10; // binary threshold on motion magnitude
    const static int BLUR_SIZE = 5;                     // kernel size used to smooth the motion mask
    const static int THRESHOLD_MOV = 5;                 // dead zone (in pixels) around the image centre
    const static int crop_ratio = 8;                    // border fraction cropped after stabilisation

    Mat prev;           // previous (downscaled) frame
    Mat last_T;         // last valid stabilisation transform
    bool first = true;  // true until the first frame has been received
    int resize_f = 1;   // downscale factor applied to incoming frames

    int theObject[2] = {0, 0};
    Rect objectBoundingRectangle = Rect(0, 0, 0, 0); // last detected bounding box

    ros::NodeHandle n;
    image_transport::ImageTransport it;
    image_transport::Publisher pub_img;
    ros::Publisher pub_cmd;
    image_transport::Subscriber sub;
    Traite_image(bool sim) : n("~"), it(n) {
        // Topic names depend on whether we drive the real Bebop or the simulator.
        std::string img_out, cmd_out, img_in;
        if (!sim) {
            img_out = "/image_out";
            cmd_out = "/bebop/cmd_vel";
            img_in  = "/bebop/image_raw";
        } else {
            img_out = "/image_out";
            cmd_out = "/vrep/drone/cmd_vel";
            img_in  = "/usb_cam/image_raw";
        }
        pub_img = it.advertise(img_out, 1);
        pub_cmd = n.advertise<geometry_msgs::Twist>(cmd_out, 1);
        sub = it.subscribe(img_in, 1,
                           [this](const sensor_msgs::ImageConstPtr& img) { this->on_image(img); },
                           ros::VoidPtr(), image_transport::TransportHints("compressed"));
    }
    // Callback: process one camera frame and publish the annotated result.
    void on_image(const sensor_msgs::ImageConstPtr& msg) {
        cv_bridge::CvImageConstPtr bridge_input;
        try {
            bridge_input = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
        }
        catch (cv_bridge::Exception& e) {
            ROS_ERROR("cv_bridge exception caught: %s", e.what());
            return;
        }
        const Mat& input = bridge_input->image;
        Mat next;
        resize(input, next, Size(input.size().width / resize_f, input.size().height / resize_f));
        Mat output;

        if (first) {
            // No previous frame yet: bootstrap with a copy of the current one.
            prev = next.clone();
            first = false;
            ROS_INFO("first done");
        }

        // Stabilise the current frame against the previous one, then crop the
        // borders (1/crop_ratio on each side) where the warp leaves artefacts.
        Mat next_stab;
        stabiliseImg(prev, next, next_stab);
        float crop_x = next_stab.size().width / crop_ratio;
        float crop_y = next_stab.size().height / crop_ratio;
        float crop_w = next_stab.size().width * (1 - 2.0 / crop_ratio);
        float crop_h = next_stab.size().height * (1 - 2.0 / crop_ratio);
        Rect myROI(crop_x, crop_y, crop_w, crop_h);
        Mat next_stab_cropped = next_stab(myROI);
        Mat prev_cropped = prev(myROI);

        searchForMovementOptFlow(prev_cropped, next_stab_cropped, output);

        pub_img.publish(cv_bridge::CvImage(msg->header, "rgb8", output).toImageMsg());
        // bridge_input is handled by a smart pointer: no explicit delete needed.
        //droneTracking(Rect(Point(0,0), output.size()));

        prev = next.clone();
    }
    // int-to-string helper (std::to_string would also do since C++11).
    string intToString(int number) {
        std::stringstream ss;
        ss << number;
        return ss.str();
    }
    // Estimate the global (camera) motion between prev and cur, then warp cur
    // back onto prev so that only scene motion remains.
    void stabiliseImg(Mat prev, Mat cur, Mat &output) {
        Mat cur_grey, prev_grey;
        // Frames arrive from cv_bridge as RGB8, so convert from RGB, not BGR.
        cvtColor(cur, cur_grey, COLOR_RGB2GRAY);
        cvtColor(prev, prev_grey, COLOR_RGB2GRAY);

        // Track good features from prev to cur.
        vector<Point2f> prev_corner, cur_corner;
        vector<Point2f> prev_corner2, cur_corner2;
        vector<uchar> status;
        vector<float> err;
        goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
        calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err);

        // Weed out bad matches.
        for (size_t i = 0; i < status.size(); i++) {
            if (status[i]) {
                prev_corner2.push_back(prev_corner[i]);
                cur_corner2.push_back(cur_corner[i]);
            }
        }

        // true = full affine (translation, rotation, scaling and shearing).
        Mat T = estimateRigidTransform(prev_corner2, cur_corner2, true);
        if (T.empty()) {
            // Estimation can fail; reuse the last valid transform,
            // or the identity if there is none yet.
            if (last_T.empty())
                last_T = Mat::eye(2, 3, CV_64F);
            last_T.copyTo(T);
        }
        T.copyTo(last_T);

        Mat cur2;
        warpAffine(cur, cur2, T, cur.size(), INTER_NEAREST | WARP_INVERSE_MAP);
        cur2.copyTo(output);
    }
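    // Possible refinement (a sketch, not wired into stabiliseImg above):
    // exponentially smooth the frame-to-frame transform to damp estimation
    // jitter. The helper name and the `alpha` gain are illustrative, not part
    // of the original pipeline.
    Mat smooth_T; // running low-pass estimate of the transform

    Mat smoothTransform(const Mat& T, double alpha = 0.3) {
        if (smooth_T.empty())
            T.copyTo(smooth_T);
        else
            smooth_T = alpha * T + (1.0 - alpha) * smooth_T; // blend the new estimate in
        return smooth_T;
    }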
    // Frame-differencing detector (alternative to the optical-flow version below).
    void searchForMovement(Mat prev, Mat cur, Mat &output) {
        Mat cur_grey, prev_grey;
        cur.copyTo(output);
        cvtColor(prev, prev_grey, COLOR_RGB2GRAY);
        cvtColor(cur, cur_grey, COLOR_RGB2GRAY);

        // Subtract the two frames and threshold the difference.
        Mat thres;
        absdiff(prev_grey, cur_grey, thres);
        threshold(thres, thres, THRESHOLD_DETECT_SENSITIVITY, 255, THRESH_BINARY);
        // Blur to eliminate noise, then re-threshold.
        blur(thres, thres, Size(BLUR_SIZE, BLUR_SIZE));
        threshold(thres, thres, THRESHOLD_DETECT_SENSITIVITY, 255, THRESH_BINARY);

        Mat temp;
        thres.copyTo(temp);
        // Output vectors for findContours.
        vector< vector<Point> > contours;
        vector<Vec4i> hierarchy;
        //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE); // retrieves all contours
        findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // external contours only

        bool objectDetected = contours.size() > 0;
        if (objectDetected) {
            // Assume the last contour in the vector is the object of interest,
            // and take its bounding rectangle as the object's estimated position.
            vector< vector<Point> > largestContourVec;
            largestContourVec.push_back(contours.at(contours.size() - 1));
            objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        }

        // Draw a rectangle around the object and print its position.
        int x = objectBoundingRectangle.x;
        int y = objectBoundingRectangle.y;
        int width = objectBoundingRectangle.width;
        int height = objectBoundingRectangle.height;
        rectangle(output, Point(x, y), Point(x + width, y + height), Scalar(0, 255, 0), 2);
        putText(output, "Tracking object at (" + intToString(x) + "," + intToString(y) + ")",
                Point(x, y), 1, 1, Scalar(255, 0, 0), 2);
    }
    // Optical-flow detector: dense Farneback flow, thresholded on magnitude.
    void searchForMovementOptFlow(Mat prev, Mat cur, Mat &output) {
        Mat cur_grey, prev_grey;
        cur.copyTo(output);
        cvtColor(prev, prev_grey, COLOR_RGB2GRAY);
        cvtColor(cur, cur_grey, COLOR_RGB2GRAY);

        Mat flow;
        calcOpticalFlowFarneback(prev_grey, cur_grey, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

        // Split the flow field into x/y components, convert to magnitude/angle.
        vector<Mat> flow_coord(2);
        Mat flow_norm, angle;
        split(flow, flow_coord);
        cartToPolar(flow_coord[0], flow_coord[1], flow_norm, angle);

        //threshold(flow_norm, flow_norm, THRESHOLD_DETECT_SENSITIVITY, 255, THRESH_BINARY);
        // Blur to eliminate noise, then keep only pixels with large motion.
        blur(flow_norm, flow_norm, Size(BLUR_SIZE, BLUR_SIZE));
        threshold(flow_norm, flow_norm, THRESHOLD_DETECT_SENSITIVITY, 255, THRESH_BINARY);
        flow_norm.convertTo(flow_norm, CV_8U);

        Mat temp;
        flow_norm.copyTo(temp);
        // Output vectors for findContours.
        vector< vector<Point> > contours;
        vector<Vec4i> hierarchy;
        //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE); // retrieves all contours
        findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // external contours only

        if (contours.size() > 0) { // if the contours vector is not empty, we have found some objects
            // Assume the last contour in the vector is the object of interest,
            // and take its bounding rectangle as the object's estimated position.
            vector< vector<Point> > largestContourVec;
            largestContourVec.push_back(contours.at(contours.size() - 1));
            objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        }

        // Draw a rectangle around the object and print its position.
        int x = objectBoundingRectangle.x;
        int y = objectBoundingRectangle.y;
        int width = objectBoundingRectangle.width;
        int height = objectBoundingRectangle.height;
        rectangle(output, Point(x, y), Point(x + width, y + height), Scalar(0, 255, 0), 2);
        putText(output, "Tracking object at (" + intToString(x) + "," + intToString(y) + ")",
                Point(x, y), 1, 1, Scalar(255, 0, 0), 2);
    }
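    // Both detectors above take the last contour returned by findContours as
    // the target, but findContours gives no size ordering. A more robust pick,
    // sketched here and not called by the original code (the helper name is
    // illustrative), is the contour with the largest area:
    static vector<Point> largestContourByArea(const vector< vector<Point> >& contours) {
        if (contours.empty())
            return vector<Point>(); // caller should check for an empty result
        size_t best = 0;
        double best_area = -1.0;
        for (size_t i = 0; i < contours.size(); i++) {
            double a = contourArea(contours[i]); // area enclosed by contour i
            if (a > best_area) {
                best_area = a;
                best = i;
            }
        }
        return contours[best];
    }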
    // Sanity check for a single flow vector (not used in the pipeline above).
    inline bool isFlowCorrect(Point2f u) {
        return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
    }
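    // Possible use of isFlowCorrect (a sketch, not part of the original flow
    // path): zero out NaN or absurd vectors in the Farneback output before
    // computing magnitudes, so cartToPolar never sees invalid values.
    void scrubFlow(Mat& flow) {
        for (int y = 0; y < flow.rows; y++) {
            for (int x = 0; x < flow.cols; x++) {
                Point2f& u = flow.at<Point2f>(y, x); // flow is CV_32FC2
                if (!isFlowCorrect(u))
                    u = Point2f(0.f, 0.f); // treat invalid flow as "no motion"
            }
        }
    }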
    // Simple bang-bang yaw control: turn towards the tracked object whenever
    // its centre leaves a dead zone of +/- THRESHOLD_MOV pixels around the
    // image centre.
    void droneTracking(Rect img_size) {
        Point2f centre_image = Point2f(img_size.width / 2, img_size.height / 2);
        Point2f centre_rect = Point2f(objectBoundingRectangle.x + objectBoundingRectangle.width / 2,
                                      objectBoundingRectangle.y + objectBoundingRectangle.height / 2);

        geometry_msgs::Twist twist = geometry_msgs::Twist();
        if (centre_rect.x < centre_image.x - THRESHOLD_MOV) {
            twist.angular.z = 0.2;
        }
        else if (centre_rect.x > centre_image.x + THRESHOLD_MOV) {
            twist.angular.z = -0.2;
        }
        pub_cmd.publish(twist);
    }
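    // A proportional variant of droneTracking (sketch only; the gain K_YAW is
    // an illustrative value, not from the original code): scale the yaw rate
    // with the normalised horizontal error instead of using a fixed 0.2.
    void droneTrackingProportional(Rect img_size) {
        const double K_YAW = 0.5; // illustrative gain
        double centre_image_x = img_size.width / 2.0;
        double centre_rect_x = objectBoundingRectangle.x + objectBoundingRectangle.width / 2.0;
        double error = (centre_rect_x - centre_image_x) / centre_image_x; // normalised to [-1, 1]

        geometry_msgs::Twist twist;
        if (fabs(centre_rect_x - centre_image_x) > THRESHOLD_MOV)
            twist.angular.z = -K_YAW * error; // same sign convention as above
        pub_cmd.publish(twist);
    }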
};
int main(int argc, char **argv)
{
    ros::init(argc, argv, "Papillon");
    bool sim = false;
    Traite_image node(sim);
    ros::spin();
    return 0;
}
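
// Example usage (assuming the package and executable are both named
// "papillon", per the file path):
//   rosrun papillon papillon
// Set `sim` to true in main() to target the V-REP simulator topics instead of
// the Bebop driver.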