diff --git a/CMakeLists.txt b/CMakeLists.txt index d6bc35e..38094c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,12 +6,16 @@ find_package(catkin REQUIRED geometry_msgs nav_msgs roscpp + roslint sensor_msgs std_msgs std_srvs tf ) +roslint_cpp() +roslint_add_test() + find_package(Boost REQUIRED COMPONENTS system thread) find_package(stage REQUIRED) @@ -22,7 +26,13 @@ include_directories( ${STAGE_INCLUDE_DIRS} ) -catkin_package() +catkin_package( + CATKIN_DEPENDS + geometry_msgs + nav_msgs + sensor_msgs + std_msgs +) # Declare a cpp executable add_executable(stageros src/stageros.cpp) diff --git a/package.xml b/package.xml index ef3ccdf..5e53d17 100644 --- a/package.xml +++ b/package.xml @@ -1,8 +1,8 @@ - + stage_ros 1.8.0 - This package provides ROS specific hooks for stage + ROS specific hooks for stage William Woodall @@ -15,25 +15,17 @@ Brian Gerky catkin + roslint - boost - geometry_msgs - nav_msgs - roscpp - rostest - sensor_msgs - stage - std_msgs - std_srvs - tf + boost + geometry_msgs + nav_msgs + roscpp + sensor_msgs + stage + std_msgs + std_srvs + tf - boost - geometry_msgs - nav_msgs - roscpp - sensor_msgs - stage - std_msgs - std_srvs - tf + rostest diff --git a/src/stageros.cpp b/src/stageros.cpp index 39d040a..30b0707 100644 --- a/src/stageros.cpp +++ b/src/stageros.cpp @@ -27,17 +27,17 @@ #include #include #include +#include +#include #include #include #include #include - // libstage #include - // roscpp #include #include @@ -67,250 +67,250 @@ class StageNode { private: + // roscpp-related bookkeeping + ros::NodeHandle n_; + + // A mutex to lock access to fields that are used in message callbacks + boost::mutex msg_lock; + + // The models that we're interested in + std::vector cameramodels; + std::vector lasermodels; + std::vector positionmodels; + + // a structure representing a robot inthe simulator + struct StageRobot + { + // stage related models + Stg::ModelPosition* positionmodel; // one position + std::vector cameramodels; // multiple cameras per position + std::vector lasermodels; // multiple rangers per position + + // ros publishers + ros::Publisher odom_pub; // one odom + ros::Publisher ground_truth_pub; // one ground truth + + std::vector image_pubs; // multiple images + std::vector depth_pubs; // multiple depths + std::vector camera_pubs; // multiple cameras + std::vector laser_pubs; // multiple lasers + + ros::Subscriber cmdvel_sub; // one cmd_vel subscriber + }; + + std::vector robotmodels_; + + // Used to remember initial poses for soft reset + std::vector initial_poses; + ros::ServiceServer reset_srv_; + + ros::Publisher clock_pub_; + + bool isDepthCanonical; + bool use_model_names; + + // A helper function that is executed for each stage model. We use it + // to search for models of interest. + static void ghfunc(Stg::Model* mod, StageNode* node); + + static bool s_update(Stg::World* world, StageNode* node) + { + node->WorldCallback(); + // We return false to indicate that we want to be called again (an + // odd convention, but that's the way that Stage works). + return false; + } - // roscpp-related bookkeeping - ros::NodeHandle n_; + // Appends the given robot ID to the given message name. If omitRobotID + // is true, an unaltered copy of the name is returned. 
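The comment above describes the naming rule used throughout this node: with a single position model (and use_model_names disabled) device names pass through unchanged; otherwise they are prefixed per robot. A minimal standalone sketch of the simpler overload, for illustration only (the hypothetical prefixForRobot helper is not part of this patch, and the real mapName() additionally substitutes the Stage model token when use_model_names is set):

    // Sketch of the robot-ID prefixing rule implemented by mapName().
    #include <cstdio>
    #include <string>

    std::string prefixForRobot(const std::string& name, size_t robotID, size_t robotCount)
    {
      if (robotCount <= 1)
        return name;                                   // e.g. "odom" stays "odom"
      char buf[100];
      snprintf(buf, sizeof(buf), "/robot_%u/%s", (unsigned int)robotID, name.c_str());
      return std::string(buf);                         // e.g. "/robot_2/base_scan"
    }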
+ const char *mapName(const char *name, size_t robotID, Stg::Model* mod) const; + const char *mapName(const char *name, size_t robotID, size_t deviceID, Stg::Model* mod) const; - // A mutex to lock access to fields that are used in message callbacks - boost::mutex msg_lock; + tf::TransformBroadcaster tf; - // The models that we're interested in - std::vector cameramodels; - std::vector lasermodels; - std::vector positionmodels; + // Last time that we received a velocity command + ros::Time base_last_cmd; + ros::Duration base_watchdog_timeout; - //a structure representing a robot inthe simulator - struct StageRobot - { - //stage related models - Stg::ModelPosition* positionmodel; //one position - std::vector cameramodels; //multiple cameras per position - std::vector lasermodels; //multiple rangers per position - - //ros publishers - ros::Publisher odom_pub; //one odom - ros::Publisher ground_truth_pub; //one ground truth - - std::vector image_pubs; //multiple images - std::vector depth_pubs; //multiple depths - std::vector camera_pubs; //multiple cameras - std::vector laser_pubs; //multiple lasers - - ros::Subscriber cmdvel_sub; //one cmd_vel subscriber - }; - - std::vector robotmodels_; - - // Used to remember initial poses for soft reset - std::vector initial_poses; - ros::ServiceServer reset_srv_; - - ros::Publisher clock_pub_; - - bool isDepthCanonical; - bool use_model_names; - - // A helper function that is executed for each stage model. We use it - // to search for models of interest. - static void ghfunc(Stg::Model* mod, StageNode* node); - - static bool s_update(Stg::World* world, StageNode* node) - { - node->WorldCallback(); - // We return false to indicate that we want to be called again (an - // odd convention, but that's the way that Stage works). - return false; - } + // Current simulation time + ros::Time sim_time; + + // Last time we saved global position (for velocity calculation). + ros::Time base_last_globalpos_time; + // Last published global pose of each robot + std::vector base_last_globalpos; - // Appends the given robot ID to the given message name. If omitRobotID - // is true, an unaltered copy of the name is returned. - const char *mapName(const char *name, size_t robotID, Stg::Model* mod) const; - const char *mapName(const char *name, size_t robotID, size_t deviceID, Stg::Model* mod) const; +public: + // Constructor; stage itself needs argc/argv. fname is the .world file + // that stage should load. + StageNode(int argc, char** argv, bool gui, const char* fname, bool use_model_names); + ~StageNode(); - tf::TransformBroadcaster tf; + // Subscribe to models of interest. Currently, we find and subscribe + // to the first 'laser' model and the first 'position' model. Returns + // 0 on success (both models subscribed), -1 otherwise. + int SubscribeModels(); - // Last time that we received a velocity command - ros::Time base_last_cmd; - ros::Duration base_watchdog_timeout; + // Our callback + void WorldCallback(); - // Current simulation time - ros::Time sim_time; - - // Last time we saved global position (for velocity calculation). - ros::Time base_last_globalpos_time; - // Last published global pose of each robot - std::vector base_last_globalpos; + // Do one update of the world. May pause if the next update time + // has not yet arrived. + bool UpdateWorld(); -public: - // Constructor; stage itself needs argc/argv. fname is the .world file - // that stage should load. 
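For context, each simulated robot is driven through the cmd_vel subscriber declared above and reports odometry on its odom publisher; if no new command arrives within base_watchdog_timeout (0.2 s by default, see the constructor below) the node zeroes the robot's speed. A minimal client-side sketch, assuming a single robot and the usual unprefixed "cmd_vel" topic name (with several robots the name is prefixed, e.g. /robot_0/cmd_vel); this hypothetical node is not part of the patch:

    // Hypothetical teleop-style client for a single simulated robot.
    #include <ros/ros.h>
    #include <geometry_msgs/Twist.h>

    int main(int argc, char** argv)
    {
      ros::init(argc, argv, "stage_cmd_example");
      ros::NodeHandle nh;
      ros::Publisher cmd_pub = nh.advertise<geometry_msgs::Twist>("cmd_vel", 1);

      ros::Rate rate(10);  // republish faster than the 0.2 s watchdog default
      while (ros::ok())
      {
        geometry_msgs::Twist cmd;
        cmd.linear.x = 0.2;   // m/s forward
        cmd.angular.z = 0.1;  // rad/s turn
        cmd_pub.publish(cmd);
        rate.sleep();
      }
      return 0;
    }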
- StageNode(int argc, char** argv, bool gui, const char* fname, bool use_model_names); - ~StageNode(); - - // Subscribe to models of interest. Currently, we find and subscribe - // to the first 'laser' model and the first 'position' model. Returns - // 0 on success (both models subscribed), -1 otherwise. - int SubscribeModels(); - - // Our callback - void WorldCallback(); - - // Do one update of the world. May pause if the next update time - // has not yet arrived. - bool UpdateWorld(); - - // Message callback for a MsgBaseVel message, which set velocities. - void cmdvelReceived(int idx, const boost::shared_ptr& msg); - - // Service callback for soft reset - bool cb_reset_srv(std_srvs::Empty::Request& request, std_srvs::Empty::Response& response); - - // The main simulator object - Stg::World* world; + // Message callback for a MsgBaseVel message, which set velocities. + void cmdvelReceived(int idx, const boost::shared_ptr& msg); + + // Service callback for soft reset + bool cb_reset_srv(std_srvs::Empty::Request& request, std_srvs::Empty::Response& response); + + // The main simulator object + Stg::World* world; }; // since stageros is single-threaded, this is OK. revisit if that changes! +// returns full device name with IDs const char * StageNode::mapName(const char *name, size_t robotID, Stg::Model* mod) const { - //ROS_INFO("Robot %lu: Device %s", robotID, name); - bool umn = this->use_model_names; + // ROS_INFO("Robot %lu: Device %s", robotID, name); + bool umn = this->use_model_names; - if ((positionmodels.size() > 1 ) || umn) - { - static char buf[100]; - std::size_t found = std::string(((Stg::Ancestor *) mod)->Token()).find(":"); + if ((positionmodels.size() > 1) || umn) + { + static char buf[100]; + std::size_t found = std::string(((Stg::Ancestor *) mod)->Token()).find(":"); - if ((found==std::string::npos) && umn) - { - snprintf(buf, sizeof(buf), "/%s/%s", ((Stg::Ancestor *) mod)->Token(), name); - } - else - { - snprintf(buf, sizeof(buf), "/robot_%u/%s", (unsigned int)robotID, name); - } - - return buf; + if ((found == std::string::npos) && umn) + { + snprintf(buf, sizeof(buf), "/%s/%s", ((Stg::Ancestor *) mod)->Token(), name); } else - return name; + { + snprintf(buf, sizeof(buf), "/robot_%u/%s", (unsigned int)robotID, name); + } + + return buf; + } + else + return name; } +// returns full device name with IDs and numbering const char * StageNode::mapName(const char *name, size_t robotID, size_t deviceID, Stg::Model* mod) const { - //ROS_INFO("Robot %lu: Device %s:%lu", robotID, name, deviceID); - bool umn = this->use_model_names; + // ROS_INFO("Robot %lu: Device %s:%lu", robotID, name, deviceID); + bool umn = this->use_model_names; - if ((positionmodels.size() > 1 ) || umn) - { - static char buf[100]; - std::size_t found = std::string(((Stg::Ancestor *) mod)->Token()).find(":"); + if ((positionmodels.size() > 1) || umn) + { + static char buf[100]; + std::size_t found = std::string(((Stg::Ancestor *) mod)->Token()).find(":"); - if ((found==std::string::npos) && umn) - { - snprintf(buf, sizeof(buf), "/%s/%s_%u", ((Stg::Ancestor *) mod)->Token(), name, (unsigned int)deviceID); - } - else - { - snprintf(buf, sizeof(buf), "/robot_%u/%s_%u", (unsigned int)robotID, name, (unsigned int)deviceID); - } - - return buf; + if ((found == std::string::npos) && umn) + { + snprintf(buf, sizeof(buf), "/%s/%s_%u", ((Stg::Ancestor *) mod)->Token(), name, (unsigned int)deviceID); } else { - static char buf[100]; - snprintf(buf, sizeof(buf), "/%s_%u", name, (unsigned int)deviceID); - return buf; + 
snprintf(buf, sizeof(buf), "/robot_%u/%s_%u", (unsigned int)robotID, name, (unsigned int)deviceID); } + + return buf; + } + else + { + static char buf[100]; + snprintf(buf, sizeof(buf), "/%s_%u", name, (unsigned int)deviceID); + return buf; + } } void StageNode::ghfunc(Stg::Model* mod, StageNode* node) { - //printf( "inspecting %s, parent\n", mod->Token() ); + // printf( "inspecting %s, parent\n", mod->Token() ); - if (dynamic_cast(mod)) { - node->lasermodels.push_back(dynamic_cast(mod)); + if (dynamic_cast(mod)) + { + node->lasermodels.push_back(dynamic_cast(mod)); } - if (dynamic_cast(mod)) { - Stg::ModelPosition * p = dynamic_cast(mod); - // remember initial poses - node->positionmodels.push_back(p); - node->initial_poses.push_back(p->GetGlobalPose()); - } - if (dynamic_cast(mod)) { - node->cameramodels.push_back(dynamic_cast(mod)); + if (dynamic_cast(mod)) + { + Stg::ModelPosition * p = dynamic_cast(mod); + // remember initial poses + node->positionmodels.push_back(p); + node->initial_poses.push_back(p->GetGlobalPose()); + } + if (dynamic_cast(mod)) + { + node->cameramodels.push_back(dynamic_cast(mod)); } } - - - bool StageNode::cb_reset_srv(std_srvs::Empty::Request& request, std_srvs::Empty::Response& response) { ROS_INFO("Resetting stage!"); - for (size_t r = 0; r < this->positionmodels.size(); r++) { + for (size_t r = 0; r < this->positionmodels.size(); r++) + { this->positionmodels[r]->SetPose(this->initial_poses[r]); this->positionmodels[r]->SetStall(false); } return true; } - - void StageNode::cmdvelReceived(int idx, const boost::shared_ptr& msg) { - boost::mutex::scoped_lock lock(msg_lock); - this->positionmodels[idx]->SetSpeed(msg->linear.x, - msg->linear.y, - msg->angular.z); - this->base_last_cmd = this->sim_time; + boost::mutex::scoped_lock lock(msg_lock); + this->positionmodels[idx]->SetSpeed(msg->linear.x, + msg->linear.y, + msg->angular.z); + this->base_last_cmd = this->sim_time; } StageNode::StageNode(int argc, char** argv, bool gui, const char* fname, bool use_model_names) { - this->use_model_names = use_model_names; - this->sim_time.fromSec(0.0); - this->base_last_cmd.fromSec(0.0); - double t; - ros::NodeHandle localn("~"); - if(!localn.getParam("base_watchdog_timeout", t)) - t = 0.2; - this->base_watchdog_timeout.fromSec(t); - - if(!localn.getParam("is_depth_canonical", isDepthCanonical)) - isDepthCanonical = true; - - // We'll check the existence of the world file, because libstage doesn't - // expose its failure to open it. Could go further with checks (e.g., is - // it readable by this user). - struct stat s; - if(stat(fname, &s) != 0) - { - ROS_FATAL("The world file %s does not exist.", fname); - ROS_BREAK(); - } + this->use_model_names = use_model_names; + this->sim_time.fromSec(0.0); + this->base_last_cmd.fromSec(0.0); + double t; + ros::NodeHandle localn("~"); + if (!localn.getParam("base_watchdog_timeout", t)) + t = 0.2; + this->base_watchdog_timeout.fromSec(t); + + if (!localn.getParam("is_depth_canonical", isDepthCanonical)) + isDepthCanonical = true; + + // We'll check the existence of the world file, because libstage doesn't + // expose its failure to open it. Could go further with checks (e.g., is + // it readable by this user). 
+ struct stat s; + if (stat(fname, &s) != 0) + { + ROS_FATAL("The world file %s does not exist.", fname); + ROS_BREAK(); + } - // initialize libstage - Stg::Init( &argc, &argv ); + // initialize libstage + Stg::Init(&argc, &argv); - if(gui) - this->world = new Stg::WorldGui(600, 400, "Stage (ROS)"); - else - this->world = new Stg::World(); + if (gui) + this->world = new Stg::WorldGui(600, 400, "Stage (ROS)"); + else + this->world = new Stg::World(); - this->world->Load(fname); + this->world->Load(fname); - // todo: reverse the order of these next lines? try it . + // add: reverse the order of these next lines? try it . - this->world->AddUpdateCallback((Stg::world_callback_t)s_update, this); + this->world->AddUpdateCallback((Stg::world_callback_t)s_update, this); - // inspect every model to locate the things we care about - this->world->ForEachDescendant((Stg::model_callback_t)ghfunc, this); + // inspect every model to locate the things we care about + this->world->ForEachDescendant((Stg::model_callback_t)ghfunc, this); } @@ -323,463 +323,506 @@ StageNode::StageNode(int argc, char** argv, bool gui, const char* fname, bool us int StageNode::SubscribeModels() { - n_.setParam("/use_sim_time", true); + n_.setParam("/use_sim_time", true); - for (size_t r = 0; r < this->positionmodels.size(); r++) - { - StageRobot* new_robot = new StageRobot; - new_robot->positionmodel = this->positionmodels[r]; - new_robot->positionmodel->Subscribe(); + for (size_t r = 0; r < this->positionmodels.size(); r++) + { + // subscribe position + StageRobot* new_robot = new StageRobot; + new_robot->positionmodel = this->positionmodels[r]; + new_robot->positionmodel->Subscribe(); - ROS_INFO( "Subscribed to Stage position model \"%s\"", this->positionmodels[r]->Token() ); - - for (size_t s = 0; s < this->lasermodels.size(); s++) - { - if (this->lasermodels[s] and this->lasermodels[s]->Parent() == new_robot->positionmodel) - { - new_robot->lasermodels.push_back(this->lasermodels[s]); - this->lasermodels[s]->Subscribe(); - ROS_INFO( "subscribed to Stage ranger \"%s\"", this->lasermodels[s]->Token() ); - } - } - - for (size_t s = 0; s < this->cameramodels.size(); s++) - { - if (this->cameramodels[s] and this->cameramodels[s]->Parent() == new_robot->positionmodel) - { - new_robot->cameramodels.push_back(this->cameramodels[s]); - this->cameramodels[s]->Subscribe(); - - ROS_INFO( "subscribed to Stage camera model \"%s\"", this->cameramodels[s]->Token() ); - } - } + ROS_INFO("Subscribed to Stage position model \"%s\"", this->positionmodels[r]->Token()); - // TODO - print the topic names nicely as well - ROS_INFO("Robot %s provided %lu rangers and %lu cameras", - new_robot->positionmodel->Token(), - new_robot->lasermodels.size(), - new_robot->cameramodels.size() ); - - new_robot->odom_pub = n_.advertise(mapName(ODOM, r, static_cast(new_robot->positionmodel)), 10); - new_robot->ground_truth_pub = n_.advertise(mapName(BASE_POSE_GROUND_TRUTH, r, static_cast(new_robot->positionmodel)), 10); - new_robot->cmdvel_sub = n_.subscribe(mapName(CMD_VEL, r, static_cast(new_robot->positionmodel)), 10, boost::bind(&StageNode::cmdvelReceived, this, r, _1)); + // subscribe rangers + for (size_t s = 0; s < this->lasermodels.size(); s++) + { + if (this->lasermodels[s] && this->lasermodels[s]->Parent() == new_robot->positionmodel) + { + new_robot->lasermodels.push_back(this->lasermodels[s]); + this->lasermodels[s]->Subscribe(); + ROS_INFO("subscribed to Stage ranger \"%s\"", this->lasermodels[s]->Token()); + } + } - for (size_t s = 0; s < 
new_robot->lasermodels.size(); ++s) - { - if (new_robot->lasermodels.size() == 1) - new_robot->laser_pubs.push_back(n_.advertise(mapName(BASE_SCAN, r, static_cast(new_robot->positionmodel)), 10)); - else - new_robot->laser_pubs.push_back(n_.advertise(mapName(BASE_SCAN, r, s, static_cast(new_robot->positionmodel)), 10)); + // subscribe cameras + for (size_t s = 0; s < this->cameramodels.size(); s++) + { + if (this->cameramodels[s] && this->cameramodels[s]->Parent() == new_robot->positionmodel) + { + new_robot->cameramodels.push_back(this->cameramodels[s]); + this->cameramodels[s]->Subscribe(); - } + ROS_INFO("subscribed to Stage camera model \"%s\"", this->cameramodels[s]->Token()); + } + } - for (size_t s = 0; s < new_robot->cameramodels.size(); ++s) - { - if (new_robot->cameramodels.size() == 1) - { - new_robot->image_pubs.push_back(n_.advertise(mapName(IMAGE, r, static_cast(new_robot->positionmodel)), 10)); - new_robot->depth_pubs.push_back(n_.advertise(mapName(DEPTH, r, static_cast(new_robot->positionmodel)), 10)); - new_robot->camera_pubs.push_back(n_.advertise(mapName(CAMERA_INFO, r, static_cast(new_robot->positionmodel)), 10)); - } - else - { - new_robot->image_pubs.push_back(n_.advertise(mapName(IMAGE, r, s, static_cast(new_robot->positionmodel)), 10)); - new_robot->depth_pubs.push_back(n_.advertise(mapName(DEPTH, r, s, static_cast(new_robot->positionmodel)), 10)); - new_robot->camera_pubs.push_back(n_.advertise(mapName(CAMERA_INFO, r, s, static_cast(new_robot->positionmodel)), 10)); - } - } + // ADD print the topic names nicely as well + ROS_INFO("Robot %s provided %lu rangers and %lu cameras", + new_robot->positionmodel->Token(), + new_robot->lasermodels.size(), + new_robot->cameramodels.size()); + + // create odom, ground_truth and cmd_vel publishers + new_robot->odom_pub = n_.advertise(mapName(ODOM, r, + static_cast(new_robot->positionmodel)), 10); + new_robot->ground_truth_pub = n_.advertise(mapName(BASE_POSE_GROUND_TRUTH, r, + static_cast(new_robot->positionmodel)), 10); + new_robot->cmdvel_sub = n_.subscribe(mapName(CMD_VEL, r, + static_cast(new_robot->positionmodel)), 10, boost::bind(&StageNode::cmdvelReceived, this, r, _1)); + + // create laser publishers + for (size_t s = 0; s < new_robot->lasermodels.size(); ++s) + { + if (new_robot->lasermodels.size() == 1) + new_robot->laser_pubs.push_back(n_.advertise(mapName(BASE_SCAN, r, + static_cast(new_robot->positionmodel)), 10)); + else + new_robot->laser_pubs.push_back(n_.advertise(mapName(BASE_SCAN, r, s, + static_cast(new_robot->positionmodel)), 10)); + } - this->robotmodels_.push_back(new_robot); + // create camera publishers + for (size_t s = 0; s < new_robot->cameramodels.size(); ++s) + { + if (new_robot->cameramodels.size() == 1) + { + new_robot->image_pubs.push_back(n_.advertise(mapName(IMAGE, r, + static_cast(new_robot->positionmodel)), 10)); + new_robot->depth_pubs.push_back(n_.advertise(mapName(DEPTH, r, + static_cast(new_robot->positionmodel)), 10)); + new_robot->camera_pubs.push_back(n_.advertise(mapName(CAMERA_INFO, r, + static_cast(new_robot->positionmodel)), 10)); + } + else + { + new_robot->image_pubs.push_back(n_.advertise(mapName(IMAGE, r, s, + static_cast(new_robot->positionmodel)), 10)); + new_robot->depth_pubs.push_back(n_.advertise(mapName(DEPTH, r, s, + static_cast(new_robot->positionmodel)), 10)); + new_robot->camera_pubs.push_back(n_.advertise(mapName(CAMERA_INFO, r, s, + static_cast(new_robot->positionmodel)), 10)); + } } - clock_pub_ = n_.advertise("/clock", 10); + 
this->robotmodels_.push_back(new_robot); + } + clock_pub_ = n_.advertise("/clock", 10); - // advertising reset service - reset_srv_ = n_.advertiseService("reset_positions", &StageNode::cb_reset_srv, this); + // advertising reset service + reset_srv_ = n_.advertiseService("reset_positions", &StageNode::cb_reset_srv, this); - return(0); + return (0); } StageNode::~StageNode() -{ - for (std::vector::iterator r = this->robotmodels_.begin(); r != this->robotmodels_.end(); ++r) - delete *r; +{ + for (std::vector::iterator r = this->robotmodels_.begin(); r != this->robotmodels_.end(); ++r) + delete *r; } bool StageNode::UpdateWorld() { - return this->world->UpdateAll(); + return this->world->UpdateAll(); } void StageNode::WorldCallback() { - if( ! ros::ok() ) { - ROS_INFO( "ros::ok() is false. Quitting." ); + if (!ros::ok()) + { + ROS_INFO("ros::ok() is false. Quitting."); this->world->QuitAll(); return; } - - boost::mutex::scoped_lock lock(msg_lock); - this->sim_time.fromSec(world->SimTimeNow() / 1e6); - // We're not allowed to publish clock==0, because it used as a special - // value in parts of ROS, #4027. - if(this->sim_time.sec == 0 && this->sim_time.nsec == 0) + boost::mutex::scoped_lock lock(msg_lock); + + this->sim_time.fromSec(world->SimTimeNow() / 1e6); + // We're not allowed to publish clock==0, because it used as a special + // value in parts of ROS, #4027. + if (this->sim_time.sec == 0 && this->sim_time.nsec == 0) + { + ROS_DEBUG("Skipping initial simulation step, to avoid publishing clock==0"); + return; + } + + // add: make this only affect one robot if necessary + if ((this->base_watchdog_timeout.toSec() > 0.0) && + ((this->sim_time - this->base_last_cmd) >= this->base_watchdog_timeout)) + { + for (size_t r = 0; r < this->positionmodels.size(); r++) + this->positionmodels[r]->SetSpeed(0.0, 0.0, 0.0); + } + + // loop on the robot models + for (size_t r = 0; r < this->robotmodels_.size(); ++r) + { + StageRobot const * robotmodel = this->robotmodels_[r]; + + // loop on the laser devices for the current robot + for (size_t s = 0; s < robotmodel->lasermodels.size(); ++s) { - ROS_DEBUG("Skipping initial simulation step, to avoid publishing clock==0"); - return; + Stg::ModelRanger const* lasermodel = robotmodel->lasermodels[s]; + const std::vector& sensors = lasermodel->GetSensors(); + + if (sensors.size() > 1) + ROS_WARN("ROS Stage currently supports rangers with 1 sensor only."); + + // for now we access only the zeroth sensor of the ranger - good + // enough for most laser models that have a single beam origin + const Stg::ModelRanger::Sensor& sensor = sensors[0]; + + if (sensor.ranges.size()) + { + // Translate into ROS message format and publish + sensor_msgs::LaserScan msg; + msg.angle_min = -sensor.fov / 2.0; + msg.angle_max = +sensor.fov / 2.0; + msg.angle_increment = sensor.fov / static_cast(sensor.sample_count - 1); + msg.range_min = sensor.range.min; + msg.range_max = sensor.range.max; + msg.ranges.resize(sensor.ranges.size()); + msg.intensities.resize(sensor.intensities.size()); + + for (unsigned int i = 0; i < sensor.ranges.size(); i++) + { + msg.ranges[i] = sensor.ranges[i]; + msg.intensities[i] = sensor.intensities[i]; + } + + if (robotmodel->lasermodels.size() > 1) + msg.header.frame_id = mapName("base_laser_link", r, s, + static_cast(robotmodel->positionmodel)); + else + msg.header.frame_id = mapName("base_laser_link", r, + static_cast(robotmodel->positionmodel)); + + msg.header.stamp = sim_time; + robotmodel->laser_pubs[s].publish(msg); + } + + // Also publish the 
base->base_laser_link Tx. This could eventually move + // into being retrieved from the param server as a static Tx. + Stg::Pose lp = lasermodel->GetPose(); + tf::Quaternion laserQ; + laserQ.setRPY(0.0, 0.0, lp.a); + tf::Transform txLaser = tf::Transform(laserQ, tf::Point(lp.x, lp.y, + robotmodel->positionmodel->GetGeom().size.z + lp.z)); + + if (robotmodel->lasermodels.size() > 1) + tf.sendTransform(tf::StampedTransform(txLaser, sim_time, + mapName("base_link", r, + static_cast(robotmodel->positionmodel)), + mapName("base_laser_link", r, s, + static_cast(robotmodel->positionmodel)))); + else + tf.sendTransform(tf::StampedTransform(txLaser, sim_time, + mapName("base_link", r, + static_cast(robotmodel->positionmodel)), + mapName("base_laser_link", r, + static_cast(robotmodel->positionmodel)))); } - // TODO make this only affect one robot if necessary - if((this->base_watchdog_timeout.toSec() > 0.0) && - ((this->sim_time - this->base_last_cmd) >= this->base_watchdog_timeout)) + // the position of the robot + tf.sendTransform(tf::StampedTransform(tf::Transform::getIdentity(), + sim_time, + mapName("base_footprint", r, + static_cast(robotmodel->positionmodel)), + mapName("base_link", r, + static_cast(robotmodel->positionmodel)))); + + // Get latest odometry data + // Translate into ROS message format and publish + nav_msgs::Odometry odom_msg; + odom_msg.pose.pose.position.x = robotmodel->positionmodel->est_pose.x; + odom_msg.pose.pose.position.y = robotmodel->positionmodel->est_pose.y; + odom_msg.pose.pose.orientation = tf::createQuaternionMsgFromYaw(robotmodel->positionmodel->est_pose.a); + Stg::Velocity v = robotmodel->positionmodel->GetVelocity(); + odom_msg.twist.twist.linear.x = v.x; + odom_msg.twist.twist.linear.y = v.y; + odom_msg.twist.twist.angular.z = v.a; + + // @add: Publish stall on a separate topic when one becomes available + // this->odomMsgs[r].stall = this->positionmodels[r]->Stall(); + // + odom_msg.header.frame_id = mapName("odom", r, + static_cast(robotmodel->positionmodel)); + odom_msg.header.stamp = sim_time; + + robotmodel->odom_pub.publish(odom_msg); + + // broadcast odometry transform + tf::Quaternion odomQ; + tf::quaternionMsgToTF(odom_msg.pose.pose.orientation, odomQ); + tf::Transform txOdom(odomQ, tf::Point(odom_msg.pose.pose.position.x, odom_msg.pose.pose.position.y, 0.0)); + tf.sendTransform(tf::StampedTransform(txOdom, sim_time, + mapName("odom", r, + static_cast(robotmodel->positionmodel)), + mapName("base_footprint", r, + static_cast(robotmodel->positionmodel)))); + + // Also publish the ground truth pose and velocity + Stg::Pose gpose = robotmodel->positionmodel->GetGlobalPose(); + tf::Quaternion q_gpose; + q_gpose.setRPY(0.0, 0.0, gpose.a); + tf::Transform gt(q_gpose, tf::Point(gpose.x, gpose.y, 0.0)); + // Velocity is 0 by default and will be set only if there is previous pose and time delta>0 + Stg::Velocity gvel(0, 0, 0, 0); + if (this->base_last_globalpos.size() > r) { - for (size_t r = 0; r < this->positionmodels.size(); r++) - this->positionmodels[r]->SetSpeed(0.0, 0.0, 0.0); + Stg::Pose prevpose = this->base_last_globalpos.at(r); + double dT = (this->sim_time - this->base_last_globalpos_time).toSec(); + if (dT > 0) + gvel = Stg::Velocity( + (gpose.x - prevpose.x) / dT, + (gpose.y - prevpose.y) / dT, + (gpose.z - prevpose.z) / dT, + Stg::normalize(gpose.a - prevpose.a) / dT); + this->base_last_globalpos.at(r) = gpose; } - - //loop on the robot models - for (size_t r = 0; r < this->robotmodels_.size(); ++r) + else // There are no previous readings, 
adding current pose... + this->base_last_globalpos.push_back(gpose); + + nav_msgs::Odometry ground_truth_msg; + ground_truth_msg.pose.pose.position.x = gt.getOrigin().x(); + ground_truth_msg.pose.pose.position.y = gt.getOrigin().y(); + ground_truth_msg.pose.pose.position.z = gt.getOrigin().z(); + ground_truth_msg.pose.pose.orientation.x = gt.getRotation().x(); + ground_truth_msg.pose.pose.orientation.y = gt.getRotation().y(); + ground_truth_msg.pose.pose.orientation.z = gt.getRotation().z(); + ground_truth_msg.pose.pose.orientation.w = gt.getRotation().w(); + ground_truth_msg.twist.twist.linear.x = gvel.x; + ground_truth_msg.twist.twist.linear.y = gvel.y; + ground_truth_msg.twist.twist.linear.z = gvel.z; + ground_truth_msg.twist.twist.angular.z = gvel.a; + + ground_truth_msg.header.frame_id = mapName("odom", r, + static_cast(robotmodel->positionmodel)); + ground_truth_msg.header.stamp = sim_time; + + robotmodel->ground_truth_pub.publish(ground_truth_msg); + + // cameras + for (size_t s = 0; s < robotmodel->cameramodels.size(); ++s) { - StageRobot const * robotmodel = this->robotmodels_[r]; + Stg::ModelCamera* cameramodel = robotmodel->cameramodels[s]; + // Get latest image data + // Translate into ROS message format and publish + if (robotmodel->image_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameColor()) + { + sensor_msgs::Image image_msg; + + image_msg.height = cameramodel->getHeight(); + image_msg.width = cameramodel->getWidth(); + image_msg.encoding = "rgba8"; + // this->imageMsgs[r].is_bigendian=""; + image_msg.step = image_msg.width * 4; + image_msg.data.resize(image_msg.width * image_msg.height * 4); + + memcpy(&(image_msg.data[0]), cameramodel->FrameColor(), image_msg.width * image_msg.height * 4); + + // invert the opengl weirdness + int height = image_msg.height - 1; + int linewidth = image_msg.width * 4; + + char* temp = new char[linewidth]; + for (int y = 0; y < (height + 1) / 2; y++) + { + memcpy(temp, &image_msg.data[y * linewidth], linewidth); + memcpy(&(image_msg.data[y * linewidth]), &(image_msg.data[(height - y)*linewidth]), linewidth); + memcpy(&(image_msg.data[(height - y)*linewidth]), temp, linewidth); + } - //loop on the laser devices for the current robot - for (size_t s = 0; s < robotmodel->lasermodels.size(); ++s) + if (robotmodel->cameramodels.size() > 1) + image_msg.header.frame_id = mapName("camera", r, s, + static_cast(robotmodel->positionmodel)); + else + image_msg.header.frame_id = mapName("camera", r, + static_cast(robotmodel->positionmodel)); + image_msg.header.stamp = sim_time; + + robotmodel->image_pubs[s].publish(image_msg); + } + + // Get latest depth data + // Translate into ROS message format and publish + // Skip if there are no subscribers + if (robotmodel->depth_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameDepth()) + { + sensor_msgs::Image depth_msg; + depth_msg.height = cameramodel->getHeight(); + depth_msg.width = cameramodel->getWidth(); + depth_msg.encoding = (this->isDepthCanonical ? sensor_msgs::image_encodings::TYPE_32FC1 : + sensor_msgs::image_encodings::TYPE_16UC1); + + // this->depthMsgs[r].is_bigendian=""; + int sz = this->isDepthCanonical ? 
sizeof(float) : sizeof(uint16_t); + size_t len = depth_msg.width * depth_msg.height; + depth_msg.step = depth_msg.width * sz; + depth_msg.data.resize(len * sz); + + // processing data according to REP118 + if (this->isDepthCanonical) + { + double nearClip = cameramodel->getCamera().nearClip(); + double farClip = cameramodel->getCamera().farClip(); + memcpy(&(depth_msg.data[0]), cameramodel->FrameDepth(), len * sz); + float * data = reinterpret_cast(&(depth_msg.data[0])); + for (size_t i = 0; i < len; ++i) + if (data[i] <= nearClip) + data[i] = -INFINITY; + else if (data[i] >= farClip) + data[i] = INFINITY; + } + else { - Stg::ModelRanger const* lasermodel = robotmodel->lasermodels[s]; - const std::vector& sensors = lasermodel->GetSensors(); - - if( sensors.size() > 1 ) - ROS_WARN( "ROS Stage currently supports rangers with 1 sensor only." ); - - // for now we access only the zeroth sensor of the ranger - good - // enough for most laser models that have a single beam origin - const Stg::ModelRanger::Sensor& sensor = sensors[0]; - - if( sensor.ranges.size() ) - { - // Translate into ROS message format and publish - sensor_msgs::LaserScan msg; - msg.angle_min = -sensor.fov/2.0; - msg.angle_max = +sensor.fov/2.0; - msg.angle_increment = sensor.fov/(double)(sensor.sample_count-1); - msg.range_min = sensor.range.min; - msg.range_max = sensor.range.max; - msg.ranges.resize(sensor.ranges.size()); - msg.intensities.resize(sensor.intensities.size()); - - for(unsigned int i = 0; i < sensor.ranges.size(); i++) - { - msg.ranges[i] = sensor.ranges[i]; - msg.intensities[i] = sensor.intensities[i]; - } - - if (robotmodel->lasermodels.size() > 1) - msg.header.frame_id = mapName("base_laser_link", r, s, static_cast(robotmodel->positionmodel)); - else - msg.header.frame_id = mapName("base_laser_link", r, static_cast(robotmodel->positionmodel)); - - msg.header.stamp = sim_time; - robotmodel->laser_pubs[s].publish(msg); - } - - // Also publish the base->base_laser_link Tx. This could eventually move - // into being retrieved from the param server as a static Tx. - Stg::Pose lp = lasermodel->GetPose(); - tf::Quaternion laserQ; - laserQ.setRPY(0.0, 0.0, lp.a); - tf::Transform txLaser = tf::Transform(laserQ, tf::Point(lp.x, lp.y, robotmodel->positionmodel->GetGeom().size.z + lp.z)); - - if (robotmodel->lasermodels.size() > 1) - tf.sendTransform(tf::StampedTransform(txLaser, sim_time, - mapName("base_link", r, static_cast(robotmodel->positionmodel)), - mapName("base_laser_link", r, s, static_cast(robotmodel->positionmodel)))); - else - tf.sendTransform(tf::StampedTransform(txLaser, sim_time, - mapName("base_link", r, static_cast(robotmodel->positionmodel)), - mapName("base_laser_link", r, static_cast(robotmodel->positionmodel)))); + int nearClip = static_cast(cameramodel->getCamera().nearClip() * 1000); + int farClip = static_cast(cameramodel->getCamera().farClip() * 1000); + for (size_t i = 0; i < len; ++i) + { + int v = static_cast(cameramodel->FrameDepth()[i] * 1000); + if (v <= nearClip || v >= farClip) v = 0; + (reinterpret_cast (&(depth_msg.data[0])))[i] = static_cast( + (v <= nearClip || v >= farClip) ? 
0 : v); + } } - //the position of the robot - tf.sendTransform(tf::StampedTransform(tf::Transform::getIdentity(), - sim_time, - mapName("base_footprint", r, static_cast(robotmodel->positionmodel)), - mapName("base_link", r, static_cast(robotmodel->positionmodel)))); + // invert the opengl weirdness + int height = depth_msg.height - 1; + int linewidth = depth_msg.width * sz; - // Get latest odometry data - // Translate into ROS message format and publish - nav_msgs::Odometry odom_msg; - odom_msg.pose.pose.position.x = robotmodel->positionmodel->est_pose.x; - odom_msg.pose.pose.position.y = robotmodel->positionmodel->est_pose.y; - odom_msg.pose.pose.orientation = tf::createQuaternionMsgFromYaw(robotmodel->positionmodel->est_pose.a); - Stg::Velocity v = robotmodel->positionmodel->GetVelocity(); - odom_msg.twist.twist.linear.x = v.x; - odom_msg.twist.twist.linear.y = v.y; - odom_msg.twist.twist.angular.z = v.a; - - //@todo Publish stall on a separate topic when one becomes available - //this->odomMsgs[r].stall = this->positionmodels[r]->Stall(); - // - odom_msg.header.frame_id = mapName("odom", r, static_cast(robotmodel->positionmodel)); - odom_msg.header.stamp = sim_time; - - robotmodel->odom_pub.publish(odom_msg); - - // broadcast odometry transform - tf::Quaternion odomQ; - tf::quaternionMsgToTF(odom_msg.pose.pose.orientation, odomQ); - tf::Transform txOdom(odomQ, tf::Point(odom_msg.pose.pose.position.x, odom_msg.pose.pose.position.y, 0.0)); - tf.sendTransform(tf::StampedTransform(txOdom, sim_time, - mapName("odom", r, static_cast(robotmodel->positionmodel)), - mapName("base_footprint", r, static_cast(robotmodel->positionmodel)))); - - // Also publish the ground truth pose and velocity - Stg::Pose gpose = robotmodel->positionmodel->GetGlobalPose(); - tf::Quaternion q_gpose; - q_gpose.setRPY(0.0, 0.0, gpose.a); - tf::Transform gt(q_gpose, tf::Point(gpose.x, gpose.y, 0.0)); - // Velocity is 0 by default and will be set only if there is previous pose and time delta>0 - Stg::Velocity gvel(0,0,0,0); - if (this->base_last_globalpos.size()>r){ - Stg::Pose prevpose = this->base_last_globalpos.at(r); - double dT = (this->sim_time-this->base_last_globalpos_time).toSec(); - if (dT>0) - gvel = Stg::Velocity( - (gpose.x - prevpose.x)/dT, - (gpose.y - prevpose.y)/dT, - (gpose.z - prevpose.z)/dT, - Stg::normalize(gpose.a - prevpose.a)/dT - ); - this->base_last_globalpos.at(r) = gpose; - }else //There are no previous readings, adding current pose... 
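The ground-truth twist in this block is not read from Stage directly; it is differenced from consecutive global poses, and the heading delta is passed through Stg::normalize so that a wrap from +pi to -pi does not produce a huge spurious angular velocity. A compact sketch of that computation (illustrative only; assumes the libstage Pose/Velocity types already used in this file):

    // Sketch: ground-truth velocity from two consecutive global poses.
    // Stg::normalize wraps the heading difference into [-pi, pi], so a
    // crossing from +3.1 rad to -3.1 rad yields a small delta, not ~6.2 rad.
    Stg::Velocity finiteDifference(const Stg::Pose& prev, const Stg::Pose& cur, double dT)
    {
      if (dT <= 0)
        return Stg::Velocity(0, 0, 0, 0);
      return Stg::Velocity((cur.x - prev.x) / dT,
                           (cur.y - prev.y) / dT,
                           (cur.z - prev.z) / dT,
                           Stg::normalize(cur.a - prev.a) / dT);
    }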
- this->base_last_globalpos.push_back(gpose); - - nav_msgs::Odometry ground_truth_msg; - ground_truth_msg.pose.pose.position.x = gt.getOrigin().x(); - ground_truth_msg.pose.pose.position.y = gt.getOrigin().y(); - ground_truth_msg.pose.pose.position.z = gt.getOrigin().z(); - ground_truth_msg.pose.pose.orientation.x = gt.getRotation().x(); - ground_truth_msg.pose.pose.orientation.y = gt.getRotation().y(); - ground_truth_msg.pose.pose.orientation.z = gt.getRotation().z(); - ground_truth_msg.pose.pose.orientation.w = gt.getRotation().w(); - ground_truth_msg.twist.twist.linear.x = gvel.x; - ground_truth_msg.twist.twist.linear.y = gvel.y; - ground_truth_msg.twist.twist.linear.z = gvel.z; - ground_truth_msg.twist.twist.angular.z = gvel.a; - - ground_truth_msg.header.frame_id = mapName("odom", r, static_cast(robotmodel->positionmodel)); - ground_truth_msg.header.stamp = sim_time; - - robotmodel->ground_truth_pub.publish(ground_truth_msg); - - //cameras - for (size_t s = 0; s < robotmodel->cameramodels.size(); ++s) + char* temp = new char[linewidth]; + for (int y = 0; y < (height + 1) / 2; y++) { - Stg::ModelCamera* cameramodel = robotmodel->cameramodels[s]; - // Get latest image data - // Translate into ROS message format and publish - if (robotmodel->image_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameColor()) - { - sensor_msgs::Image image_msg; - - image_msg.height = cameramodel->getHeight(); - image_msg.width = cameramodel->getWidth(); - image_msg.encoding = "rgba8"; - //this->imageMsgs[r].is_bigendian=""; - image_msg.step = image_msg.width*4; - image_msg.data.resize(image_msg.width * image_msg.height * 4); - - memcpy(&(image_msg.data[0]), cameramodel->FrameColor(), image_msg.width * image_msg.height * 4); - - //invert the opengl weirdness - int height = image_msg.height - 1; - int linewidth = image_msg.width*4; - - char* temp = new char[linewidth]; - for (int y = 0; y < (height+1)/2; y++) - { - memcpy(temp,&image_msg.data[y*linewidth],linewidth); - memcpy(&(image_msg.data[y*linewidth]),&(image_msg.data[(height-y)*linewidth]),linewidth); - memcpy(&(image_msg.data[(height-y)*linewidth]),temp,linewidth); - } - - if (robotmodel->cameramodels.size() > 1) - image_msg.header.frame_id = mapName("camera", r, s, static_cast(robotmodel->positionmodel)); - else - image_msg.header.frame_id = mapName("camera", r,static_cast(robotmodel->positionmodel)); - image_msg.header.stamp = sim_time; - - robotmodel->image_pubs[s].publish(image_msg); - } - - //Get latest depth data - //Translate into ROS message format and publish - //Skip if there are no subscribers - if (robotmodel->depth_pubs[s].getNumSubscribers()>0 && cameramodel->FrameDepth()) - { - sensor_msgs::Image depth_msg; - depth_msg.height = cameramodel->getHeight(); - depth_msg.width = cameramodel->getWidth(); - depth_msg.encoding = this->isDepthCanonical?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1; - //this->depthMsgs[r].is_bigendian=""; - int sz = this->isDepthCanonical?sizeof(float):sizeof(uint16_t); - size_t len = depth_msg.width * depth_msg.height; - depth_msg.step = depth_msg.width * sz; - depth_msg.data.resize(len*sz); - - //processing data according to REP118 - if (this->isDepthCanonical){ - double nearClip = cameramodel->getCamera().nearClip(); - double farClip = cameramodel->getCamera().farClip(); - memcpy(&(depth_msg.data[0]),cameramodel->FrameDepth(),len*sz); - float * data = (float*)&(depth_msg.data[0]); - for (size_t i=0;i=farClip) - data[i] = INFINITY; - } - else{ - int nearClip = 
(int)(cameramodel->getCamera().nearClip() * 1000); - int farClip = (int)(cameramodel->getCamera().farClip() * 1000); - for (size_t i=0;iFrameDepth()[i]*1000); - if (v<=nearClip || v>=farClip) v = 0; - ((uint16_t*)&(depth_msg.data[0]))[i] = (uint16_t) ((v<=nearClip || v>=farClip) ? 0 : v ); - } - } - - //invert the opengl weirdness - int height = depth_msg.height - 1; - int linewidth = depth_msg.width*sz; - - char* temp = new char[linewidth]; - for (int y = 0; y < (height+1)/2; y++) - { - memcpy(temp,&depth_msg.data[y*linewidth],linewidth); - memcpy(&(depth_msg.data[y*linewidth]),&(depth_msg.data[(height-y)*linewidth]),linewidth); - memcpy(&(depth_msg.data[(height-y)*linewidth]),temp,linewidth); - } - - if (robotmodel->cameramodels.size() > 1) - depth_msg.header.frame_id = mapName("camera", r, s, static_cast(robotmodel->positionmodel)); - else - depth_msg.header.frame_id = mapName("camera", r, static_cast(robotmodel->positionmodel)); - depth_msg.header.stamp = sim_time; - robotmodel->depth_pubs[s].publish(depth_msg); - } - - //sending camera's tf and info only if image or depth topics are subscribed to - if ((robotmodel->image_pubs[s].getNumSubscribers()>0 && cameramodel->FrameColor()) - || (robotmodel->depth_pubs[s].getNumSubscribers()>0 && cameramodel->FrameDepth())) - { - - Stg::Pose lp = cameramodel->GetPose(); - tf::Quaternion Q; Q.setRPY( - (cameramodel->getCamera().pitch()*M_PI/180.0)-M_PI, - 0.0, - lp.a+(cameramodel->getCamera().yaw()*M_PI/180.0) - robotmodel->positionmodel->GetPose().a - ); - - tf::Transform tr = tf::Transform(Q, tf::Point(lp.x, lp.y, robotmodel->positionmodel->GetGeom().size.z+lp.z)); - - if (robotmodel->cameramodels.size() > 1) - tf.sendTransform(tf::StampedTransform(tr, sim_time, - mapName("base_link", r, static_cast(robotmodel->positionmodel)), - mapName("camera", r, s, static_cast(robotmodel->positionmodel)))); - else - tf.sendTransform(tf::StampedTransform(tr, sim_time, - mapName("base_link", r, static_cast(robotmodel->positionmodel)), - mapName("camera", r, static_cast(robotmodel->positionmodel)))); - - sensor_msgs::CameraInfo camera_msg; - if (robotmodel->cameramodels.size() > 1) - camera_msg.header.frame_id = mapName("camera", r, s, static_cast(robotmodel->positionmodel)); - else - camera_msg.header.frame_id = mapName("camera", r, static_cast(robotmodel->positionmodel)); - camera_msg.header.stamp = sim_time; - camera_msg.height = cameramodel->getHeight(); - camera_msg.width = cameramodel->getWidth(); - - double fx,fy,cx,cy; - cx = camera_msg.width / 2.0; - cy = camera_msg.height / 2.0; - double fovh = cameramodel->getCamera().horizFov()*M_PI/180.0; - double fovv = cameramodel->getCamera().vertFov()*M_PI/180.0; - //double fx_ = 1.43266615300557*this->cameramodels[r]->getWidth()/tan(fovh); - //double fy_ = 1.43266615300557*this->cameramodels[r]->getHeight()/tan(fovv); - fx = cameramodel->getWidth()/(2*tan(fovh/2)); - fy = cameramodel->getHeight()/(2*tan(fovv/2)); - - //ROS_INFO("fx=%.4f,%.4f; fy=%.4f,%.4f", fx, fx_, fy, fy_); - - - camera_msg.D.resize(4, 0.0); - - camera_msg.K[0] = fx; - camera_msg.K[2] = cx; - camera_msg.K[4] = fy; - camera_msg.K[5] = cy; - camera_msg.K[8] = 1.0; - - camera_msg.R[0] = 1.0; - camera_msg.R[4] = 1.0; - camera_msg.R[8] = 1.0; - - camera_msg.P[0] = fx; - camera_msg.P[2] = cx; - camera_msg.P[5] = fy; - camera_msg.P[6] = cy; - camera_msg.P[10] = 1.0; - - robotmodel->camera_pubs[s].publish(camera_msg); - - } - + memcpy(temp, &depth_msg.data[y * linewidth], linewidth); + memcpy(&(depth_msg.data[y * linewidth]), 
&(depth_msg.data[(height - y)*linewidth]), linewidth); + memcpy(&(depth_msg.data[(height - y)*linewidth]), temp, linewidth); } + + if (robotmodel->cameramodels.size() > 1) + depth_msg.header.frame_id = mapName("camera", r, s, + static_cast(robotmodel->positionmodel)); + else + depth_msg.header.frame_id = mapName("camera", r, + static_cast(robotmodel->positionmodel)); + depth_msg.header.stamp = sim_time; + robotmodel->depth_pubs[s].publish(depth_msg); + } + + // sending camera's tf and info only if image or depth topics are subscribed to + if ((robotmodel->image_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameColor()) + || (robotmodel->depth_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameDepth())) + { + Stg::Pose lp = cameramodel->GetPose(); + tf::Quaternion Q; + Q.setRPY( + (cameramodel->getCamera().pitch()*M_PI / 180.0) - M_PI, + 0.0, + lp.a + (cameramodel->getCamera().yaw()*M_PI / 180.0) - robotmodel->positionmodel->GetPose().a); + + tf::Transform tr = tf::Transform(Q, tf::Point(lp.x, lp.y, robotmodel->positionmodel->GetGeom().size.z + lp.z)); + + if (robotmodel->cameramodels.size() > 1) + tf.sendTransform(tf::StampedTransform(tr, sim_time, + mapName("base_link", r, + static_cast(robotmodel->positionmodel)), + mapName("camera", r, s, + static_cast(robotmodel->positionmodel)))); + else + tf.sendTransform(tf::StampedTransform(tr, sim_time, + mapName("base_link", r, + static_cast(robotmodel->positionmodel)), + mapName("camera", r, + static_cast(robotmodel->positionmodel)))); + + sensor_msgs::CameraInfo camera_msg; + if (robotmodel->cameramodels.size() > 1) + camera_msg.header.frame_id = mapName("camera", r, s, + static_cast(robotmodel->positionmodel)); + else + camera_msg.header.frame_id = mapName("camera", r, + static_cast(robotmodel->positionmodel)); + camera_msg.header.stamp = sim_time; + camera_msg.height = cameramodel->getHeight(); + camera_msg.width = cameramodel->getWidth(); + + double fx, fy, cx, cy; + cx = camera_msg.width / 2.0; + cy = camera_msg.height / 2.0; + double fovh = cameramodel->getCamera().horizFov() * M_PI / 180.0; + double fovv = cameramodel->getCamera().vertFov() * M_PI / 180.0; + // double fx_ = 1.43266615300557*this->cameramodels[r]->getWidth()/tan(fovh); + // double fy_ = 1.43266615300557*this->cameramodels[r]->getHeight()/tan(fovv); + fx = cameramodel->getWidth() / (2 * tan(fovh / 2)); + fy = cameramodel->getHeight() / (2 * tan(fovv / 2)); + + // ROS_INFO("fx=%.4f,%.4f; fy=%.4f,%.4f", fx, fx_, fy, fy_); + + + camera_msg.D.resize(4, 0.0); + + camera_msg.K[0] = fx; + camera_msg.K[2] = cx; + camera_msg.K[4] = fy; + camera_msg.K[5] = cy; + camera_msg.K[8] = 1.0; + + camera_msg.R[0] = 1.0; + camera_msg.R[4] = 1.0; + camera_msg.R[8] = 1.0; + + camera_msg.P[0] = fx; + camera_msg.P[2] = cx; + camera_msg.P[5] = fy; + camera_msg.P[6] = cy; + camera_msg.P[10] = 1.0; + + robotmodel->camera_pubs[s].publish(camera_msg); + } } + } - this->base_last_globalpos_time = this->sim_time; - rosgraph_msgs::Clock clock_msg; - clock_msg.clock = sim_time; - this->clock_pub_.publish(clock_msg); + this->base_last_globalpos_time = this->sim_time; + rosgraph_msgs::Clock clock_msg; + clock_msg.clock = sim_time; + this->clock_pub_.publish(clock_msg); } -int +int main(int argc, char** argv) { - if( argc < 2 ) - { - puts(USAGE); - exit(-1); - } + if (argc < 2) + { + puts(USAGE); + exit(-1); + } - ros::init(argc, argv, "stageros"); + ros::init(argc, argv, "stageros"); - bool gui = true; - bool use_model_names = false; - for(int i=0;i<(argc-1);i++) - { - if(!strcmp(argv[i], "-g")) - gui = 
false; - if(!strcmp(argv[i], "-u")) - use_model_names = true; - } + bool gui = true; + bool use_model_names = false; + for (int i = 0; i < (argc - 1); i++) + { + if (!strcmp(argv[i], "-g")) + gui = false; + if (!strcmp(argv[i], "-u")) + use_model_names = true; + } + + StageNode sn(argc - 1, argv, gui, argv[argc - 1], use_model_names); - StageNode sn(argc-1,argv,gui,argv[argc-1], use_model_names); + if (sn.SubscribeModels() != 0) + exit(-1); - if(sn.SubscribeModels() != 0) - exit(-1); + boost::thread t = boost::thread(boost::bind(&ros::spin)); - boost::thread t = boost::thread(boost::bind(&ros::spin)); + sn.world->Start(); - sn.world->Start(); + Stg::World::Run(); - Stg::World::Run(); - - t.join(); + t.join(); - exit(0); + exit(0); }
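Finally, the soft-reset service advertised in SubscribeModels() ("reset_positions", std_srvs/Empty) restores every position model to the pose recorded at startup and clears its stall flag. A minimal sketch of calling it from another node (hypothetical client, not part of this patch):

    // Hypothetical client that asks stageros to reset all robot poses.
    #include <ros/ros.h>
    #include <std_srvs/Empty.h>

    int main(int argc, char** argv)
    {
      ros::init(argc, argv, "stage_reset_example");
      ros::NodeHandle nh;
      ros::ServiceClient client = nh.serviceClient<std_srvs::Empty>("reset_positions");

      std_srvs::Empty srv;
      if (client.call(srv))
        ROS_INFO("Stage positions reset");
      else
        ROS_ERROR("Failed to call reset_positions");
      return 0;
    }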