/* +------------------------------------------------------------------------+
   |                     Mobile Robot Programming Toolkit (MRPT)            |
   |                          https://www.mrpt.org/                         |
   |                                                                        |
   | Copyright (c) 2005-2019, Individual contributors, see AUTHORS file     |
   | See: https://www.mrpt.org/Authors - All rights reserved.               |
   | Released under BSD License. See: https://www.mrpt.org/License          |
   +------------------------------------------------------------------------+ */
#pragma once

#include <mrpt/core/aligned_std_vector.h>
#include <mrpt/img/CImage.h>
#include <mrpt/img/color_maps.h>
#include <mrpt/math/CMatrixF.h>
#include <mrpt/math/CPolygon.h>
#include <mrpt/obs/CObservation.h>
#include <mrpt/obs/CObservation2DRangeScan.h>
#include <mrpt/obs/TPixelLabelInfo.h>
#include <mrpt/obs/TRangeImageFilter.h>
#include <mrpt/opengl/pointcloud_adapters.h>
#include <mrpt/poses/CPose2D.h>
#include <mrpt/poses/CPose3D.h>
#include <mrpt/serialization/CSerializable.h>
#include <mrpt/typemeta/TEnumType.h>
#include <optional>

namespace mrpt
{
namespace obs
{
/** Used in CObservation3DRangeScan::project3DPointsFromDepthImageInto() */
struct T3DPointsProjectionParams
{
    /** (Default: false) If false, local (sensor-centric) coordinates of points
     * are generated. Otherwise, points are transformed with \a sensorPose.
     * Furthermore, if provided, those coordinates are transformed with \a
     * robotPoseInTheWorld */
    bool takeIntoAccountSensorPoseOnRobot{false};
    /** (Default: nullptr) Read takeIntoAccountSensorPoseOnRobot */
    const mrpt::poses::CPose3D* robotPoseInTheWorld{nullptr};
    /** (Default:true) [Only used when `range_is_depth`=true] Whether to use a
     * Look-up-table (LUT) to speed up the conversion. It's thread safe in all
     * situations <b>except</b> when you call this method from different
     * threads <b>and</b> with different camera parameter matrices. In all
     * other cases, it is a good idea to leave it enabled. */
    bool PROJ3D_USE_LUT{true};
    /** (Default:true) If possible, use SSE2 optimized code. */
    bool USE_SSE2{true};
    /** (Default:false) Set to true if you want an organized point cloud */
    bool MAKE_ORGANIZED{false};

    /** (Default:1) If !=1, split the range image in blocks of DxD
     * (D=decimation) and only generate one point per block, with the minimum
     * valid range. */
    uint8_t decimation{1};

    T3DPointsProjectionParams() = default;
};
/** Used in CObservation3DRangeScan::convertTo2DScan() */
struct T3DPointsTo2DScanParams
{
    /** The sensor label that will have the newly created observation. */
    std::string sensorLabel;
    /** (Default=5 degrees) [Only if use_origin_sensor_pose=false] The upper &
     * lower half-FOV angles (in radians). */
    double angle_sup, angle_inf;
    /** (Default:-inf, +inf) [Only if use_origin_sensor_pose=true] Only
     * obstacle points with Z coordinates within the range [z_min,z_max] will
     * be taken into account. */
    double z_min, z_max;
    /** (Default=1.2=120%) How many more laser scan rays to create (read the
     * docs for CObservation3DRangeScan::convertTo2DScan()). */
    double oversampling_ratio{1.2};

    /** (Default:false) If `false`, the conversion will be such that the 2D
     * observation pose on the robot coincides with that in the original 3D
     * range scan.
     * If `true`, the sensed points will be "reprojected" as seen from a
     * sensor pose at the robot/vehicle frame origin (and angle_sup, angle_inf
     * will be ignored). */
    bool use_origin_sensor_pose{false};

    T3DPointsTo2DScanParams();
};

namespace detail
{
// Implemented in CObservation3DRangeScan_project3D_impl.h
template <class POINTMAP>
void project3DPointsFromDepthImageInto(
    mrpt::obs::CObservation3DRangeScan& src_obs, POINTMAP& dest_pointcloud,
    const mrpt::obs::T3DPointsProjectionParams& projectParams,
    const mrpt::obs::TRangeImageFilterParams& filterParams);
}  // namespace detail

/** Declares a class derived from "CObservation" that encapsulates a 3D range
 * scan measurement, as from a time-of-flight range camera or any other RGBD
 * sensor.
 *
 * This kind of observation can carry one or more of these data fields:
 *  - 3D point cloud (as float's).
 *  - Each 3D point has its associated (u,v) pixel coordinates in \a
 *    points3D_idxs_x & \a points3D_idxs_y (New in MRPT 1.4.0)
 *  - 2D range image (as a matrix): Each entry in the matrix
 *    "rangeImage(ROW,COLUMN)" contains a distance or a depth, depending on
 *    \a range_is_depth. Ranges are stored as uint16_t for efficiency; the
 *    units of ranges are stored separately in rangeUnits.
 *  - 2D intensity (grayscale or RGB) image (as a mrpt::img::CImage): For
 *    SwissRanger cameras, a logarithmic A-law compression is used to convert
 *    the original 16bit intensity to a more standard 8bit graylevel.
 *  - 2D confidence image (as a mrpt::img::CImage): For each pixel, a 0x00
 *    and a 0xFF mean the lowest and highest confidence levels, respectively.
 *  - Semantic labels: Stored as a matrix of bitfields, each bit having a
 *    user-defined meaning.
 *
 * The coordinates of the 3D point cloud are in meters with respect to the
 * depth camera origin of coordinates (in SwissRanger, the front face of the
 * camera: a small offset ~1cm in front of the physical focal point), with
 * the +X axis pointing forward, +Y pointing left-hand and +Z pointing up. By
 * convention, a 3D point with its coordinates set to (0,0,0) will be
 * considered invalid.
 * The field CObservation3DRangeScan::relativePoseIntensityWRTDepth describes
 * the change of coordinates from the depth camera to the intensity (RGB or
 * grayscale) camera. In a SwissRanger camera both cameras coincide, so this
 * pose is just a rotation (0,0,0,-90deg,0,-90deg). But in a Microsoft Kinect
 * there is also an offset, as shown in this figure:
 *
 *  <div align=center>
 *   <img src="CObservation3DRangeScan_figRefSystem.png">
 *  </div>
 *
 * In any case, check the field \a relativePoseIntensityWRTDepth, or the
 * method \a doDepthAndIntensityCamerasCoincide(), to determine if both
 * frames of reference coincide, since even for Kinect cameras both can
 * coincide if the images have been rectified.
 *
 * The 2D images and matrices are stored as common images, with an up->down
 * row order and left->right, as usual.
 * Optionally, the intensity and confidence channels can be set to
 * delayed-load images for off-rawlog storage, saving memory by keeping in
 * memory just the needed images. See the methods load() and unload().
 * Due to the intensive storage requirements of this kind of observation,
 * this is the only observation in MRPT for which it's recommended to always
 * call "load()" and "unload()" before and after using it, *ONLY* when the
 * observation was read from a rawlog dataset, in order to make sure that all
 * the externally stored data fields are loaded and ready in memory.
 *
 * Classes that grab observations of this type are:
 *  - mrpt::hwdrivers::CSwissRanger3DCamera
 *  - mrpt::hwdrivers::CKinect
 *  - mrpt::hwdrivers::COpenNI2Sensor
 *
 * There are two sets of calibration parameters (see
 * mrpt::vision::checkerBoardStereoCalibration() or the ready-to-use GUI
 * program <a href="http://www.mrpt.org/Application:kinect-calibrate"
 * >kinect-calibrate</a>):
 *  - cameraParams: Projection parameters of the depth camera.
 *  - cameraParamsIntensity: Projection parameters of the intensity
 *    (gray-level or RGB) camera.
 *
 * In some cameras, like SwissRanger, both are the same. In a Kinect it is
 * possible to rectify the range images such that both cameras seem to
 * coincide, and then both sets of camera parameters will be identical.
 *
 * Range data can be interpreted in two different ways depending on the 3D
 * camera (this field is already set to the correct setting when grabbing
 * observations from an mrpt::hwdrivers sensor):
 *  - range_is_depth=true  -> Kinect-like ranges: entries of \a rangeImage
 *    are distances along the +X axis.
 *  - range_is_depth=false -> Ranges in \a rangeImage are actual distances
 *    in 3D.
 *
 * The "intensity" channel may come from different channels in sensors such
 * as the Kinect. Look at field \a intensityImageChannel to find out if the
 * image was grabbed from the visible (RGB) or IR channels.
 *
 * 3D point clouds can be generated at any moment after grabbing with
 * CObservation3DRangeScan::project3DPointsFromDepthImage() and
 * CObservation3DRangeScan::project3DPointsFromDepthImageInto(), provided the
 * correct calibration parameters. Note that project3DPointsFromDepthImage()
 * will store the point cloud in sensor-centric local coordinates. Use
 * project3DPointsFromDepthImageInto() to directly obtain vehicle or world
 * coordinates.
 *
 * Example of how to assign labels to pixels (for object segmentation,
 * semantic information, etc.):
 *
 * \code
 * // Assume obs of type CObservation3DRangeScan::Ptr
 * obs->pixelLabels = TPixelLabelInfoBase::Ptr(
 *     new TPixelLabelInfo<NUM_BYTES>());
 * obs->pixelLabels->setSize(ROWS, COLS);
 * obs->pixelLabels->setLabel(col, row, label_idx); // label_idx in [0, 2^NUM_BYTES-1]
 * // ...
 * \endcode
 *
 * \note Starting at serialization version 2 (MRPT 0.9.1+), the confidence
 *   channel is stored as an image instead of a matrix to optimize memory and
 *   disk space.
 * \note Starting at serialization version 3 (MRPT 0.9.1+), the 3D point
 *   cloud and the rangeImage can both be stored externally to save rawlog
 *   space.
 * \note Starting at serialization version 5 (MRPT 0.9.5+), the new field \a
 *   range_is_depth
 * \note Starting at serialization version 6 (MRPT 0.9.5+), the new field \a
 *   intensityImageChannel
 * \note Starting at serialization version 7 (MRPT 1.3.1+), new fields for
 *   semantic labeling
 * \note Since MRPT 1.5.0, external files format can be selected at runtime
 *   with `CObservation3DRangeScan::EXTERNALS_AS_TEXT`
 *
 * \sa mrpt::hwdrivers::CSwissRanger3DCamera, mrpt::hwdrivers::CKinect,
 *   CObservation
 * \ingroup mrpt_obs_grp
 */
class CObservation3DRangeScan : public CObservation
{
    DEFINE_SERIALIZABLE(CObservation3DRangeScan, mrpt::obs)

   protected:
    /** If set to true, m_points3D_external_file is valid. */
    bool m_points3D_external_stored{false};
    /** 3D points are in CImage::getImagesPathBase()+<this_file_name> */
    std::string m_points3D_external_file;

    /** If set to true, m_rangeImage_external_file is valid. */
    bool m_rangeImage_external_stored{false};
    /** rangeImage is in CImage::getImagesPathBase()+<this_file_name> */
    std::string m_rangeImage_external_file;

   public:
    /** Default constructor */
    CObservation3DRangeScan();
    /** Destructor */
    ~CObservation3DRangeScan() override;

    /** @name Delayed-load manual control methods.
        @{ */
    /** Makes sure all images and other fields which may be externally stored
     * are loaded in memory.
     * Note that for all CImages, calling load() is not required, since the
     * images will be automatically loaded upon first access, so load()
     * shouldn't need to be called in normal cases by the user.
     * If all the data was already loaded, or this object has no externally
     * stored data fields, calling this method has no effect.
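     *
     * A typical usage sketch when reading from a rawlog (illustrative, not
     * part of the original docs):
     * \code
     * obs->load();    // ensure delayed-load fields are in memory
     * // ... use the observation ...
     * obs->unload();  // free that memory again
     * \endcode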
     * \sa unload
     */
    void load() const override;
    /** Unload all images, in case they are delayed-load images stored in
     * external files (otherwise, it has no effect).
     * \sa load
     */
    void unload() override;
    /** @} */

    /** Project the RGB+D images into a 3D point cloud (with color if the
     * target map supports it) and optionally at a given 3D pose.
     * The 3D point coordinates are computed from the depth image (\a
     * rangeImage) and the depth camera parameters (\a cameraParams).
     * There exist two sets of formulas for projecting the i'th point,
     * depending on the value of "range_is_depth".
     * In all formulas below, "rangeImage" is the matrix of ranges and the
     * pixel coordinates are (r,c).
     *
     * 1) [range_is_depth=true] With "range equals depth" or "Kinect-like
     *    depth mode": the range values are in fact distances along the "+X"
     *    axis, not real 3D ranges (this is the way Kinect reports ranges):
     *
     * \code
     * x(i) = rangeImage(r,c) * rangeUnits
     * y(i) = (r_cx - c) * x(i) / r_fx
     * z(i) = (r_cy - r) * x(i) / r_fy
     * \endcode
     *
     * 2) [range_is_depth=false] With "normal ranges": range means distance
     *    in 3D. This must be set when processing data from the SwissRanger
     *    3D camera, among others.
     *
     * \code
     * Ky = (r_cx - c)/r_fx
     * Kz = (r_cy - r)/r_fy
     *
     * x(i) = rangeImage(r,c) * rangeUnits / sqrt( 1 + Ky^2 + Kz^2 )
     * y(i) = Ky * x(i)
     * z(i) = Kz * x(i)
     * \endcode
     *
     * The color of each point is determined by projecting the 3D local point
     * into the RGB image using \a cameraParamsIntensity.
     *
     * By default the local (sensor-centric) coordinates of points are
     * directly stored into the local map, but if indicated so in \a
     * takeIntoAccountSensorPoseOnRobot the points are transformed with \a
     * sensorPose. Furthermore, if provided, those coordinates are
     * transformed with \a robotPoseInTheWorld
     *
     * \tparam POINTMAP Supported maps are all those covered by
     * mrpt::opengl::PointCloudAdapter (mrpt::maps::CPointsMap and derived,
     * mrpt::opengl::CPointCloudColoured, PCL point clouds,...)
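     *
     * Example of use (a sketch; it assumes `obs` is a valid
     * CObservation3DRangeScan::Ptr, and picks mrpt::maps::CSimplePointsMap
     * as one of the supported POINTMAP types):
     * \code
     * mrpt::maps::CSimplePointsMap pts;
     * mrpt::obs::T3DPointsProjectionParams pp;
     * pp.takeIntoAccountSensorPoseOnRobot = true; // points in vehicle frame
     * obs->project3DPointsFromDepthImageInto(pts, pp);
     * \endcode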
     */
    template <class POINTMAP>
    inline void project3DPointsFromDepthImageInto(
        POINTMAP& dest_pointcloud,
        const T3DPointsProjectionParams& projectParams,
        const TRangeImageFilterParams& filterParams = TRangeImageFilterParams())
    {
        detail::project3DPointsFromDepthImageInto<POINTMAP>(
            *this, dest_pointcloud, projectParams, filterParams);
    }

    /** This method is equivalent to \c project3DPointsFromDepthImageInto(),
     * storing the projected 3D points (without color, in local sensor-centric
     * coordinates) in this same class.
     * For new code it's recommended to use instead \c
     * project3DPointsFromDepthImageInto(), which is much more versatile. */
    inline void project3DPointsFromDepthImage(const bool PROJ3D_USE_LUT = true)
    {
        T3DPointsProjectionParams p;
        p.takeIntoAccountSensorPoseOnRobot = false;
        p.PROJ3D_USE_LUT = PROJ3D_USE_LUT;
        this->project3DPointsFromDepthImageInto(*this, p);
    }

    /** Convert this 3D observation into an "equivalent 2D fake laser scan",
     * with a configurable vertical FOV.
     *
     * The result is a 2D laser scan with more "rays" (N) than the 3D
     * observation has columns (W); exactly: N = W * oversampling_ratio.
     * This oversampling is required since laser scans sample the space at
     * evenly-separated angles, while a range camera follows a tangent-like
     * distribution. By oversampling we make sure we don't leave "gaps"
     * unseen by the virtual "2D laser".
     *
     * All obstacles within a frustum are considered and the minimum distance
     * is kept in each direction.
     * The horizontal FOV of the frustum is automatically computed from the
     * intrinsic parameters, but the vertical FOV must be provided by the
     * user, and can be set to be asymmetric, which may be useful depending
     * on the zone of interest where to look for obstacles.
     *
     * All spatial transformations are rigorously taken into account in this
     * class, using the depth camera intrinsic calibration parameters.
     *
     * The timestamp of the new object is copied from the 3D object.
     * Obviously, a requisite for calling this method is that the 3D
     * observation has range data, i.e. hasRangeImage must be true. Neither
     * RGB data nor the raw 3D point cloud are needed for this method to
     * work.
     *
     * If `scanParams.use_origin_sensor_pose` is `true`, the points will be
     * projected to 3D and then reprojected as seen from a different
     * sensorPose at the vehicle frame origin. Otherwise (the default), the
     * output 2D observation will share the sensorPose of the input 3D scan
     * (using a more efficient algorithm that avoids trigonometric
     * functions).
     *
     * \param[out] out_scan2d The resulting 2D equivalent scan.
     *
     * \sa The example in
     * https://www.mrpt.org/tutorials/mrpt-examples/example-kinect-to-2d-laser-demo/
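     *
     * A minimal usage sketch (illustrative; assumes `obs` is a
     * CObservation3DRangeScan::Ptr with hasRangeImage==true):
     * \code
     * mrpt::obs::CObservation2DRangeScan scan2d;
     * mrpt::obs::T3DPointsTo2DScanParams sp;
     * sp.sensorLabel = obs->sensorLabel;
     * obs->convertTo2DScan(scan2d, sp);
     * \endcode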
     */
    void convertTo2DScan(
        mrpt::obs::CObservation2DRangeScan& out_scan2d,
        const T3DPointsTo2DScanParams& scanParams,
        const TRangeImageFilterParams& filterParams =
            TRangeImageFilterParams());

    /** Whether external files (3D points, range and confidence) are to be
     * saved as `.txt` text files (MATLAB compatible) or `*.bin` binary
     * (faster).
     * Loading always determines the type by inspecting the file extension.
     * \note Default=false
     **/
    static void EXTERNALS_AS_TEXT(bool value);
    static bool EXTERNALS_AS_TEXT();
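
    // Usage sketch (illustrative, not part of the original docs): select the
    // MATLAB-compatible text format for external files before writing them:
    //   mrpt::obs::CObservation3DRangeScan::EXTERNALS_AS_TEXT(true);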

    /** \name Point cloud
     * @{ */
    /** true means the field points3D contains valid data. */
    bool hasPoints3D{false};
    /** If hasPoints3D=true, the (X,Y,Z) coordinates of the 3D point cloud
     * detected by the camera. \sa resizePoints3DVectors */
    std::vector<float> points3D_x, points3D_y, points3D_z;
    /** If hasPoints3D=true, the (x,y) pixel coordinates for each (X,Y,Z)
     * point in \a points3D_x, points3D_y, points3D_z */
    std::vector<uint16_t> points3D_idxs_x, points3D_idxs_y;

    /** Use this method instead of resizing all three \a points3D_x, \a
     * points3D_y & \a points3D_z to allow the usage of the internal memory
     * pool. */
    void resizePoints3DVectors(const size_t nPoints);
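
    // A usage sketch (illustrative): resize once through the pooled method,
    // then fill the coordinate vectors directly:
    //   obs.resizePoints3DVectors(N);
    //   for (size_t i = 0; i < N; i++)
    //       obs.points3D_x[i] = 0.0f;  // likewise points3D_y / points3D_z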

    /** Get the size of the scan pointcloud. \note Method is added for
     * compatibility with its CObservation2DRangeScan counterpart */
    size_t getScanSize() const;
    /** @} */

    /** \name Point cloud external storage functions
     * @{ */
    inline bool points3D_isExternallyStored() const
    {
        return m_points3D_external_stored;
    }
    inline std::string points3D_getExternalStorageFile() const
    {
        return m_points3D_external_file;
    }
    void points3D_getExternalStorageFileAbsolutePath(
        std::string& out_path) const;
    inline std::string points3D_getExternalStorageFileAbsolutePath() const
    {
        std::string tmp;
        points3D_getExternalStorageFileAbsolutePath(tmp);
        return tmp;
    }
    /** Users won't normally want to call this; it's only used from internal
     * MRPT programs. \sa EXTERNALS_AS_TEXT */
    void points3D_convertToExternalStorage(
        const std::string& fileName, const std::string& use_this_base_dir);
    /** Users normally won't need to use this */
    inline void points3D_overrideExternalStoredFlag(bool isExternal)
    {
        m_points3D_external_stored = isExternal;
    }
    /** @} */

    /** \name Range (depth) image
     * @{ */
    /** true means the field rangeImage contains valid data */
    bool hasRangeImage{false};

    /** If hasRangeImage=true, a matrix of uint16_t range values as captured
     * by the camera; multiply by rangeUnits to obtain distances in meters.
     * \sa range_is_depth, rangeUnits */
    mrpt::math::CMatrix_u16 rangeImage;

    /** The conversion factor between integer units in rangeImage and actual
     * distances in meters. Default is 0.001 m, that is 1 millimeter. \sa
     * rangeImage */
    float rangeUnits = 0.001f;
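
    // Illustrative note (not from the original docs): with the default
    // rangeUnits==0.001f, a raw value rangeImage(r,c)==1234 encodes a range
    // of 1.234 m:
    //   const float range_m = obs.rangeImage(r, c) * obs.rangeUnits;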

    /** true: Kinect-like ranges: entries of \a rangeImage are distances
     * along the +X axis; false: Ranges in \a rangeImage are actual
     * distances in 3D.
     */
    bool range_is_depth{true};

    /** Similar to calling "rangeImage.setSize(H,W)", but this method
     * provides memory pooling to speed up the memory allocation. */
    void rangeImage_setSize(const int HEIGHT, const int WIDTH);

    /** Builds a visualization from the rangeImage.
     * The image is built with the given color map (default: grayscale) and
     * such that the colormap range is mapped from 0 meters to the field
     * "maxRange" in this object, unless overridden with the optional
     * parameters. Note that the usage of optional<> allows any parameter to
     * be left to its default by passing `std::nullopt`.
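     *
     * For example (a sketch; cmJET is one of the available
     * mrpt::img::TColormap values):
     * \code
     * mrpt::img::CImage im = obs->rangeImage_getAsImage(mrpt::img::cmJET);
     * \endcode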
     */
    mrpt::img::CImage rangeImage_getAsImage(
        const std::optional<mrpt::img::TColormap> color = std::nullopt,
        const std::optional<float> normMinRange = std::nullopt,
        const std::optional<float> normMaxRange = std::nullopt) const;

    /** @} */

    /** \name Range Matrix external storage functions
     * @{ */
    inline bool rangeImage_isExternallyStored() const
    {
        return m_rangeImage_external_stored;
    }
    inline std::string rangeImage_getExternalStorageFile() const
    {
        return m_rangeImage_external_file;
    }
    void rangeImage_getExternalStorageFileAbsolutePath(
        std::string& out_path) const;
    inline std::string rangeImage_getExternalStorageFileAbsolutePath() const
    {
        std::string tmp;
        rangeImage_getExternalStorageFileAbsolutePath(tmp);
        return tmp;
    }
    /** Users won't normally want to call this; it's only used from internal
     * MRPT programs. \sa EXTERNALS_AS_TEXT */
    void rangeImage_convertToExternalStorage(
        const std::string& fileName, const std::string& use_this_base_dir);
    /** Forces marking this observation as non-externally stored: it doesn't
     * do anything else apart from resetting the corresponding flag (users
     * won't normally want to call this; it's only used from internal MRPT
     * programs) */
    void rangeImage_forceResetExternalStorage()
    {
        m_rangeImage_external_stored = false;
    }
    /** @} */

    /** \name Intensity (RGB) channels
     * @{ */
    /** Enum type for intensityImageChannel */
    enum TIntensityChannelID
    {
        /** Grayscale or RGB visible channel of the camera sensor. */
        CH_VISIBLE = 0,
        /** Infrared (IR) channel */
        CH_IR = 1
    };

    /** true means the field intensityImage contains valid data */
    bool hasIntensityImage{false};

    /** If hasIntensityImage=true, a color or gray-level intensity image of
     * the same size as "rangeImage" */
    mrpt::img::CImage intensityImage;

    /** The source of the intensityImage; typically the visible channel \sa
     * TIntensityChannelID */
    TIntensityChannelID intensityImageChannel{CH_VISIBLE};
    /** @} */

    /** \name Confidence "channel"
     * @{ */
    /** true means the field confidenceImage contains valid data */
    bool hasConfidenceImage{false};
    /** If hasConfidenceImage=true, an image with the "confidence" value
     * [range 0-255] as estimated by the capture drivers. */
    mrpt::img::CImage confidenceImage;
    /** @} */

    /** \name Pixel-wise classification labels (for semantic labeling, etc.)
     * @{ */
    /** Returns true if the field CObservation3DRangeScan::pixelLabels
     * contains a non-NULL smart pointer.
     * To enhance a 3D point cloud with labeling info, just assign an
     * appropriate object to \a pixelLabels
     */
    bool hasPixelLabels() const { return pixelLabels ? true : false; }

    /** All information about pixel labeling is stored in this (smart pointer
     * to) structure; refer to TPixelLabelInfo for details on the contents.
     * The user is responsible for creating a new object of the desired data
     * type. It will be automatically (de)serialized no matter its specific
     * type. */
    TPixelLabelInfoBase::Ptr pixelLabels;

    /** @} */

    /** \name Sensor parameters
     * @{ */
    /** Projection parameters of the depth camera. */
    mrpt::img::TCamera cameraParams;
    /** Projection parameters of the intensity (graylevel or RGB) camera. */
    mrpt::img::TCamera cameraParamsIntensity;

    /** Relative pose of the intensity camera wrt the depth camera (which is
     * the coordinates origin for this observation).
     * In a SwissRanger camera, this will be (0,0,0,-90deg,0,-90deg) since
     * both cameras coincide.
     * In a Kinect, this will include a small lateral displacement and a
     * rotation, according to the drawing on the top of this page.
     * \sa doDepthAndIntensityCamerasCoincide
     */
    mrpt::poses::CPose3D relativePoseIntensityWRTDepth;

    /** Return true if \a relativePoseIntensityWRTDepth equals the pure
     * rotation (0,0,0,-90deg,0,-90deg) (with a small comparison epsilon)
     * \sa relativePoseIntensityWRTDepth
     */
    bool doDepthAndIntensityCamerasCoincide() const;

    /** The maximum range allowed by the device, in meters (e.g. 8.0m,
     * 5.0m,...) */
    float maxRange{5.0f};
    /** The 6D pose of the sensor on the robot. */
    mrpt::poses::CPose3D sensorPose;
    /** The "sigma" error of the device in meters, used while inserting the
     * scan in an occupancy grid. */
    float stdError{0.01f};

    // See base class docs
    void getSensorPose(mrpt::poses::CPose3D& out_sensorPose) const override
    {
        out_sensorPose = sensorPose;
    }
    // See base class docs
    void setSensorPose(const mrpt::poses::CPose3D& newSensorPose) override
    {
        sensorPose = newSensorPose;
    }

    /** @} */  // end sensor params

    /** Removes the distortion in both the depth and intensity images.
     * Intrinsics (fx,fy,cx,cy) remain the same for each image after
     * undistortion.
     */
    void undistort();

    // See base class docs
    void getDescriptionAsText(std::ostream& o) const override;

    /** Very efficient method to swap the contents of two observations. */
    void swap(CObservation3DRangeScan& o);
    /** Extract a ROI of the 3D observation as a new one. \note PixelLabels
     * are *not* copied to the output subimage. */
    void getZoneAsObs(
        CObservation3DRangeScan& obs, const unsigned int& r1,
        const unsigned int& r2, const unsigned int& c1, const unsigned int& c2);

    /** A Levenberg-Marquardt-based optimizer to recover the calibration
     * parameters of a 3D camera given a range (depth) image and the
     * corresponding 3D point cloud.
     * \param camera_offset The offset (in meters) in the +X direction of the
     * point cloud. It's 1cm for SwissRanger SR4000.
     * \return The final average reprojection error per pixel (typ <0.05 px)
     */
    static double recoverCameraCalibrationParameters(
        const CObservation3DRangeScan& in_obs,
        mrpt::img::TCamera& out_camParams, const double camera_offset = 0.01);

    /** Look-up-table struct for project3DPointsFromDepthImageInto() */
    struct TCached3DProjTables
    {
        mrpt::aligned_std_vector<float> Kys, Kzs;
        mrpt::img::TCamera prev_camParams;
    };
    /** 3D point cloud projection look-up-table \sa
     * project3DPointsFromDepthImage */
    static TCached3DProjTables& get_3dproj_lut();

};  // End of class def.

}  // namespace obs
namespace opengl
{
/** Specialization mrpt::opengl::PointCloudAdapter<CObservation3DRangeScan>
 * \ingroup mrpt_adapters_grp */
template <>
class PointCloudAdapter<mrpt::obs::CObservation3DRangeScan>
{
   private:
    mrpt::obs::CObservation3DRangeScan& m_obj;

   public:
    /** The type of each point XYZ coordinates */
    using coords_t = float;
    /** Has any color RGB info? */
    static constexpr bool HAS_RGB = false;
    /** Has native RGB info (as floats)? */
    static constexpr bool HAS_RGBf = false;
    /** Has native RGB info (as uint8_t)? */
    static constexpr bool HAS_RGBu8 = false;

    /** Constructor (accepts a const ref for convenience) */
    inline PointCloudAdapter(const mrpt::obs::CObservation3DRangeScan& obj)
        : m_obj(*const_cast<mrpt::obs::CObservation3DRangeScan*>(&obj))
    {
    }
    /** Get number of points */
    inline size_t size() const { return m_obj.points3D_x.size(); }
    /** Set number of points (to uninitialized values) */
    inline void resize(const size_t N)
    {
        if (N) m_obj.hasPoints3D = true;
        m_obj.resizePoints3DVectors(N);
    }
    /** Does nothing as of now */
    inline void setDimensions(size_t height, size_t width) {}
    /** Get XYZ coordinates of i'th point */
    template <typename T>
    inline void getPointXYZ(const size_t idx, T& x, T& y, T& z) const
    {
        x = m_obj.points3D_x[idx];
        y = m_obj.points3D_y[idx];
        z = m_obj.points3D_z[idx];
    }
    /** Set XYZ coordinates of i'th point */
    inline void setPointXYZ(
        const size_t idx, const coords_t x, const coords_t y, const coords_t z)
    {
        m_obj.points3D_x[idx] = x;
        m_obj.points3D_y[idx] = y;
        m_obj.points3D_z[idx] = z;
    }
    /** Mark the i'th point as invalid: not supported, since this container
     * must remain dense (throws). */
    inline void setInvalidPoint(const size_t idx)
    {
        THROW_EXCEPTION(
            "mrpt::obs::CObservation3DRangeScan requires a dense point cloud");
    }

};  // end of PointCloudAdapter<CObservation3DRangeScan>
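
// A minimal usage sketch of the adapter (illustrative; assumes `obs` is a
// mrpt::obs::CObservation3DRangeScan with hasPoints3D==true):
//   mrpt::opengl::PointCloudAdapter<mrpt::obs::CObservation3DRangeScan>
//       pca(obs);
//   float x, y, z;
//   for (size_t i = 0; i < pca.size(); i++) pca.getPointXYZ(i, x, y, z);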
}  // namespace opengl
}  // namespace mrpt

MRPT_ENUM_TYPE_BEGIN(mrpt::obs::CObservation3DRangeScan::TIntensityChannelID)
MRPT_FILL_ENUM_MEMBER(mrpt::obs::CObservation3DRangeScan, CH_VISIBLE);
MRPT_FILL_ENUM_MEMBER(mrpt::obs::CObservation3DRangeScan, CH_IR);
MRPT_ENUM_TYPE_END()

#include "CObservation3DRangeScan_project3D_impl.h"