Matrix Vision RC_Visard 3D Camera

The BVS 3D-RV0 is a smart 3D camera for robot applications. With an integrated processor and application-specific software modules on the camera, the rc_visard offers self-sufficient 3D image processing.

Valid from horstFX version 07.2021 (the article also applies to earlier versions, but without using the normal vector function):

getSurfaceNormalByQuaternion(Q0, Q1, Q2, Q3);

1. Introduction

Equipped with an integrated processor and application-specific software modules, the rc_visard offers autonomous 3D image processing and can easily be used as a 3D stereo sensor. Processed 3D information such as Pick Points is transmitted directly from the rc_visard to the robot application.

Because the box is detected on the camera side and the gripper is defined in the camera software, a collision check between the robot/gripper and the box can be implemented.

Communication between the robot and camera is done through the REST API.

The setup of the camera itself is covered in detail by the Matrix Vision documentation. This article therefore focuses mainly on the communication between the camera and the robot on the robot side.

2. Setup

The camera requires a power supply of 24 V, min. 24 W and can therefore be supplied directly via the terminals in the robot's control cabinet.

It is important to ensure that the IP addresses of all participants are in the same subnet. For example, with the camera at 192.168.0.112 (the address used in the code examples below), the robot could be assigned an address such as 192.168.0.100 with subnet mask 255.255.255.0. How to set the robot's IP address is described in the article Change IP address.

The camera's web interface can be accessed in a browser, either via the camera's IP address or via its host name. The host name is made up of "rc-visard-" followed by the camera's serial number.

As described in the Matrix Vision documentation, the camera can now be set up and aligned to the robot's coordinate system by means of a manual hand-eye calibration.

The camera outputs the orientation of the components in quaternions. Qw, Qx, Qy, Qz of the camera correspond to Q0, Q1, Q2, Q3 of the robot.

3. Communication

Communication is based on the camera's REST API.

On the robot side, the required functions are provided by the Java class HttpURLConnection.
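
As a minimal illustration of this pattern, the following sketch performs a simple GET request against the camera's REST API, for example to query the status of the Boxpick node. The endpoint used here is an assumption derived from the service URLs in the following sections; the exact paths are listed in the Matrix Vision REST API documentation.

function getNodeStatus()
{
     // Query the Boxpick node (endpoint assumed; see the REST API documentation).
     var url = new java.net.URL("http://192.168.0.112/api/v1/nodes/rc_boxpick");

     var con = url.openConnection();
     con.setRequestMethod("GET");

     // Read and log the response line by line.
     var reader = new java.io.BufferedReader(
          new java.io.InputStreamReader(con.getInputStream()));

     var line;
     while ((line = reader.readLine()) != null) {
          LOG.info(line);
     }
     reader.close();
     con.disconnect();
}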

Next, we will describe the communication using the camera module Boxpick. This example can be easily applied to other camera modules.

3.1. Starting the module:

A variable containing the URL for starting the Boxpick module is created first:

var start_boxpick = "http://192.168.0.112/api/v1/nodes/rc_boxpick/services/start";

Using the following function will activate the Boxpick module and set it to the "run" state.

function startModule()
{
     // Create the URL.
     var url = new java.net.URL(start_boxpick);

     // Open the connection and configure the PUT request.
     connection = url.openConnection();
     connection.setRequestMethod("PUT");
     connection.setDoOutput(true);

     // Send an empty message to start the module.
     var out = new java.io.OutputStreamWriter(connection.getOutputStream());
     out.write("");
     out.close();

     // Reading the input stream completes the request.
     connection.getInputStream();

     // Close the connection.
     connection.disconnect();
}

3.2. Triggering the camera:

Next, a variable containing the URL for triggering the camera is created:

var trigger_boxpick = "http://192.168.0.112/api/v1/nodes/rc_boxpick/services/compute_grasps";

The connection for triggering the camera is opened by the following function. The connection object is stored globally so that messageToCam() and getCameraMessage() can reuse it.

function triggerCam()
{
     var url = new java.net.URL(trigger_boxpick);

     connection = url.openConnection();
     connection.setDoOutput(true);
     connection.setRequestMethod("PUT");

     // Set the request content type to JSON.
     connection.setRequestProperty("Content-Type", "application/json; utf-8");

     // Timeouts of 5 seconds each.
     connection.setConnectTimeout(5000);
     connection.setReadTimeout(5000);
}

The following function sends the trigger message to the camera. The message is in JSON format; the meaning of the individual parameters is described in the Matrix Vision documentation. The gripper (gripper_id) and the load carrier (load_carrier_id) referenced in the message correspond to the gripper and box defined in the camera software, as mentioned in the introduction.

function messageToCam()
{
     var message = {
          "args": {
               "pose_frame": "external",
               "collision_detection": {
                    "gripper_id": "Messspitze_3Module",
                    "pre_grasp_offset": {
                         "x": 0,
                         "y": 0,
                         "z": 0
                    }
               },
               "load_carrier_id": "KLT40x30",
               "suction_surface_width": 0.02,
               "suction_surface_length": 0.02,
               "robot_pose": {},
               "item_models": [
                    {
                         "rectangle": {
                              "min_dimensions": {
                                   "x": 0.05,
                                   "y": 0.05
                              },
                              "max_dimensions": {
                                   "x": 0.07,
                                   "y": 0.07
                              }
                         },
                         "type": "RECTANGLE"
                    }
               ]
          }
     };

     // Create a JSON string.
     message = JSON.stringify(message);

     // Write the trigger message using the OutputStreamWriter.
     var out = new java.io.OutputStreamWriter(connection.getOutputStream());
     out.write(message);
     out.close();
     connection.getInputStream();
}

3.3. Receiving the camera message

The following function is used to read the camera message.

function getCameraMessage()
{
     // Read the camera's response line by line.
     var reader =
          new java.io.BufferedReader(
               new java.io.InputStreamReader(
                    connection.getInputStream()));

     var inputLine;
     var response = new java.lang.StringBuffer();
     while ((inputLine = reader.readLine()) != null) {
          LOG.info(inputLine);
          response.append(inputLine);
     }
     reader.close();

     // Store the complete response as a string for later parsing.
     CamString = response.toString();
}
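
For orientation, an abridged example of the camera's Boxpick response, as it is parsed by getObjectCoordinates() in the next section, could look roughly like this (only the fields accessed below are shown; all values are purely illustrative):

{
     "response": {
          "return_code": { "value": 0 },
          "grasps": [
               {
                    "pose": {
                         "position":    { "x": 0.123, "y": -0.045, "z": 0.380 },
                         "orientation": { "w": 0.998, "x": 0.012, "y": -0.034, "z": 0.051 }
                    }
               }
          ]
     }
}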

3.4. Reading the object parameters

Using the following function, the necessary object parameters are extracted from the camera message, and the normal vector for approaching the reference point is calculated.

function getObjectCoordinates()
{
     // Parse the response string into a JSON object.
     CamResponse = JSON.parse(CamString);

     CamReturnCode = CamResponse.response.return_code.value;

     if (CamReturnCode == "0")
     {
          CamX = parseFloat(CamResponse.response.grasps[0].pose.position.x);
          CamY = parseFloat(CamResponse.response.grasps[0].pose.position.y);
          CamZ = parseFloat(CamResponse.response.grasps[0].pose.position.z);
          CamQ0 = parseFloat(CamResponse.response.grasps[0].pose.orientation.w);
          CamQ1 = parseFloat(CamResponse.response.grasps[0].pose.orientation.x);
          CamQ2 = parseFloat(CamResponse.response.grasps[0].pose.orientation.y);
          CamQ3 = parseFloat(CamResponse.response.grasps[0].pose.orientation.z);

          /*
          show_info("CamX: " + CamX + "\nCamY: " + CamY + "\nCamZ: " + CamZ +
               "\nCamQ0: " + CamQ0 + "\nCamQ1: " + CamQ1 + "\nCamQ2: " + CamQ2 +
               "\nCamQ3: " + CamQ3);
          */

          // Calculate the normal vector of the grasp pose.
          normal = getSurfaceNormalByQuaternion(CamQ0, CamQ1, CamQ2, CamQ3);
     }
     else
     {
          // Show the return code if no valid grasp was returned.
          show_info(CamReturnCode);
     }
}
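
The normal vector calculated here is used in section 4 to offset the approach point above the object. For horstFX versions before 07.2021, which do not provide getSurfaceNormalByQuaternion() (see the note at the beginning of this article), an equivalent helper can be sketched as follows, under the assumption that the normal corresponds to the Z axis of the grasp frame, i.e. the unit vector (0, 0, 1) rotated by the quaternion (Q0, Q1, Q2, Q3):

// Sketch of a surface-normal calculation from a quaternion (q0 = w, q1 = x, q2 = y, q3 = z).
// Assumption: the normal is the Z axis of the grasp frame, i.e. the third column of the
// rotation matrix corresponding to the quaternion.
function surfaceNormalFromQuaternion(q0, q1, q2, q3)
{
     return {
          x: 2.0 * (q1 * q3 + q0 * q2),
          y: 2.0 * (q2 * q3 - q0 * q1),
          z: 1.0 - 2.0 * (q1 * q1 + q2 * q2)
     };
}

For the identity orientation (1, 0, 0, 0) this returns (0, 0, 1). Whether the sign convention matches the built-in function should be verified before using this sketch for the approach offset in moveToObject().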

4. Approaching the object position

Using the following function, the robot moves to the object position detected by the camera. Before the grasp, an approach point above the object is reached; it is offset from the grasp position along the normal vector by the scalar moveFactor (defined elsewhere in the complete program). In combination with the box detection and gripper definition on the camera side, this enables a collision check between the robot/gripper and the box.

function moveToObject()
{
     // Reference point above the box.
     move({
          'Coord': 'JOINT',
          'MoveType': 'JOINT',
          'PoseRelation': 'ABSOLUTE',
          'anyconfiguration': false,
          'blendradius.orient': 180.0,
          'blendradius.xyz': 0.08,
          'speed.ratio': 1.0,
          'target': {'joints': [-0.339172, 48.548237, -7.816559, 0.010483, 49.267242, -14.856720]},
          'tool': 'Messspitze_3Module'
     }, "Waypoint 1");

     // Approach point above the object, offset from the grasp position along the normal vector.
     move({
          "speed.ratio": 1.0,
          "movetype": "JOINT",
          "poserelation": "ABSOLUTE",
          "target": {"xyz+quat": [CamX - (moveFactor * normal.x), CamY - (moveFactor * normal.y), CamZ - (moveFactor * normal.z), CamQ0, CamQ1, CamQ2, CamQ3]},
          "blendradius.xyz": 0.05,
          "blendradius.orient": 180
     });

     // Object position.
     move({
          "speed.ratio": 1.0,
          "movetype": "LINEAR",
          "poserelation": "ABSOLUTE",
          "target": {"xyz+quat": [CamX, CamY, CamZ, CamQ0, CamQ1, CamQ2, CamQ3]},
          "blendradius.xyz": 0.0,
          "blendradius.orient": 180
     });

     sleep(2000);

     // Pick up the object (activate the gripper output).
     output_literal("TOOL_OUTPUT_1", 1.0);

     // Relative departure point: retract 100 mm along the tool's negative Z axis.
     move({
          'Coord': 'CARTESIAN_TCP',
          'MoveType': 'LINEAR',
          'PoseRelation': 'RELATIVE',
          'anyconfiguration': false,
          'blendradius.orient': 180.0,
          'blendradius.xyz': 0.05,
          'speed.ratio': 1.0,
          'target': {'xyz+quat': [0.000000, 0.000000, -0.100000, 1.000000, 0.000000, 0.000000, 0.000000]},
          'tool': 'Messspitze_3Module'
     }, "Relative waypoint 1");
}
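
In the overall program flow in the next section, a place() function is called to deposit the object at the storage position. It is not shown here; it is contained in the complete code in the appendix. A minimal sketch, assuming a fixed joint-space storage position (the joint values are placeholders) and the same gripper output as above, could look like this:

function place()
{
     // Storage position (placeholder joint values).
     move({
          'Coord': 'JOINT',
          'MoveType': 'JOINT',
          'PoseRelation': 'ABSOLUTE',
          'anyconfiguration': false,
          'blendradius.orient': 180.0,
          'blendradius.xyz': 0.0,
          'speed.ratio': 1.0,
          'target': {'joints': [0.0, 30.0, 0.0, 0.0, 45.0, 0.0]},
          'tool': 'Messspitze_3Module'
     }, "Storage position");

     // Release the object (deactivate the gripper output).
     output_literal("TOOL_OUTPUT_1", 0.0);
}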

5. Overall Program Flow

The following code describes a basic program flow by calling the functions mentioned above. The complete code for horstFX is attached in the appendix for reference.

try {

     while (true)
     {
          
        // Camera capture position
          move({
                     'Coord': 'JOINT', 
                     'MoveType': 'JOINT', 
                     'PoseRelation': 'ABSOLUTE', 
                     'anyconfiguration': false, 
                     'speed.ratio': 1.0, 
                     'target': {'joints': [26.043015, 34.412682, 8.933212, 0.010483, 46.667053, -10.683838]}, 
                     'tool': 'Messspitze_3Module'
          }, "Image Capture");
     
        // Initiate the camera module.
          startModule();

        // Trigger the camera
          triggerCam();
     
        // Transfer Request Parameters
          messageToCam();

        // Receive camera message
          getCameraMessage();     
     
        // Close the camera connection.
          connection.disconnect();

        // Retrieve object coordinates
          getObjectCoordinates();
     
        // Approach the object position.
          moveToObject();

        // Drive to the storage position.
          place();
     }     

} catch (e) {
     show_info(e);
} finally {
     connection.disconnect();
}

6. Download

Matrix Vision Box Pick