Hi Kevin,

This is Kirubha again. I am facing an issue when displaying point cloud data from the secondary sensor. I have attached two point cloud images below: the first one is from the primary sensor, which is connected to the same machine; the second is from the secondary sensor, whose data comes from the server machine. When I project the secondary sensor's point cloud on my client machine, the human shows up doubled, and I don't know where the issue is. To make this clear, I have also added my logic below.
Code logic:
```cpp
// Getting the color buffer data from your code, KV2ClientExample.cpp (secondary sensor).
void KV2ClientExample::AcquireAndProcessColorFrame()
{
    IColorFramePtr colorFrame;
    if (colorStreamer->AcquireLatestFrame(&colorFrame))
    {
        UINT bufferSize;
        unsigned char* buffer;
        colorFrame->AccessRawUnderlyingBuffer(&bufferSize, &buffer);
    }
}
```
In our logic, we did the following:
```cpp
BYTE* bufferbyte = nullptr;

// In this function we receive the color buffer data from the secondary sensor.
void AcquireAndProcessColorFrame()
{
    kv2s::IColorFramePtr colorFrame;
    if (colorStreamer->AcquireLatestFrame(&colorFrame))
    {
        UINT bufferSize;
        unsigned char* buffer;
        colorFrame->AccessRawUnderlyingBuffer(&bufferSize, &buffer);
        // Converting unsigned char* to a BYTE buffer.
        bufferbyte = reinterpret_cast<BYTE*>(buffer);
    }
}
```
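Since `bufferbyte` keeps a raw pointer into memory owned by the frame, a safer variant would copy the data into an owned buffer. Here is a minimal sketch, assuming `AccessRawUnderlyingBuffer` leaves ownership of the memory with the frame (`colorCopy` and `AcquireAndProcessColorFrameCopy` are names introduced for illustration):

```cpp
#include <vector>

std::vector<BYTE> colorCopy; // illustrative owned copy of the latest color frame

void AcquireAndProcessColorFrameCopy()
{
    kv2s::IColorFramePtr colorFrame;
    if (colorStreamer->AcquireLatestFrame(&colorFrame))
    {
        UINT bufferSize;
        unsigned char* buffer;
        colorFrame->AccessRawUnderlyingBuffer(&bufferSize, &buffer);
        // Copy while the frame still owns the memory, so the data
        // remains valid after the frame goes away.
        colorCopy.assign(buffer, buffer + bufferSize);
    }
}
```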
```cpp
// This function gets the body index buffer.
void AcquireAndProcessBodyIndexFrame()
{
    IBodyIndexFramePtr bodyIndexFrame;
    signed char* s2buffer;
    if (bodyIndexStreamer->AcquireLatestFrame(&bodyIndexFrame))
    {
        UINT bufferSize;
        bodyIndexFrame->AccessRawUnderlyingBuffer(&bufferSize, &s2buffer);
        // Keep the start of the index buffer before the loop advances s2buffer.
        indexSensor2buffer = reinterpret_cast<BYTE*>(s2buffer);
        unsigned char* output = bodyIndexFrameRenderBuffer;
        const signed char* bufferEnd = s2buffer + DEPTH_MULTICAST_WIDTH * DEPTH_MULTICAST_HEIGHT;
        while (s2buffer < bufferEnd)
        {
            signed char index = *s2buffer;
            *output = color_mapping[3 * (index + 1) + 0]; ++output;
            *output = color_mapping[3 * (index + 1) + 1]; ++output;
            *output = color_mapping[3 * (index + 1) + 2]; ++output;
            ++s2buffer;
        }
        bodyIndexFrame.reset();
    }
}
```
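The same copying approach could apply here, so that `indexSensor2buffer` does not point into a frame that has already been reset. A minimal sketch, with `bodyIndexCopy` and `CopyBodyIndexFrame` as illustrative names, assuming the buffer is 512 * 424 one-byte indices where 0xff means background:

```cpp
#include <vector>

std::vector<BYTE> bodyIndexCopy; // illustrative owned copy, 512 * 424 bytes

void CopyBodyIndexFrame()
{
    IBodyIndexFramePtr bodyIndexFrame;
    if (bodyIndexStreamer->AcquireLatestFrame(&bodyIndexFrame))
    {
        UINT bufferSize;
        signed char* buffer;
        bodyIndexFrame->AccessRawUnderlyingBuffer(&bufferSize, &buffer);
        const BYTE* start = reinterpret_cast<const BYTE*>(buffer);
        // Copy before the frame is reset; 0xff marks pixels with no player.
        bodyIndexCopy.assign(start, start + bufferSize);
        bodyIndexFrame.reset();
    }
}
```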
```cpp
// In this function we display the RGB point cloud data in our viewport.
void PointCloudGL::PointCloudDisplay()
{
    // Color camera intrinsics: focal lengths and principal point, in pixels.
    float fl_x = 1063.118f;
    float fl_y = 1065.233f;
    float pp_x = 962.473f;
    float pp_y = 526.789f;

    // UINT16* depthSensor2buffer: depth data from the secondary sensor.
    // CameraSpacePoint* pCSS2Points: depthSensor2buffer mapped into camera space.
    pCoordinateMapper->MapColorFrameToCameraSpace(512 * 424, depthSensor2buffer, 1920 * 1080, pCSS2Points);
    // DepthSpacePoint* pDepthS2SpaceBuffer: the secondary sensor buffer mapped into depth space.
    pCoordinateMapper->MapColorFrameToDepthSpace(512 * 424, depthSensor2buffer, 1920 * 1080, pDepthS2SpaceBuffer);

    glBegin(GL_POINTS);
    // Looping through the RGB resolution, 1920 x 1080.
    for (int i = 0; i < 1920; i++)
    {
        for (int j = 0; j < 1080; j++)
        {
            int colorIndex = i + (j * 1920);
            if (pDepthS2SpaceBuffer != NULL)
            {
                // Retrieve the depth space point and camera space point for this color pixel.
                DepthSpacePoint p2 = pDepthS2SpaceBuffer[colorIndex];
                const CameraSpacePoint& rPt1 = pCSS2Points[colorIndex];
                if (p2.X != -std::numeric_limits<float>::infinity() && p2.Y != -std::numeric_limits<float>::infinity())
                {
                    int depthX = static_cast<int>(p2.X + 0.5f);
                    int depthY = static_cast<int>(p2.Y + 0.5f);
                    if ((depthX >= 0 && depthX < 512) && (depthY >= 0 && depthY < 424))
                    {
                        BYTE player = indexSensor2buffer[depthX + (depthY * 512)];
                        if (player != 0xff) // 0xff means no player at this pixel
                        {
                            if (rPt1.Z > 0)
                            {
                                // Pinhole back-projection of the color pixel to 3D.
                                float xx = (i - pp_x) * rPt1.Z / fl_x;
                                float yy = (j - pp_y) * rPt1.Z / fl_y;
                                float zz = rPt1.Z;
                                if (Sensor2Selection == 0) // sensor 1 selected
                                {
                                    if (bufferbyte != nullptr)
                                    {
                                        glColor4ub(bufferbyte[3 * colorIndex], bufferbyte[3 * colorIndex + 1], bufferbyte[3 * colorIndex + 2], bufferbyte[3 * colorIndex + 3]);
                                        glVertex3f(xx / 15, -yy / 15, zz / 15);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    glEnd();
}
```
In the logic above, the following code draws the 3D point cloud:

```cpp
float fl_x = 1063.118f;
float fl_y = 1065.233f;
float pp_x = 962.473f;
float pp_y = 526.789f;

xx = (i - pp_x) * rPt1.Z / fl_x; // 3D point X
yy = (j - pp_y) * rPt1.Z / fl_y; // 3D point Y
zz = rPt1.Z;                     // 3D point Z
glColor4ub(bufferbyte[3 * colorIndex], bufferbyte[3 * colorIndex + 1], bufferbyte[3 * colorIndex + 2], bufferbyte[3 * colorIndex + 3]);
glVertex3f(xx, -yy, rPt1.Z);
```
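For clarity, these lines are the standard pinhole back-projection, X = (u - pp_x) * Z / fl_x and Y = (v - pp_y) * Z / fl_y. A self-contained sketch of the same computation for one pixel (`BackProjectPixel` is a name introduced for illustration):

```cpp
#include <cstdio>

// Back-project a color pixel (u, v) at depth Z (meters) through the
// pinhole model, using the intrinsics listed above.
void BackProjectPixel(float u, float v, float Z)
{
    const float fl_x = 1063.118f, fl_y = 1065.233f; // focal lengths (pixels)
    const float pp_x = 962.473f,  pp_y = 526.789f;  // principal point (pixels)

    float X = (u - pp_x) * Z / fl_x;
    float Y = (v - pp_y) * Z / fl_y;
    std::printf("3D point: (%f, %f, %f)\n", X, -Y, Z); // Y flipped as in the viewport code
}
```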
When we use the above logic, we get the following screenshot, named convertedtoours.jpg; if you look at primarysensor.jpg, there is no overlapping of the human.
![convertedtoours](https://cloud.githubusercontent.com/assets/4300012/8271631/f51fd730-183e-11e5-9fff-088150b10ead.jpg)
![primarysensor](https://cloud.githubusercontent.com/assets/4300012/8271632/f650326c-183e-11e5-94ba-da2f01d97a3e.jpg)
In our existing logic, we use the following code to display the point cloud from the primary sensor:

```cpp
glColor4ub(pColorBuffer[4 * colorIndex], pColorBuffer[4 * colorIndex + 1], pColorBuffer[4 * colorIndex + 2], pColorBuffer[4 * colorIndex + 3]);
glVertex3f(xx, -yy, rPt.Z);
```
Note: if we use `4 * colorIndex` for the secondary sensor, we get the following error:

```
Unhandled exception: Access violation reading location
```
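We suspect the secondary color buffer may use a different number of bytes per pixel than the primary one (3 for RGB versus 4 for BGRA), which would make `4 * colorIndex` run past the end of the smaller buffer. Here is a hedged sketch of a stride-aware, bounds-checked lookup, where `SetPointColor`, `bytesPerPixel`, and `bufferSize` are assumptions that would need to be confirmed against the actual stream format:

```cpp
#include <windows.h>
#include <GL/gl.h>

// Sketch: look up a pixel's color with an explicit stride instead of a
// hard-coded 3 or 4; bytesPerPixel is an assumption about the stream format.
void SetPointColor(const BYTE* buffer, UINT bufferSize, int colorIndex, int bytesPerPixel)
{
    UINT offset = static_cast<UINT>(colorIndex) * bytesPerPixel;
    if (buffer == nullptr || offset + bytesPerPixel > bufferSize)
        return; // out of range: this is what triggers the access violation
    // If the stream has no alpha channel, fall back to fully opaque.
    BYTE alpha = (bytesPerPixel == 4) ? buffer[offset + 3] : 255;
    glColor4ub(buffer[offset], buffer[offset + 1], buffer[offset + 2], alpha);
}
```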
Please guide me on how to proceed with our existing logic.

Thanks,
Kiruba