Hi,
As per your suggestion, I have integrated the CloudReco and VideoPlayback code. I also integrated local image detection, so the app recognizes both cloud and local targets and starts playing the video on the target. It works! The only problem is that the rendered video is always wider than the target image; the height is fine. I have pasted the renderFrame code below, please suggest what is wrong with it.
Also, targetPositiveDimensions[currentTarget].data[1] always returns zero. Could this be the cause?
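To check the reported size, I log it (assuming the LOG macro from the sample's SampleUtils.h) right after the imageTarget cast inside the trackable loop:

    const QCAR::Vec2F size = imageTarget.getSize();
    LOG("target %s size: %f x %f", imageTarget.getName(), size.data[0], size.data[1]);

The second value is what ends up in targetPositiveDimensions[currentTarget].data[1], and it always prints 0.0 for me.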
JNIEXPORT void JNICALL
Java_com_example_CloudRecognition_CloudRecoRenderer_renderFrame(JNIEnv *, jobject)
{
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
for (int i=0; i<NUM_TARGETS; i++)
{
isTracking[i] = false;
targetPositiveDimensions[i].data[0] = 0.0;
targetPositiveDimensions[i].data[1] = 0.0;
}
// Try to comment FROM HERE
if (deleteCurrentProductTexture)
{
// Deletes the product texture if necessary
if (productTexture != 0)
{
glDeleteTextures(1, &(productTexture->mTextureID));
delete productTexture;
productTexture = 0;
}
deleteCurrentProductTexture = false;
}
// If the render state indicates that a texture has been generated, create
// the OpenGL texture so we can start drawing the plane with the book data
if (renderState == RS_TEXTURE_GENERATED)
{
generateProductTextureInOpenGL();
}
// Try to comment TO HERE
// Did we find any trackables this frame?
if (state.getNumTrackableResults() > 0)
{
trackingStarted = true;
// If we are already tracking something, we don't need to wait any
// frames before starting the 2D transition when the target gets lost
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition = 0;
pthread_mutex_unlock(&framesToSkipMutex);
// Iterate over all the trackable results found this frame:
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
const QCAR::ImageTarget& imageTarget = (const QCAR::ImageTarget&) trackableResult->getTrackable();
int currentTarget;
if (strcmp(imageTarget.getName(), "stones") == 0)
currentTarget=STONES;
else if (strcmp(imageTarget.getName(), "chips") == 0)
currentTarget=CHIPS;
else
currentTarget=CLOUD;
modelViewMatrixCPP[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
isTracking[currentTarget] = true;
targetPositiveDimensions[currentTarget] = imageTarget.getSize();
// The pose delivers the center of the target, thus the dimensions
// go from -width/2 to width/2, same for height
targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
targetPositiveDimensions[currentTarget].data[1] /= 2.0f;
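// (As noted above, data[1] always comes out as zero here, so everything
// below that depends on the half-height is affected.)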
// If the movie is ready to start playing, has reached the end of
// playback, is not ready yet, or is in an error state, we render the keyframe
if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
(currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
{
QCAR::Matrix44F modelViewMatrixKeyframe =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F modelViewProjectionKeyframe;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
// Here we use the aspect ratio of the keyframe since it
// is likely that it is not a perfect square
float ratio=1.0;
if (textures[currentTarget]->mSuccess)
ratio = keyframeQuadAspectRatio[currentTarget];
else
ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];
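// Defensive check (my addition, not from the original sample): when the
// target reports a zero height, the division above yields ratio == 0 and
// the keyframe quad collapses to a line, so fall back to a square quad.
if (ratio == 0.0f)
    ratio = 1.0f;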
SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
targetPositiveDimensions[currentTarget].data[0]*ratio,
targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrixKeyframe.data[0] ,
&modelViewProjectionKeyframe.data[0]);
glUseProgram(keyframeShaderID);
// Prepare for rendering the keyframe
glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadVertices[0]);
glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadNormals[0]);
glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadTexCoords[0]);
glEnableVertexAttribArray(keyframeVertexHandle);
glEnableVertexAttribArray(keyframeNormalHandle);
glEnableVertexAttribArray(keyframeTexCoordHandle);
glActiveTexture(GL_TEXTURE0);
// The first loaded texture from the assets folder is the keyframe
glBindTexture(GL_TEXTURE_2D, textures[currentTarget]->mTextureID);
glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
(GLfloat*)&modelViewProjectionKeyframe.data[0] );
glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);
// Render
glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &quadIndices[0]);
glDisableVertexAttribArray(keyframeVertexHandle);
glDisableVertexAttribArray(keyframeNormalHandle);
glDisableVertexAttribArray(keyframeTexCoordHandle);
glUseProgram(0);
}
else // In any other case, such as playing or paused, we render the actual contents
{
QCAR::Matrix44F modelViewMatrixVideo =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F modelViewProjectionVideo;
SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixVideo.data[0]);
// Here we use the aspect ratio of the video frame
SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
targetPositiveDimensions[currentTarget].data[0]*videoQuadAspectRatio[currentTarget],
targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixVideo.data[0]);
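// A possible experiment for the width problem (commented out; my own idea,
// not from the sample): scale the quad to the target's reported half-width
// and half-height instead of deriving the height from the video's aspect
// ratio, so the quad matches the printed target exactly (the video would
// then be stretched to fit, and data[1] must be non-zero for this to work):
// SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
//                              targetPositiveDimensions[currentTarget].data[1],
//                              targetPositiveDimensions[currentTarget].data[0],
//                              &modelViewMatrixVideo.data[0]);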
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrixVideo.data[0] ,
&modelViewProjectionVideo.data[0]);
glUseProgram(videoPlaybackShaderID);
// Prepare for rendering the video quad
glVertexAttribPointer(videoPlaybackVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadVertices[0]);
glVertexAttribPointer(videoPlaybackNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadNormals[0]);
if (strcmp(imageTarget.getName(), "stones") == 0)
glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &videoQuadTextureCoordsTransformedStones[0]);
else if (strcmp(imageTarget.getName(), "chips") == 0)
glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &videoQuadTextureCoordsTransformedChips[0]);
else
glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &videoQuadTextureCoordsTransformedCloud[0]);
glEnableVertexAttribArray(videoPlaybackVertexHandle);
glEnableVertexAttribArray(videoPlaybackNormalHandle);
glEnableVertexAttribArray(videoPlaybackTexCoordHandle);
glActiveTexture(GL_TEXTURE0);
// IMPORTANT:
// Notice here that the texture that we are binding is not the
// typical GL_TEXTURE_2D but instead the GL_TEXTURE_EXTERNAL_OES
glBindTexture(GL_TEXTURE_EXTERNAL_OES, videoPlaybackTextureID[currentTarget]);
glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1, GL_FALSE,
(GLfloat*)&modelViewProjectionVideo.data[0]);
glUniform1i(videoPlaybackTexSamplerOESHandle, 0 /*GL_TEXTURE0*/);
// Render
glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &quadIndices[0]);
glDisableVertexAttribArray(videoPlaybackVertexHandle);
glDisableVertexAttribArray(videoPlaybackNormalHandle);
glDisableVertexAttribArray(videoPlaybackTexCoordHandle);
glUseProgram(0);
}
// The following section renders the icons. The actual textures used
// are loaded from the assets folder
if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
(currentStatus[currentTarget] == PAUSED) || (currentStatus[currentTarget] == NOT_READY) ||
(currentStatus[currentTarget] == ERROR))
{
// If the movie is ready to be played, paused, has reached the end, or
// is not ready, we display one of the icons
QCAR::Matrix44F modelViewMatrixButton =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F modelViewProjectionButton;
glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// The inaccuracy of the rendering process on some devices means that
// even if we use the "less or equal" version of the depth function
// we are likely to get ugly artifacts; that is why the translation
// in the Z direction is made slightly different (1.98 instead of 2).
// Another possibility would be to use the depth func "ALWAYS", but
// that is typically not a good idea
SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[1]/1.98f,
&modelViewMatrixButton.data[0]);
SampleUtils::scalePoseMatrix((targetPositiveDimensions[currentTarget].data[1]/2.0f),
(targetPositiveDimensions[currentTarget].data[1]/2.0f),
(targetPositiveDimensions[currentTarget].data[1]/2.0f),
&modelViewMatrixButton.data[0]);
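// Note: the icon translate/scale above depend entirely on data[1], so if
// the reported height really is zero, the icon collapses to a point here
// as well; that would be another symptom of the same getSize() value.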
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrixButton.data[0] ,
&modelViewProjectionButton.data[0]);
glUseProgram(keyframeShaderID);
glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadVertices[0]);
glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadNormals[0]);
glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadTexCoords[0]);
glEnableVertexAttribArray(keyframeVertexHandle);
glEnableVertexAttribArray(keyframeNormalHandle);
glEnableVertexAttribArray(keyframeTexCoordHandle);
glActiveTexture(GL_TEXTURE0);
// Depending on the current status we choose the appropriate texture to
// display. Notice that unlike the video, these are regular
// GL_TEXTURE_2D textures
switch (currentStatus[currentTarget])
{
case READY:
glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
break;
case REACHED_END:
glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
break;
case PAUSED:
glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
break;
case NOT_READY:
glBindTexture(GL_TEXTURE_2D, textures[4]->mTextureID);
break;
case ERROR:
glBindTexture(GL_TEXTURE_2D, textures[5]->mTextureID);
break;
default:
glBindTexture(GL_TEXTURE_2D, textures[4]->mTextureID);
break;
}
glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
(GLfloat*)&modelViewProjectionButton.data[0] );
glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);
// Render
glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
(const GLvoid*) &quadIndices[0]);
glDisableVertexAttribArray(keyframeVertexHandle);
glDisableVertexAttribArray(keyframeNormalHandle);
glDisableVertexAttribArray(keyframeTexCoordHandle);
glUseProgram(0);
// Finally we return the depth func to its original state
glDepthFunc(GL_LESS);
glDisable(GL_BLEND);
}
SampleUtils::checkGlError("VideoPlayback renderFrame");
} // end-of-for-loop
}
else
{
// Manages the 3D to 2D Transition initialization
if (!scanningMode && showAnimation3Dto2D && renderState == RS_NORMAL
&& framesToSkipBeforeRenderingTransition == 0)
{
startTransitionTo2D();
}
// Decrease by one the number of frames to wait before
// triggering the transition
if( framesToSkipBeforeRenderingTransition > 0 && renderState == RS_NORMAL)
{
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition -= 1;
pthread_mutex_unlock(&framesToSkipMutex);
}
}
// Logic for rendering Transition to 2D
if (renderState == RS_TRANSITION_TO_2D && showAnimation3Dto2D)
{
renderTransitionTo2D();
}
// Logic for rendering Transition to 3D
if (renderState == RS_TRANSITION_TO_3D )
{
renderTransitionTo3D();
}
// Get the tracker manager:
QCAR::TrackerManager& trackerManager = QCAR::TrackerManager::getInstance();
// Get the image tracker:
QCAR::ImageTracker* imageTracker = static_cast<QCAR::ImageTracker*>(
trackerManager.getTracker(QCAR::Tracker::IMAGE_TRACKER));
// Get the target finder:
QCAR::TargetFinder* finder = imageTracker->getTargetFinder();
// Render the current state - user progress feedback
if (finder->isRequesting())
{
// Requesting State - Show Requesting text in Status Bar
setStatusBarText("Requesting");
showStatusBar();
}
else
{
// Hiding Status Bar
hideStatusBar();
}
glDisable(GL_DEPTH_TEST);
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
QCAR::Renderer::getInstance().end();
}
See: https://developer.vuforia.com/resources/tutorials/creating-cloud-recognition-apps-unity