Video and Images With Video Play Back
In my code, type == 0 is supposed to trigger video playback and type == 1 is supposed to load an image. Please tell me where I am going wrong:
if (type == 0) {
    modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackable->getPose());
    isTracking[currentTarget] = true;
    targetPositiveDimensions[currentTarget] = imageTarget->getSize();

    // The pose delivers the center of the target, thus the dimensions
    // go from -width/2 to width/2, same for height
    targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
    targetPositiveDimensions[currentTarget].data[1] /= 2.0f;

    // If the movie is ready to start playing or it has reached the end
    // of playback we render the keyframe
    // else
    // In any other case, such as playing or paused, we render the actual contents
    //{
    //type=1;
    LOG("What is going......2");
    isplay = true;

    QCAR::Matrix44F modelViewMatrixVideo =
        QCAR::Tool::convertPose2GLMatrix(trackable->getPose());
    QCAR::Matrix44F modelViewProjectionVideo;

    SampleUtils::translatePoseMatrix(0.0f, 0.0f,
        targetPositiveDimensions[currentTarget].data[0],
        &modelViewMatrixVideo.data[0]);

    // Here we use the aspect ratio of the video frame
    SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
        targetPositiveDimensions[currentTarget].data[0] * videoQuadAspectRatio[currentTarget],
        targetPositiveDimensions[currentTarget].data[0],
        &modelViewMatrixVideo.data[0]);

    SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
        &modelViewMatrixVideo.data[0],
        &modelViewProjectionVideo.data[0]);

    glUseProgram(videoPlaybackShaderID);

    // Prepare for rendering the keyframe
    glVertexAttribPointer(videoPlaybackVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
        (const GLvoid*) &quadVertices[0]);
    glVertexAttribPointer(videoPlaybackNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
        (const GLvoid*) &quadNormals[0]);

    if (strcmp(trackable->getName(), "stones") == 0)
        glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
            (const GLvoid*) &videoQuadTextureCoordsTransformedStones[0]);
    else
        glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
            (const GLvoid*) &videoQuadTextureCoordsTransformedChips[0]);

    glEnableVertexAttribArray(videoPlaybackVertexHandle);
    glEnableVertexAttribArray(videoPlaybackNormalHandle);
    glEnableVertexAttribArray(videoPlaybackTexCoordHandle);

    glActiveTexture(GL_TEXTURE0);
    // IMPORTANT:
    // Notice here that the texture that we are binding is not the
    // typical GL_TEXTURE_2D but instead the GL_TEXTURE_EXTERNAL_OES
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, videoPlaybackTextureID[currentTarget]);
    glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1, GL_FALSE,
        (GLfloat*) &modelViewProjectionVideo.data[0]);

    // Render
    glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
        (const GLvoid*) &quadIndices[0]);

    glDisableVertexAttribArray(videoPlaybackVertexHandle);
    glDisableVertexAttribArray(videoPlaybackNormalHandle);
    glDisableVertexAttribArray(videoPlaybackTexCoordHandle);

    glUseProgram(0);
    //type=0;
}
else if (type == 1)
{
    modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackable->getPose());
    isTracking[currentTarget] = true;
    targetPositiveDimensions[currentTarget] = imageTarget->getSize();

    LOG("Working.....??");
    //type=0;

    QCAR::Matrix44F modelViewMatrixButton =
        QCAR::Tool::convertPose2GLMatrix(trackable->getPose());
    QCAR::Matrix44F modelViewProjectionButton;

    // glDepthFunc(GL_LEQUAL);
    // glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    SampleUtils::translatePoseMatrix(0.0f, 0.0f,
        targetPositiveDimensions[currentTarget].data[1] / 1.98f,
        &modelViewMatrixButton.data[0]);
    SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[1] / 2.0f,
        targetPositiveDimensions[currentTarget].data[1] / 2.0f,
        targetPositiveDimensions[currentTarget].data[1] / 2.0f,
        &modelViewMatrixButton.data[0]);
    SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
        &modelViewMatrixButton.data[0],
        &modelViewProjectionButton.data[0]);

    glUseProgram(keyframeShaderID);

    glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
        (const GLvoid*) &quadVertices[0]);
    glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
        (const GLvoid*) &quadNormals[0]);
    glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
        (const GLvoid*) &quadTexCoords[0]);

    glEnableVertexAttribArray(keyframeVertexHandle);
    glEnableVertexAttribArray(keyframeNormalHandle);
    glEnableVertexAttribArray(keyframeTexCoordHandle);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
    glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
        (GLfloat*) &modelViewProjectionButton.data[0]);
    glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
        (const GLvoid*) &quadIndices[0]);

    glDisableVertexAttribArray(keyframeVertexHandle);
    glDisableVertexAttribArray(keyframeNormalHandle);
    glDisableVertexAttribArray(keyframeTexCoordHandle);

    glUseProgram(0);
}
Hi, I don't see anything wrong with your code (it looks basically like the code in the VideoPlayback sample). However, I think the issue is how you determine the value of type (0 or 1) before entering that portion of code. For instance, you could associate video rendering with a target called "Stones" and image rendering with a target called "Chips", then compare the name of your target to set the appropriate type (video or image). The actual logic is very application specific, though, and how you implement it is up to you.
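A rough sketch of that idea, to be placed where you process each trackable, before the rendering code above. The names "stones" and "chips" are just the ones from the VideoPlayback sample; use the target names defined in your own dataset.

// Sketch only: pick the rendering mode from the target name.
int type = -1;  // -1 = nothing to render for this target
if (strcmp(trackable->getName(), "stones") == 0)
    type = 0;   // this target should play the video
else if (strcmp(trackable->getName(), "chips") == 0)
    type = 1;   // this target should show the image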
One suggestion: add more logs and check with logcat (or from Eclipse) what is going on with your trackables at every frame.
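For example, something as simple as the following, assuming LOG is the printf-style __android_log_print wrapper from the sample and that type has already been set for the current trackable:

// Sketch only: print the trackable name and the computed type every frame,
// so you can follow the decision in logcat.
LOG("Trackable '%s' detected, rendering with type=%d",
    trackable->getName(), type);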