
video and 3d model

December 5, 2012 - 7:33am #1

Hi,

I would like to implement an app with both a 3D model and video. I can't work out how to combine the ImageTargets sample with the VideoPlayback sample. Any suggestion is welcome.

 

Thanks

video and 3d model

November 4, 2014 - 12:57am #39

Please help me. When I try to merge VideoPlayback with ImageTargets, the video does not show, but there are no errors. Here is my code:



// Texture global variables:
int textureCount       = 0;
int texture1_Count     = 0;
Texture** textures     = 0;
Texture** textures1    = 0;

JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv * env, jobject obj)
{
    //LOG("Java_com_qualcomm_QCARSamples_VideoPlayback_GLRenderer_renderFrame");
    // Clear color and depth buffer
 glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // Get the state from QCAR and mark the beginning of a rendering section
 QCAR::State state = QCAR::Renderer::getInstance().begin();
    // Explicitly render the Video Background
 QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);
    
    // We must detect if background reflection is active and adjust the culling direction.
    // If the reflection is active, this means the pose matrix has been reflected as well,
    // therefore standard counter clockwise face culling will result in "inside out" models.
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
    if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
        glFrontFace(GL_CW);  //Front camera
    else
        glFrontFace(GL_CCW);   //Back camera


    for (int i=0; i<NUM_TARGETS; i++)
    {
        isTracking[i] = false;
        targetPositiveDimensions[i].data[0] = 0.0;
        targetPositiveDimensions[i].data[1] = 0.0;
    }

    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
        const QCAR::ImageTarget& imageTarget = (const QCAR::ImageTarget&) trackableResult->getTrackable();
        int currentTarget;
        // We store the modelview matrix to be used later by the tap calculation
        if (strcmp(imageTarget.getName(), "DubaiWelcome") == 0)
            currentTarget=DubaiWelcome;
        else
            currentTarget=Dubai;

        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
        isTracking[currentTarget] = true;
        targetPositiveDimensions[currentTarget] = imageTarget.getSize();
        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;
        const Texture* const thisTexture = textures[currentTarget];
       if (strcmp(imageTarget.getName(), "DubaiWelcome")){
        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
            QCAR::Matrix44F modelViewMatrixKeyframe =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionKeyframe;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                                &modelViewMatrixKeyframe.data[0]);

            // Here we use the aspect ratio of the keyframe since it
            // is likely that it is not a perfect square

            float ratio=1.0;
            if (textures[currentTarget]->mSuccess)
                ratio = keyframeQuadAspectRatio[currentTarget];
            else
                ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];

            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*ratio,
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixKeyframe.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixKeyframe.data[0] ,
                                        &modelViewProjectionKeyframe.data[0]);

            glUseProgram(keyframeShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

            glEnableVertexAttribArray(keyframeVertexHandle);
            glEnableVertexAttribArray(keyframeNormalHandle);
            glEnableVertexAttribArray(keyframeTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // The first loaded texture from the assets folder is the keyframe
            glBindTexture(GL_TEXTURE_2D, textures[currentTarget]->mTextureID);
            glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionKeyframe.data[0] );
            glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);

            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(keyframeVertexHandle);
            glDisableVertexAttribArray(keyframeNormalHandle);
            glDisableVertexAttribArray(keyframeTexCoordHandle);

            glUseProgram(0);
        }
        else // In any other case, such as playing or paused, we render the actual contents
        {
            QCAR::Matrix44F modelViewMatrixVideo =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionVideo;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                             &modelViewMatrixVideo.data[0]);

            // Here we use the aspect ratio of the video frame
            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*videoQuadAspectRatio[currentTarget],
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixVideo.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixVideo.data[0] ,
                                        &modelViewProjectionVideo.data[0]);

            glUseProgram(videoPlaybackShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(videoPlaybackVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(videoPlaybackNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);

            if (strcmp(imageTarget.getName(), "DubaiWelcome") == 0)
                glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &videoQuadTextureCoordsTransformedStones[0]);
            else
                glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &videoQuadTextureCoordsTransformedChips[0]);


            glEnableVertexAttribArray(videoPlaybackVertexHandle);
            glEnableVertexAttribArray(videoPlaybackNormalHandle);
            glEnableVertexAttribArray(videoPlaybackTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // IMPORTANT:
            // Notice here that the texture that we are binding is not the
            // typical GL_TEXTURE_2D but instead the GL_TEXTURE_EXTERNAL_OES
            glBindTexture(GL_TEXTURE_EXTERNAL_OES, videoPlaybackTextureID[currentTarget]);
            glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionVideo.data[0]);
            glUniform1i(videoPlaybackTexSamplerOESHandle, 0 /*GL_TEXTURE0*/);

            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(videoPlaybackVertexHandle);
            glDisableVertexAttribArray(videoPlaybackNormalHandle);
            glDisableVertexAttribArray(videoPlaybackTexCoordHandle);

            glUseProgram(0);

        }

        // The following section renders the icons. The actual textures used
        // are loaded from the assets folder

        if ((currentStatus[currentTarget] == READY)  || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == PAUSED) || (currentStatus[currentTarget] == NOT_READY)   ||
            (currentStatus[currentTarget] == ERROR))
        {
            // If the movie is ready to be played, pause, has reached end or is not
            // ready then we display one of the icons
            QCAR::Matrix44F modelViewMatrixButton =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionButton;

            glDepthFunc(GL_LEQUAL);

            glEnable(GL_BLEND);
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);


            // The inaccuracy of the rendering process on some devices means that
            // even if we use the "Less or Equal" version of the depth function
            // it is likely that we will get ugly artifacts,
            // that is, the translation in the Z direction is slightly different.
            // Another possibility would be to use a depth func "ALWAYS" but
            // that is typically not a good idea
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[1]/1.98f,
                                             &modelViewMatrixButton.data[0]);
            SampleUtils::scalePoseMatrix((targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         (targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         (targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         &modelViewMatrixButton.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixButton.data[0] ,
                                        &modelViewProjectionButton.data[0]);


            glUseProgram(keyframeShaderID);

            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

            glEnableVertexAttribArray(keyframeVertexHandle);
            glEnableVertexAttribArray(keyframeNormalHandle);
            glEnableVertexAttribArray(keyframeTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // Depending on the status in which we are we choose the appropriate
            // texture to display. Notice that unlike the video these are regular
            // GL_TEXTURE_2D textures
            switch (currentStatus[currentTarget])
            {
                case READY:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case REACHED_END:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case PAUSED:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case NOT_READY:
                    glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
                    break;
                case ERROR:
                    glBindTexture(GL_TEXTURE_2D, textures[4]->mTextureID);
                    break;
                default:
                    glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
                    break;
            }
            glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionButton.data[0] );
            glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);

            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(keyframeVertexHandle);
            glDisableVertexAttribArray(keyframeNormalHandle);
            glDisableVertexAttribArray(keyframeTexCoordHandle);

            glUseProgram(0);
              }
              else if(strcmp(imageTarget.getName(), "Dubai")){
                  QCAR::Matrix44F modelViewMatrix =
                        QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
                QCAR::Matrix44F modelViewProjection;
                SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &modelViewMatrix.data[0]);
                SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &modelViewMatrix.data[0]);
                SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &modelViewMatrix.data[0] ,
                                    &modelViewProjection.data[0]);

                 glUseProgram(shaderProgramID);

                 glVertexAttribPointer(
                         vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
                 glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
                 glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);

        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&modelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);
        }
            // Finally we return the depth func to its original state
            glDepthFunc(GL_LESS);
            glDisable(GL_BLEND);
        }

        SampleUtils::checkGlError("VideoPlayback renderFrame");
    }

    glDisable(GL_DEPTH_TEST);

    QCAR::Renderer::getInstance().end();
}

JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_initRendering(
                                                    JNIEnv* env, jobject obj)
{
    LOG("Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_initRendering");

    // Define clear color
    glClearColor(0.0f, 0.0f, 0.0f, QCAR::requiresAlpha() ? 0.0f : 1.0f);

    // Now generate the OpenGL texture objects and add settings
    for (int i = 0; i < textureCount; ++i)
    {
        // Here we create the textures for the keyframe
        // and for all the icons
        glGenTextures(1, &(textures[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures[i]->mWidth,
                textures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures[i]->mData);
    }
    for (int i = 0; i < texture1_Count; ++i)
       {
           // Here we create the textures for the keyframe
           // and for all the icons
           glGenTextures(1, &(textures1[i]->mTextureID));
           glBindTexture(GL_TEXTURE_2D, textures1[i]->mTextureID);
           glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
           glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
           glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
           glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
           glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures1[i]->mWidth,
                   textures1[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                   (GLvoid*)  textures1[i]->mData);
       }
Please guide me, thank you.

video and 3d model

October 8, 2013 - 9:02pm #38

dear Al,

I've tried to mix VP and IT in one project. I get no errors from ndk-build, but when the project runs and detects the marker, the application crashes and closes, and the 3D object does not show. Could you please help me?

Here is my VideoPlayback.cpp code after mixing:

https://www.dropbox.com/s/k5dc32cz0hmzasx/VideoPlayback.cpp

 

thanks,

 

 

regards

video and 3d model

October 7, 2013 - 12:43am #37

@ Fatanku,

If you look at your code, you will see that you are using modelViewMatrix twice: in the first line it is declared as a Matrix44F, while in the third line you are using it as if it were an array of matrices (since you index it with [currentTarget]).

So you need to define a single matrix for the first case and a matrix array for the second case, using different names for the variables.

This is really just about C++ coding (defining variables, local or global, and avoiding name conflicts).

 

video and 3d model

October 6, 2013 - 11:57pm #36

Please help me answer and solve this problem.

video and 3d model

October 5, 2013 - 10:46pm #35

Dear Al and other admins,
I am trying to mix VP with IT in one project.

When I integrate

QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

I always get this error, and I don't know how to solve it.

Install        : libQCAR.so => libs/armeabi/libQCAR.so
Compile++ arm    : VideoPlayback <= VideoPlayback.cpp
jni/VideoPlayback.cpp: In function 'void Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv*, jobject)':
jni/VideoPlayback.cpp:555:64: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:557:60: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:559:59: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
/cygdrive/c/Development/Android/android-ndk-r8c/build/core/build-binary.mk:263: recipe for target `obj/local/armeabi/objs/VideoPlayback/VideoPlayback.o' failed

 

Please help me with this.
Thanks
regards

 

video and 3d model

September 22, 2013 - 7:01am #34

Can you please post your project source code, so we can use it as a reference?

video and 3d model

July 31, 2013 - 3:52pm #33

Could someone help me with this problem?

 

 

Install        : libQCAR.so => libs/armeabi/libQCAR.so
Compile++ arm    : VideoPlayback <= VideoPlayback.cpp
jni/VideoPlayback.cpp: In function 'void Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv*, jobject)':
jni/VideoPlayback.cpp:739:59: error: 'thisTexture' was not declared in this scope
jni/VideoPlayback.cpp:742:60: error: 'modelViewProjection' was not declared in this scope
make: *** [obj/local/armeabi/objs/VideoPlayback/VideoPlayback.o] Error 1
 
here is my code:
QCAR::Matrix44F modelViewProjection;
             modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
                     SampleUtils::translatePoseMatrix((0.0f), (0.0f), (0.0f),
                                                      &modelViewMatrix[currentTarget].data[0]);
                     SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                                  &modelViewMatrix[currentTarget].data[0]);
                     SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                                 &modelViewMatrix[currentTarget].data[0] ,
                                                 &modelViewProjection.data[0]);

                     glUseProgram(shaderProgramID);

                     glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                                  (const GLvoid*) &tazaOBJVerts[0]);
                            glVertexAttribPointer(normalHandle, 0, GL_FLOAT, GL_FALSE, 0,
                                                  (const GLvoid*) &tazaOBJNormals[0]);
                            glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                                  (const GLvoid*) &tazaOBJTexCoords[0]);

                     glEnableVertexAttribArray(vertexHandle);
                     glEnableVertexAttribArray(normalHandle);
                     glEnableVertexAttribArray(textureCoordHandle);

                     glActiveTexture(GL_TEXTURE0);
                             glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
                             glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
                             glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                                                (GLfloat*)&modelViewProjection.data[0] );
                             glDrawArrays(GL_TRIANGLES, 0, tazaOBJNumVerts);

 

 

video and 3d model

July 3, 2013 - 1:51am #32

Which is better and easier: merging VP into IT (ImageTargets), or merging IT into VP?

I would say that VP is way more complex than IT, so, if I were to do that, I would likely start from VP and merge IT into VP.

video and 3d model

July 2, 2013 - 1:24am #31

Hi, Al. I am trying to implement the method below.
The issues I face are:

1. Point 2: adjusting the _initRendering() function

2. Point 5: setting up the shaders both for the video and for the teapot

3. Point 6: merging the rendering code

4. Could you give an example?

5. Which is better and easier: merging VP into IT (ImageTargets), or merging IT into VP?

 

thanks

 

regard

 

AlessandroB wrote:

Hi, this should not be too difficult, these are some guidelines you can follow:

1.  start from the VideoPlayback sample and integrate the few missing elements from ImageTargets

2.  in VideoPlayback.java you can see a method called loadTextures(); this is the same as in ImageTargets.java, however in this case it loads 5 textures which include the video icons, while in ImageTargets.java the texture array contains the 3 textures for the teapot meshes; so you need to adjust this code (for instance you can have two different texture arrays, one for the video icons and one for the teapot meshes)

3.  in VideoPlayback.cpp you need to adjust the _initRendering() function, in particular to create two distinct texture arrays, one for the video icons and one for the teapot meshes (now you just have one texture array called "textures[]"); this should reflect the change done in Java that I mention in previous point;

4.  in VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h", (copy the files from the ImageTargets sample)

5.  in VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders both for video rendering and for the teapot (so you need to merge the shader initialization code from ImageTargets with the shader initialization code from VideoPlayback)

6.  again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with that of the VideoPlayback sample; if you look at the for (...) loop that scans the active trackables, you can see that for each trackable the pose is retrieved and used to set up a modelview matrix; so basically you need to define a modelViewMatrix specifically for the teapot, so as to avoid conflicting with the other modelview matrices, and basically use the same code that you see in ImageTargets, for each trackable:

------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &teapotModelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &teapotModelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &teapotModelViewMatrix.data[0] ,
                                    &teapotModelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
        
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);
       
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);
       
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&teapotModelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);

-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

You may also want to consider adding the QCAR_onUpdate function (see ImageTargets.cpp) in case you want to be able to swap datasets.

It will be a bit of work, but that should be doable.

I hope this helps.
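For point 3, as a minimal sketch, the second texture array in VideoPlayback.cpp could look like the following (the names teapotTextures / teapotTextureCount are illustrative, and the Java-side code that fills this array is not shown):

// A second texture array alongside the existing "textures[]" (illustrative names)
Texture** teapotTextures     = 0;   // teapot mesh textures, as in ImageTargets
int       teapotTextureCount = 0;

// In initRendering(), create GL texture objects for this array with the same
// settings already used for the keyframe and icon textures:
for (int i = 0; i < teapotTextureCount; ++i)
{
    glGenTextures(1, &(teapotTextures[i]->mTextureID));
    glBindTexture(GL_TEXTURE_2D, teapotTextures[i]->mTextureID);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, teapotTextures[i]->mWidth,
                 teapotTextures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                 (GLvoid*) teapotTextures[i]->mData);
}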

 

 

video and 3d model

April 8, 2013 - 6:13am #30

Hi, 

I can tell you that the first few lines are already wrong:

QCAR::Matrix44F modelViewMatrix =
                        QCAR::Tool::convertPose2GLMatrix(result->getPose());
        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

 

As you can see, you define modelViewMatrix as a local matrix, but at the same time you have modelViewMatrix[currentTarget] in the second line, which seems to refer to the global array modelViewMatrix[].

These are basic C++ programming issues; you should try to fix them by reviewing your code with a bit of patience.
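As a minimal sketch, one way to resolve the clash is to keep the global modelViewMatrix[] array from the VideoPlayback sample and give the teapot its own, differently named local matrix (teapotModelViewMatrix is just an illustrative name, matching the snippet in the guidelines quoted earlier in this thread):

// Store the pose in the sample's global array as before (used for the tap calculation)...
modelViewMatrix[currentTarget] =
    QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

// ...and use a separate, locally declared matrix for rendering the teapot:
QCAR::Matrix44F teapotModelViewMatrix =
    QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F teapotModelViewProjection;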

video and 3d model

April 8, 2013 - 6:13am #29

Hi,

thanks for your suggestion.

May I know how you initialised the variables below?

planeVertices
planeNormals
planeTexcoords
planeIndices

video and 3d model

April 8, 2013 - 5:39am #28

 if (!strcmp(imageTarget.getName(), "stones")){

              QCAR::Matrix44F modelViewMatrix =
                        QCAR::Tool::convertPose2GLMatrix(result->getPose());
        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        isTracking[currentTarget] = true;

        targetPositiveDimensions[currentTarget] = imageTarget.getSize();

        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;

        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
            QCAR::Matrix44F modelViewMatrixKeyframe =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionKeyframe;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                                &modelViewMatrixKeyframe.data[0]);

            // Here we use the aspect ratio of the keyframe since it
            // is likely that it is not a perfect square

            float ratio=1.0;
            if (textures[currentTarget]->mSuccess)
                ratio = keyframeQuadAspectRatio[currentTarget];
            else
                ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];

            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*ratio,
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixKeyframe.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixKeyframe.data[0] ,
                                        &modelViewProjectionKeyframe.data[0]);

            glUseProgram(keyframeShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

            glEnableVertexAttribArray(keyframeVertexHandle);
            glEnableVertexAttribArray(keyframeNormalHandle);
            glEnableVertexAttribArray(keyframeTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // The first loaded texture from the assets folder is the keyframe
            glBindTexture(GL_TEXTURE_2D, textures[currentTarget]->mTextureID);
            glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionKeyframe.data[0] );
            glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);
        
            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(keyframeVertexHandle);
            glDisableVertexAttribArray(keyframeNormalHandle);
            glDisableVertexAttribArray(keyframeTexCoordHandle);

            glUseProgram(0);
        }
        }

        else  if (!strcmp(imageTarget.getName(), "chips")){
            int textureIndex;
            if (strcmp(imageTarget.getName(), "chips") == 0)
                    {
                        textureIndex = 0;
                    }
                    else if (strcmp(imageTarget.getName(), "stones") == 0)
                    {
                        textureIndex = 1;
                    }
                    else
                    {
                        textureIndex = 2;
                    }

            const Texture* const thisTexture = textures[textureIndex];
     #ifdef USE_OPENGL_ES_1_1
             // Load projection matrix:
             glMatrixMode(GL_PROJECTION);
             glLoadMatrixf(projectionMatrix.data);

             // Load model view matrix:
             glMatrixMode(GL_MODELVIEW);
             glLoadMatrixf(modelViewMatrix.data);
             glTranslatef(0.f, 0.f, kObjectScale);
             glScalef(kObjectScale, kObjectScale, kObjectScale);

             // Draw object:
             glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
             glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
             glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
             glNormalPointer(GL_FLOAT, 0,  (const GLvoid*) &teapotNormals[0]);
             glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                            (const GLvoid*) &teapotIndices[0]);
     #else

             QCAR::Matrix44F modelViewProjection;

             SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                              &modelViewMatrix.data[0]);
             SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                          &modelViewMatrix.data[0]);
             SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                         &modelViewMatrix.data[0] ,
                                         &modelViewProjection.data[0]);

             glUseProgram(shaderProgramID);

             glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                   (const GLvoid*) &teapotVertices[0]);
             glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                   (const GLvoid*) &teapotNormals[0]);
             glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                   (const GLvoid*) &teapotTexCoords[0]);

             glEnableVertexAttribArray(vertexHandle);
             glEnableVertexAttribArray(normalHandle);
             glEnableVertexAttribArray(textureCoordHandle);

             glActiveTexture(GL_TEXTURE0);
             glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
             glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
             glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                                (GLfloat*)&modelViewProjection.data[0] );
             glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                            (const GLvoid*) &teapotIndices[0]);

             SampleUtils::checkGlError("ImageTargets renderFrame");
     #endif

             }

        else // In any other case, such as playing or paused, we render the actual contents
        {
            QCAR::Matrix44F modelViewMatrixVideo =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionVideo;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                             &modelViewMatrixVideo.data[0]);

            // Here we use the aspect ratio of the video frame
            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*videoQuadAspectRatio[currentTarget],
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixVideo.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixVideo.data[0] ,
                                        &modelViewProjectionVideo.data[0]);

            glUseProgram(videoPlaybackShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(videoPlaybackVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(videoPlaybackNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);

            if (strcmp(imageTarget.getName(), "stones") == 0)
                glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &videoQuadTextureCoordsTransformedStones[0]);
            else
                glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &videoQuadTextureCoordsTransformedChips[0]);

            glEnableVertexAttribArray(videoPlaybackVertexHandle);
            glEnableVertexAttribArray(videoPlaybackNormalHandle);
            glEnableVertexAttribArray(videoPlaybackTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // IMPORTANT:
            // Notice here that the texture that we are binding is not the
            // typical GL_TEXTURE_2D but instead the GL_TEXTURE_EXTERNAL_OES
            glBindTexture(GL_TEXTURE_EXTERNAL_OES, videoPlaybackTextureID[currentTarget]);
            glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionVideo.data[0]);
            glUniform1i(videoPlaybackTexSamplerOESHandle, 0 /*GL_TEXTURE0*/);

            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(videoPlaybackVertexHandle);
            glDisableVertexAttribArray(videoPlaybackNormalHandle);
            glDisableVertexAttribArray(videoPlaybackTexCoordHandle);

            glUseProgram(0);

        }

        // The following section renders the icons. The actual textures used
        // are loaded from the assets folder

        if ((currentStatus[currentTarget] == READY)  || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == PAUSED) || (currentStatus[currentTarget] == NOT_READY)   ||
            (currentStatus[currentTarget] == ERROR))
        {
            // If the movie is ready to be played, pause, has reached end or is not
            // ready then we display one of the icons
            const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
            QCAR::Matrix44F modelViewMatrixButton =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionButton;

            glDepthFunc(GL_LEQUAL);

            glEnable(GL_BLEND);
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

            // The inaccuracy of the rendering process on some devices means that
            // even if we use the "Less or Equal" version of the depth function
            // it is likely that we will get ugly artifacts,
            // that is, the translation in the Z direction is slightly different.
            // Another possibility would be to use a depth func "ALWAYS" but
            // that is typically not a good idea
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[1]/1.98f,
                                             &modelViewMatrixButton.data[0]);
            SampleUtils::scalePoseMatrix((targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         (targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         (targetPositiveDimensions[currentTarget].data[1]/2.0f),
                                         &modelViewMatrixButton.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixButton.data[0] ,
                                        &modelViewProjectionButton.data[0]);

            glUseProgram(keyframeShaderID);

            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

            glEnableVertexAttribArray(keyframeVertexHandle);
            glEnableVertexAttribArray(keyframeNormalHandle);
            glEnableVertexAttribArray(keyframeTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // Depending on the status in which we are we choose the appropriate
            // texture to display. Notice that unlike the video these are regular
            // GL_TEXTURE_2D textures
            switch (currentStatus[currentTarget])
            {
                case READY:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case REACHED_END:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case PAUSED:
                    glBindTexture(GL_TEXTURE_2D, textures[2]->mTextureID);
                    break;
                case NOT_READY:
                    glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
                    break;
                case ERROR:
                    glBindTexture(GL_TEXTURE_2D, textures[4]->mTextureID);
                    break;
                default:
                    glBindTexture(GL_TEXTURE_2D, textures[3]->mTextureID);
                    break;
            }
            glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionButton.data[0] );
            glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);

            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(keyframeVertexHandle);
            glDisableVertexAttribArray(keyframeNormalHandle);
            glDisableVertexAttribArray(keyframeTexCoordHandle);

            glUseProgram(0);

            // Finally we return the depth func to its original state
            glDepthFunc(GL_LESS);
            glDisable(GL_BLEND);
        }

        SampleUtils::checkGlError("VideoPlayback renderFrame");
    }

    glDisable(GL_DEPTH_TEST);

    QCAR::Renderer::getInstance().end();
}
 

Above is my code.

video and 3d model

April 8, 2013 - 3:59am #27

Thanks for the code; the error log seems to suggest that you have not defined the modelViewMatrix array... could you check?

 

video and 3d model

April 8, 2013 - 3:36am #26

I am using this for displaying an image instead of a 3D model; it may help you:


else if (strcmp(imageTarget.getName(), "media2") == 0 || strcmp(imageTarget.getName(), "media3") == 0 )
        {
        	
        	int textureIndex;
        	if(strcmp(imageTarget.getName(), "media2") == 0)
        	{
        		currentTarget=media2;
        		  textureIndex = 0;
        	}
        	else if(strcmp(imageTarget.getName(), "media3") == 0)
        	{
        		currentTarget=media3;
        		  textureIndex = 1;
        	}
        	
        	const Texture* const thisTexture = textures[textureIndex];
        	

        	modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        	// assuming this is an image target
			QCAR::Vec2F targetSize = imageTarget.getSize();
			QCAR::Matrix44F modelViewProjection;
			SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
			                                 &modelViewMatrix[currentTarget].data[0]);

			SampleUtils::scalePoseMatrix(targetSize.data[0], targetSize.data[1], 1.0f,
			                             &modelViewMatrix[currentTarget].data[0]);

			SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
			                            &modelViewMatrix[currentTarget].data[0] ,
			                            &modelViewProjection.data[0]);
		
			glUseProgram(shaderProgramID);
			glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
			                      (const GLvoid*) &planeVertices[0]);
			glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
			                      (const GLvoid*) &planeNormals[0]);
			glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
			                      (const GLvoid*) &planeTexcoords[0]);
			glEnableVertexAttribArray(vertexHandle);
			glEnableVertexAttribArray(normalHandle);
			glEnableVertexAttribArray(textureCoordHandle);
	
			glActiveTexture(GL_TEXTURE0);
			glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
			glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
			                   (GLfloat*)&modelViewProjection.data[0] );
			glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT,
			               (const GLvoid*) &planeIndices[0]); 
	             

        }
    }
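
Note that the planeVertices, planeNormals, planeTexcoords and planeIndices arrays used above are not part of the stock VideoPlayback sample; as a minimal sketch, a textured unit quad like the following would work with this code (the exact values are an assumption, not taken from the poster's project):

// A 1x1 quad centred on the target, lying in the target plane (z = 0);
// it is scaled to the target size by the scalePoseMatrix() call above.
static const float planeVertices[] =
{
    -0.5f, -0.5f, 0.0f,
     0.5f, -0.5f, 0.0f,
     0.5f,  0.5f, 0.0f,
    -0.5f,  0.5f, 0.0f,
};

static const float planeNormals[] =
{
    0.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f,
    0.0f, 0.0f, 1.0f,
};

static const float planeTexcoords[] =
{
    0.0f, 0.0f,
    1.0f, 0.0f,
    1.0f, 1.0f,
    0.0f, 1.0f,
};

// Two triangles, matching the glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, ...) call above.
static const unsigned short planeIndices[] =
{
    0, 1, 2,
    0, 2, 3,
};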

 

video and 3d model

April 8, 2013 - 1:32am #25

Now I get the error below.

Install        : libQCAR.so => libs/armeabi/libQCAR.so
Compile++ arm    : VideoPlayback <= VideoPlayback.cpp
jni/VideoPlayback.cpp: In function 'void Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv*, jobject)':
jni/VideoPlayback.cpp:555:64: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:557:60: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:559:59: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
/cygdrive/c/Development/Android/android-ndk-r8c/build/core/build-binary.mk:263: recipe for target `obj/local/armeabi/objs/VideoPlayback/VideoPlayback.o' failed
 

How should I resolve this?

video and 3d model

April 8, 2013 - 12:14am #24

Hi, when you see an error like

'kObjectScale' was not declared in this scope

that means that you have not declared the kObjectScale variable; similarly for the other errors that you are seeing.

For the teapotVertices error, you forgot to include "teapot.h".
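
As a minimal sketch, the missing pieces behind those "not declared in this scope" errors look roughly like this in VideoPlayback.cpp (copied from the ImageTargets sample conventions; the kObjectScale value is the one that sample uses, so adjust it if yours differs):

#include "teapot.h"        // teapotVertices, teapotNormals, teapotTexCoords,
                           // teapotIndices, NUM_TEAPOT_OBJECT_INDEX
#include "CubeShaders.h"   // cubeMeshVertexShader, cubeFragmentShader

// Scale factor used for the teapot in the ImageTargets sample
static const float kObjectScale = 3.f;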

 

video and 3d model

April 7, 2013 - 11:51pm #23

Hi, I am trying to merge the rendering code of the teapot (from ImageTargets) into VideoPlayback.cpp.

I encounter some errors:

jni/VideoPlayback.cpp: In function 'void Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv*, jobject)':
jni/VideoPlayback.cpp:551:54: error: 'kObjectScale' was not declared in this scope
jni/VideoPlayback.cpp:552:59: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:554:55: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:556:54: error: request for member 'data' in 'modelViewMatrix', which is of non-class type 'QCAR::Matrix44F [2]'
jni/VideoPlayback.cpp:562:48: error: 'teapotVertices' was not declared in this scope
jni/VideoPlayback.cpp:564:48: error: 'teapotNormals' was not declared in this scope
jni/VideoPlayback.cpp:566:48: error: 'teapotTexCoords' was not declared in this scope
jni/VideoPlayback.cpp:573:38: error: 'thisTexture' was not declared in this scope
jni/VideoPlayback.cpp:577:38: error: 'NUM_TEAPOT_OBJECT_INDEX' was not declared in this scope
jni/VideoPlayback.cpp:578:41: error: 'teapotIndices' was not declared in this scope
 

I got these errors after modifying the code in VideoPlayback.cpp as follows:

      if (!strcmp(imageTarget.getName(), "stones")){
        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        isTracking[currentTarget] = true;

        targetPositiveDimensions[currentTarget] = imageTarget.getSize();

        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;

        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
            QCAR::Matrix44F modelViewMatrixKeyframe =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionKeyframe;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                                &modelViewMatrixKeyframe.data[0]);

            // Here we use the aspect ratio of the keyframe since it
            // is likely that it is not a perfect square

            float ratio=1.0;
            if (textures[currentTarget]->mSuccess)
                ratio = keyframeQuadAspectRatio[currentTarget];
            else
                ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];

            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*ratio,
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixKeyframe.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixKeyframe.data[0] ,
                                        &modelViewProjectionKeyframe.data[0]);

            glUseProgram(keyframeShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

            glEnableVertexAttribArray(keyframeVertexHandle);
            glEnableVertexAttribArray(keyframeNormalHandle);
            glEnableVertexAttribArray(keyframeTexCoordHandle);

            glActiveTexture(GL_TEXTURE0);

            // The first loaded texture from the assets folder is the keyframe
            glBindTexture(GL_TEXTURE_2D, textures[currentTarget]->mTextureID);
            glUniformMatrix4fv(keyframeMVPMatrixHandle, 1, GL_FALSE,
                               (GLfloat*)&modelViewProjectionKeyframe.data[0] );
            glUniform1i(keyframeTexSampler2DHandle, 0 /*GL_TEXTURE0*/);
        
            // Render
            glDrawElements(GL_TRIANGLES, NUM_QUAD_INDEX, GL_UNSIGNED_SHORT,
                           (const GLvoid*) &quadIndices[0]);

            glDisableVertexAttribArray(keyframeVertexHandle);
            glDisableVertexAttribArray(keyframeNormalHandle);
            glDisableVertexAttribArray(keyframeTexCoordHandle);

            glUseProgram(0);
        }
        }
        else  if (!strcmp(imageTarget.getName(), "chips")){

#ifdef USE_OPENGL_ES_1_1
        // Load projection matrix:
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // Load model view matrix:
        glMatrixMode(GL_MODELVIEW);
        glLoadMatrixf(modelViewMatrix.data);
        glTranslatef(0.f, 0.f, kObjectScale);
        glScalef(kObjectScale, kObjectScale, kObjectScale);

        // Draw object:
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
        glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
        glNormalPointer(GL_FLOAT, 0,  (const GLvoid*) &teapotNormals[0]);
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);
#else

        QCAR::Matrix44F modelViewProjection;

        SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &modelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &modelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &modelViewMatrix.data[0] ,
                                    &modelViewProjection.data[0]);

        glUseProgram(shaderProgramID);

        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);

        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&modelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);

        SampleUtils::checkGlError("ImageTargets renderFrame");
#endif

        }

 

How can I resolve these errors?

video and 3d model

March 27, 2013 - 11:14am #22

Hi, thanks for the reference to the threads; first thing, in rassall_jubair's steps, the first step mentions VideoPlayback.cpp but the code is in Java, so it should go into VideoPlayback.java.

The other steps are instead in CPP, that's fine.

What you need to do is integrate one little bit of code at a time and recompile, so you can see at which point the build errors appear;

then you can narrow down the problem. If you try to integrate all the code at once and then rebuild, you won't be able to tell where the problem is.

video and 3d model

March 27, 2013 - 10:08am #21

Below are the threads.

 

rassall_jubair wrote:

Hi,

I am also trying to merge ImageTargets into the VideoPlayback app so that my application can display a 3D model and play a video at the same time.

For this I have made the following changes according to AlessandroB's guidelines. Can anybody please check whether I did anything wrong here?

1. In videoplayback.cpp :

    add another texture
    
    mTextures1 = new Vector<Texture>();

and added the following images in

private void loadTextures()

        {  mTextures.add(Texture.loadTextureFromApk("TextureTeapotBrass.png",
                                                 getAssets()));
              mTextures.add(Texture.loadTextureFromApk("TextureTeapotBlue.png",
                                                 getAssets()));
               mTextures.add(Texture.loadTextureFromApk("TextureTeapotRed.png",
                getAssets()));

}

2. In VideoPlayback.cpp you need to adjust the _initRendering() function.....:

    Texture** textures1 =0;

    for (int i = 0; i < textureCount; ++i)
    {
        // Here we create the textures for the keyframe
        // and for all the icons
        glGenTextures(1, &(textures[i]->mTextureID));
      glGenTextures(1, &(textures1[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures[i]->mTextureID);
      glBindTexture(GL_TEXTURE_2D, textures1[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures[i]->mWidth,
                textures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures[i]->mData);
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures1[i]->mWidth,
                textures1[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures1[i]->mData);

    }

4. In VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h".....:
        
        #include "Texture.h"
        #include "CubeShaders.h"

5. In VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders for both:

    keyframeShaderID                = SampleUtils::createProgramFromBuffer(
                                                keyframeVertexShader,
                                                keyframeFragmentShader);
    keyframeVertexHandle            = glGetAttribLocation(keyframeShaderID,
                                                "vertexPosition");
    keyframeNormalHandle            = glGetAttribLocation(keyframeShaderID,
                                                "vertexNormal");
    keyframeTexCoordHandle          = glGetAttribLocation(keyframeShaderID,
                                                "vertexTexCoord");
    keyframeMVPMatrixHandle         = glGetUniformLocation(keyframeShaderID,
                                                "modelViewProjectionMatrix");
    keyframeTexSampler2DHandle      = glGetUniformLocation(keyframeShaderID,
                                                "texSampler2D");

    keyframeQuadAspectRatio[STONES] = (float)textures[0]->mHeight / (float)textures[0]->mWidth;
    keyframeQuadAspectRatio[CHIPS]  = (float)textures[1]->mHeight / (float)textures[1]->mWidth;

    shaderProgramID     = SampleUtils::createProgramFromBuffer(cubeMeshVertexShader,
                                                            cubeFragmentShader);

    vertexHandle        = glGetAttribLocation(shaderProgramID,
                                                "vertexPosition");
    normalHandle        = glGetAttribLocation(shaderProgramID,
                                                "vertexNormal");
    textureCoordHandle  = glGetAttribLocation(shaderProgramID,
                                                "vertexTexCoord");
    mvpMatrixHandle     = glGetUniformLocation(shaderProgramID,
                                                "modelViewProjectionMatrix");
    texSampler2DHandle  = glGetUniformLocation(shaderProgramID,
                                                "texSampler2D");

6. Again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with the following:

if (!strcmp(trackable->getName(), "stones")){

modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        isTracking[currentTarget] = true;

        targetPositiveDimensions[currentTarget] = imageTarget.getSize();

        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;

        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
.....................
..............}

else  if (!strcmp(trackable->getName(), "chips")){

QCAR::Matrix44F modelViewProjection;

        SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &modelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &modelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &modelViewMatrix.data[0] ,
                                    &modelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
         
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &macbookVerts[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &macbookNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,

..................
..................

}
 

Thanks

Regards

 

Rassall

 

AlessandroB wrote:

Hi, this should not be too difficult, these are some guidelines you can follow:

- start from the VideoPlayback sample and integrate the few missing elements from ImageTargets

- in VideoPlayback.java you can see a method called loadTextures(); this is the same as in ImageTargets.java, however in this case it loads 5 textures which include the video icons, while in ImageTargets.java the texture array contains the 3 textures for the teapot meshes; so you need to adjust this code (for instance you can have two different texture arrays, one for the video icons and one for the teapot meshes)

- in VideoPlayback.cpp you need to adjust the _initRendering() function, in particular to create two distinct texture arrays, one for the video icons and one for the teapot meshes (now you just have one texture array called "textures[]"); this should reflect the change done in Java that I mentioned in the previous point;

- in VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h", (copy the files from the ImageTargets sample)

- in VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders both for video rendering and for the teapot (so you need to merge the shader initialization code from ImageTargets with the shader initialization code from VideoPlayback)

- again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with the one of the VideoPlayback sample; if you look at the for (...) loop that scans the active trackables, you can see that for each trackable the pose is retrieved and used to set up a modelview matrix; so you need to define a modelViewMatrix specifically for the teapot, so as to avoid conflicting with the other modelview matrices, and basically use the same code that you see in ImageTargets, for each trackable:

SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &teapotModelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &teapotModelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &teapotModelViewMatrix.data[0] ,
                                    &teapotModelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
        
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);
       
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);
       
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&teapotModelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);

You may also want to consider adding the QCAR_onUpdate function (see ImageTargets.cpp) in case you want to be able to swap datasets.

It will be a bit of work, but that should be doable.

I hope this helps.

 

 

video and 3d model

March 27, 2013 - 10:02am #20

Thanks.

The makefile looks OK, so it did not change.

You were referring to some steps posted by rassall_jubair. 

Can you point me to the thread where these steps are described?

 

video and 3d model

March 27, 2013 - 9:17am #19

OK, this is my Android.mk file.

#==============================================================================
#            Copyright (c) 2012-2013 QUALCOMM Austria Research Center GmbH.
#            All Rights Reserved.
#            Qualcomm Confidential and Proprietary
#
# This  Vuforia(TM) sample application in source code form ("Sample Code") for the
# Vuforia Software Development Kit and/or Vuforia Extension for Unity
# (collectively, the "Vuforia SDK") may in all cases only be used in conjunction
# with use of the Vuforia SDK, and is subject in all respects to all of the terms
# and conditions of the Vuforia SDK License Agreement, which may be found at
# https://developer.vuforia.com/legal/license.
#
# By retaining or using the Sample Code in any manner, you confirm your agreement
# to all the terms and conditions of the Vuforia SDK License Agreement.  If you do
# not agree to all the terms and conditions of the Vuforia SDK License Agreement,
# then you may not retain or use any of the Sample Code in any manner.
#
#==============================================================================

# An Android.mk file must begin with the definition of the LOCAL_PATH
# variable. It is used to locate source files in the development tree. Here
# the macro function 'my-dir' (provided by the build system) is used to return
# the path of the current directory.

LOCAL_PATH := $(call my-dir)

# The following section is used for copying the libQCAR.so prebuilt library
# into the appropriate folder (libs/armeabi and libs/armeabi-v7a respectively)
# and setting the include path for library-specific header files.

include $(CLEAR_VARS)
LOCAL_MODULE := QCAR-prebuilt
LOCAL_SRC_FILES = ../../../build/lib/$(TARGET_ARCH_ABI)/libQCAR.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/../../../build/include
include $(PREBUILT_SHARED_LIBRARY)

#-----------------------------------------------------------------------------

# The CLEAR_VARS variable is provided by the build system and points to a
# special GNU Makefile that will clear many LOCAL_XXX variables for you
# (e.g. LOCAL_MODULE, LOCAL_SRC_FILES, LOCAL_STATIC_LIBRARIES, etc...),
# with the exception of LOCAL_PATH. This is needed because all build
# control files are parsed in a single GNU Make execution context where
# all variables are global.

include $(CLEAR_VARS)

# The LOCAL_MODULE variable must be defined to identify each module you
# describe in your Android.mk. The name must be *unique* and not contain
# any spaces. Note that the build system will automatically add proper
# prefix and suffix to the corresponding generated file. In other words,
# a shared library module named 'foo' will generate 'libfoo.so'.

LOCAL_MODULE := VideoPlayback

# This sample always uses OpenGL ES 2.0.

OPENGLES_LIB  := -lGLESv2
OPENGLES_DEF  := -DUSE_OPENGL_ES_2_0

# An optional set of compiler flags that will be passed when building
# C and C++ source files.
#
# The flag "-Wno-write-strings" removes warnings about deprecated conversion
#   from string constant to 'char*'.
# The flag "-Wno-psabi" removes warning about "mangling of 'va_list' has
#   changed in GCC 4.4" when compiled with certain Android NDK versions.

LOCAL_CFLAGS := -Wno-write-strings -Wno-psabi $(OPENGLES_DEF)

# The list of additional linker flags to be used when building your
# module. Use the "-l" prefix in front of the name of libraries you want to
# link to your module.

LOCAL_LDLIBS := \
    -llog $(OPENGLES_LIB)

# The list of shared libraries this module depends on at runtime.
# This information is used at link time to embed the corresponding information
# in the generated file. Here we reference the prebuilt library defined earlier
# in this makefile.

LOCAL_SHARED_LIBRARIES := QCAR-prebuilt

# The LOCAL_SRC_FILES variables must contain a list of C/C++ source files
# that will be built and assembled into a module. Note that you should not
# list header file and included files here because the build system will
# compute dependencies automatically for you, just list the source files
# that will be passed directly to a compiler.

LOCAL_SRC_FILES := VideoPlayback.cpp SampleUtils.cpp Texture.cpp SampleMath.cpp

# By default, ARM target binaries will be generated in 'thumb' mode, where
# each instruction is 16-bit wide. You can set this variable to 'arm' to
# set the generation of the module's object files to 'arm' (32-bit
# instructions) mode, resulting in potentially faster yet somewhat larger
# binary code.

LOCAL_ARM_MODE := arm

# BUILD_SHARED_LIBRARY is a variable provided by the build system that
# points to a GNU Makefile script being in charge of collecting all the
# information you have defined in LOCAL_XXX variables since the latest
# 'include $(CLEAR_VARS)' statement, determining what and how to build.
# Replace it with the statement BUILD_STATIC_LIBRARY to generate a static
# library instead.

include $(BUILD_SHARED_LIBRARY)
 

video and 3d model

March 27, 2013 - 8:36am #18

It looks like you have changed the Android.mk in a way that breaks the build; can you share the changes you made to the Android.mk?

 

video and 3d model

March 27, 2013 - 7:23am #17

Yes, at first it was building OK. I followed the steps posted by rassall_jubair. I noticed that every time I compile the project a second time, this error comes up, but I do not know why.

video and 3d model

March 27, 2013 - 4:30am #16

Have you started from one of the two samples? Was it building OK at first?

If yes, then you are maybe integrating it in the wrong way;

all you need to do in general is take some code from the second sample and port that code into the existing sample code from which you started;

when you do that, you need to pay attention to rename the JNI C++ functions so that they reflect the package name of the corresponding Java classes into which you are porting;

also, if you port an entire file (for instance ImageTargets.cpp) from one project to another, you need to add that file to the Android.mk file.

If you do it incrementally, step by step, you should be able to compile without problems.
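
For example (a hypothetical sketch, assuming you port code from ImageTargets.cpp into the VideoPlayback project): a native function whose original name is

// Original entry point in ImageTargets.cpp (standard sample package name assumed)
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_ImageTargets_ImageTargetsRenderer_renderFrame(JNIEnv* env, jobject obj)

would need to be renamed so that it matches the package and class that declare the native method in the project you are porting into, e.g.

// Renamed to match the VideoPlayback renderer class that declares the native method
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv* env, jobject obj)

and, if you copy the whole ImageTargets.cpp file across, it would also have to be listed in the Android.mk, e.g.

# Hypothetical: only needed if ImageTargets.cpp is actually added to the project
LOCAL_SRC_FILES := VideoPlayback.cpp ImageTargets.cpp SampleUtils.cpp Texture.cpp SampleMath.cpp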

 

video and 3d model

March 27, 2013 - 4:16am #15

Oh, because I am working on this, which is combining ImageTargets and VideoPlayback. When I try to compile with Cygwin, I encounter this problem.

video and 3d model

March 27, 2013 - 4:13am #14

Hi feilechuu, can you create a new thread on this issue?

I don't see how it fits with the current discussion topic.

Thank you.

video and 3d model

March 27, 2013 - 3:51am #13

Hi, I am running into some problems compiling with Cygwin.

 

Android NDK: ERROR:jni/Android.mk:QCAR-prebuilt: LOCAL_SRC_FILES points to a missing file
Android NDK: Check that jni/../../../build/lib/armeabi/libQCAR.so exists  or that its path is correct
/cygdrive/c/Development/Android/android-ndk-r8c/build/core/prebuilt-library.mk:43: *** Android NDK: Aborting    .  Stop.
 

What is this error about?

video and 3d model

March 6, 2013 - 10:51am #12

Great! Glad to be helpful.

video and 3d model

March 5, 2013 - 5:18pm #11

Thanks for the help. I have done this. Now the 3D model and video work in the same application.

 

Regards

Rassall

video and 3d model

March 4, 2013 - 5:20am #10

Hi, in your OpenGL code I can see a clear mistake in this code snippet:

Texture** textures1 =0;

    for (int i = 0; i < textureCount; ++i)
    {
        // Here we create the textures for the keyframe
        // and for all the icons
        glGenTextures(1, &(textures[i]->mTextureID));
      glGenTextures(1, &(textures1[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures[i]->mTextureID);
      glBindTexture(GL_TEXTURE_2D, textures1[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures[i]->mWidth,
                textures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures[i]->mData);
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures1[i]->mWidth,
                textures1[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures1[i]->mData);
    }

 

The problem there is the fact that you are mixing the texture binding of "textures" with the texture binding of "textures1" (for instance, you can see that you call glBindTexture(... textures[i]->mTextureID) and, in the line immediately after, you call glBindTexture(... textures1[i]->mTextureID));

this is incorrect because OpenGL does not work that way; when you bind a texture, every subsequent OpenGL call will refer to the currently bound texture;

what you need to do instead is to write two distinct "for" loops, i.e. one loop like this:

 

for (int i = 0; i < textureCount; ++i)
{ 

and then another loop just for "textures1":

 

for (int i = 0; i < texture1_Count; ++i)
{

Note: there might be other errors at the OpenGL level, but playing with OpenGL code typically requires some mastery of the OpenGL API, otherwise it can be really tricky.
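
Putting it together, a minimal sketch of the two separate loops (this assumes that textures1 has been allocated and filled from the second Java texture vector, and that texture1_Count holds its size, as in the variables used elsewhere in this thread):

    // First loop: only the keyframe / video icon textures
    for (int i = 0; i < textureCount; ++i)
    {
        glGenTextures(1, &(textures[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures[i]->mWidth,
                     textures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     (GLvoid*) textures[i]->mData);
    }

    // Second loop: only the teapot textures, each one bound while it is being filled
    for (int i = 0; i < texture1_Count; ++i)
    {
        glGenTextures(1, &(textures1[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures1[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures1[i]->mWidth,
                     textures1[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     (GLvoid*) textures1[i]->mData);
    }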

 

video and 3d model

March 4, 2013 - 4:53am #9

Hi,

I am also trying to merge ImageTargets into the VideoPlayback app so that my application can display a 3D model and play video at the same time.

For this I have made the following changes according to AlessandroB's guidelines. Can anybody please check whether I did anything wrong here?

1. In videoplayback.cpp :

    add another texture
    
    mTextures1 = new Vector<Texture>();

and added following images in

private void loadTextures()

        {  mTextures.add(Texture.loadTextureFromApk("TextureTeapotBrass.png",
                                                 getAssets()));
              mTextures.add(Texture.loadTextureFromApk("TextureTeapotBlue.png",
                                                 getAssets()));
               mTextures.add(Texture.loadTextureFromApk("TextureTeapotRed.png",
                getAssets()));

}

2. In VideoPlayback.cpp you need to adjust the _initRendering() function.....:

    Texture** textures1 =0;

    for (int i = 0; i < textureCount; ++i)
    {
        // Here we create the textures for the keyframe
        // and for all the icons
        glGenTextures(1, &(textures[i]->mTextureID));
      glGenTextures(1, &(textures1[i]->mTextureID));
        glBindTexture(GL_TEXTURE_2D, textures[i]->mTextureID);
      glBindTexture(GL_TEXTURE_2D, textures1[i]->mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures[i]->mWidth,
                textures[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures[i]->mData);
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textures1[i]->mWidth,
                textures1[i]->mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                (GLvoid*)  textures1[i]->mData);

    }

4. In VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h".....:
        
        #include "Texture.h"
        #include "CubeShaders.h"

5. In VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders for both:

    keyframeShaderID                = SampleUtils::createProgramFromBuffer(
                                                keyframeVertexShader,
                                                keyframeFragmentShader);
    keyframeVertexHandle            = glGetAttribLocation(keyframeShaderID,
                                                "vertexPosition");
    keyframeNormalHandle            = glGetAttribLocation(keyframeShaderID,
                                                "vertexNormal");
    keyframeTexCoordHandle          = glGetAttribLocation(keyframeShaderID,
                                                "vertexTexCoord");
    keyframeMVPMatrixHandle         = glGetUniformLocation(keyframeShaderID,
                                                "modelViewProjectionMatrix");
    keyframeTexSampler2DHandle      = glGetUniformLocation(keyframeShaderID,
                                                "texSampler2D");

    keyframeQuadAspectRatio[STONES] = (float)textures[0]->mHeight / (float)textures[0]->mWidth;
    keyframeQuadAspectRatio[CHIPS]  = (float)textures[1]->mHeight / (float)textures[1]->mWidth;

    shaderProgramID     = SampleUtils::createProgramFromBuffer(cubeMeshVertexShader,
                                                            cubeFragmentShader);

    vertexHandle        = glGetAttribLocation(shaderProgramID,
                                                "vertexPosition");
    normalHandle        = glGetAttribLocation(shaderProgramID,
                                                "vertexNormal");
    textureCoordHandle  = glGetAttribLocation(shaderProgramID,
                                                "vertexTexCoord");
    mvpMatrixHandle     = glGetUniformLocation(shaderProgramID,
                                                "modelViewProjectionMatrix");
    texSampler2DHandle  = glGetUniformLocation(shaderProgramID,
                                                "texSampler2D");

6. Again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with the following:

if (!strcmp(trackable->getName(), "stones")){

modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        isTracking[currentTarget] = true;

        targetPositiveDimensions[currentTarget] = imageTarget.getSize();

        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;

        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
            (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
.....................
..............}

else  if (!strcmp(trackable->getName(), "chips")){

QCAR::Matrix44F modelViewProjection;

        SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &modelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &modelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &modelViewMatrix.data[0] ,
                                    &modelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
         
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &macbookVerts[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &macbookNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,

..................
..................

}
 

Thanks

Regards

 

Rassall

 

AlessandroB wrote:

Hi, this should not be too difficult, these are some guidelines you can follow:

- start from the VideoPlayback sample and integrate the few missing elements from ImageTargets

- in VideoPlayback.java you can see a method called loadTextures(); this is the same as in ImageTargets.java, however in this case it loads 5 textures which include the video icons, while in ImageTargets.java the texture array contains the 3 textures for the teapot meshes; so you need to adjust this code (for instance you can have two different texture arrays, one for the video icons and one for the teapot meshes)

- in VideoPlayback.cpp you need to adjust the _initRendering() function, in particular to create two distinct texture arrays, one for the video icons and one for the teapot meshes (now you just have one texture array called "textures[]"); this should reflect the change done in Java that I mentioned in the previous point;

- in VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h", (copy the files from the ImageTargets sample)

- in VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders both for video rendering and for the teapot (so you need to merge the shader initialization code from ImageTargets with the shader initialization code from VideoPlayback)

- again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with the one of the VideoPlayback sample; if you look at the for (...) loop that scans the active trackables, you can see that for each trackable the pose is retrieved and used to set up a modelview matrix; so you need to define a modelViewMatrix specifically for the teapot, so as to avoid conflicting with the other modelview matrices, and basically use the same code that you see in ImageTargets, for each trackable:

SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &teapotModelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &teapotModelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &teapotModelViewMatrix.data[0] ,
                                    &teapotModelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
        
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);
       
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);
       
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&teapotModelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);

You may also want to consider adding the QCAR_onUpdate function (see ImageTargets.cpp) in case you want to be able to swap datasets.

It will be a bit of work, but that should be doable.

I hope this helps.

 

 

video and 3d model

December 10, 2012 - 6:14am #8

Glad to hear that!

video and 3d model

December 10, 2012 - 6:13am #7

I managed to get it working.

I had not initialized the shading correctly.
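
For reference, a minimal sketch of the shader setup that has to be in place in initRendering() before renderFrame() uses the handles (same variable names as in the snippets posted elsewhere in this thread; the declarations are an assumption based on the standard sample code):

// Declared at file scope in the renderer source:
unsigned int shaderProgramID    = 0;
GLint vertexHandle              = 0;
GLint normalHandle              = 0;
GLint textureCoordHandle        = 0;
GLint mvpMatrixHandle           = 0;
GLint texSampler2DHandle        = 0;

// Filled in once, inside initRendering(), after the keyframe shader setup:
shaderProgramID     = SampleUtils::createProgramFromBuffer(cubeMeshVertexShader,
                                                           cubeFragmentShader);
vertexHandle        = glGetAttribLocation(shaderProgramID, "vertexPosition");
normalHandle        = glGetAttribLocation(shaderProgramID, "vertexNormal");
textureCoordHandle  = glGetAttribLocation(shaderProgramID, "vertexTexCoord");
mvpMatrixHandle     = glGetUniformLocation(shaderProgramID, "modelViewProjectionMatrix");
texSampler2DHandle  = glGetUniformLocation(shaderProgramID, "texSampler2D");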

video and 3d model

December 10, 2012 - 5:14am #6

Hi Alessandro.

I have a problem getting this to work.
How do I create texSampler2DHandle?
I am getting the error: texSampler2DHandle was not declared in this scope

glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/); 

video and 3d model

December 10, 2012 - 4:49am #5

Hi realbruz.

 

I cannot get it working.

Can you post your renderFrame code from VideoPlayback.cpp showing the 3d model?
I am getting a GL error 0x502 (GL_INVALID_OPERATION), if that helps.

video and 3d model

December 6, 2012 - 6:59am #4

You're welcome, and glad to hear that you made it!

video and 3d model

December 6, 2012 - 5:49am #3

thank you very much,

it WORKS!!!!!

video and 3d model

December 5, 2012 - 9:33am #2

Hi, this should not be too difficult, these are some guidelines you can follow:

- start from the VideoPlayback sample and integrate the few missing elements from ImageTargets

- in VideoPlayback.java you can see a method called loadTextures(); this is the same as in ImageTargets.java, however in this case it loads 5 textures which include the video icons, while in ImageTargets.java the texture array contains the 3 textures for the teapot meshes; so you need to adjust this code (for instance you can have two different texture arrays, one for the video icons and one for the teapot meshes)

- in VideoPlayback.cpp you need to adjust the _initRendering() function, in particular to create two distinct texture arrays, one for the video icons and one for the teapot meshes (now you just have one texture array called "textures[]"); this should reflect the change done in Java that I mentioned in the previous point;

- in VideoPlayback.cpp, you also need to include the "teapot.h" and "CubeShaders.h", (copy the files from the ImageTargets sample)

- in VideoPlayback.cpp, again in the initRendering(), you will need to set up the shaders both for video rendering and for the teapot (so you need to merge the shader initialization code from ImageTargets with the shader initialization code from VideoPlayback)

- again in VideoPlayback.cpp, you need to merge the rendering code of the teapot (from ImageTargets) with the one of the VideoPlayback sample; if you look at the for (...) loop that scans the active trackables, you can see that for each trackable the pose is retrieved and used to set up a modelview matrix; so you need to define a modelViewMatrix specifically for the teapot, so as to avoid conflicting with the other modelview matrices, and basically use the same code that you see in ImageTargets, for each trackable (see also the note after the snippet below):

SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &teapotModelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
                                     &teapotModelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                    &teapotModelViewMatrix.data[0] ,
                                    &teapotModelViewProjection.data[0]);

        glUseProgram(shaderProgramID);
        
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotNormals[0]);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                              (const GLvoid*) &teapotTexCoords[0]);
       
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);
       
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
                           (GLfloat*)&teapotModelViewProjection.data[0] );
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);
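
(where teapotModelViewMatrix and teapotModelViewProjection are matrices you define just for the teapot; a minimal sketch of how they could be set up, using the same pose conversion as for the video targets:)

        // Hypothetical teapot-specific matrices, filled from the trackable pose
        // before the translate/scale/multiply calls shown above:
        QCAR::Matrix44F teapotModelViewMatrix =
            QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
        QCAR::Matrix44F teapotModelViewProjection;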

You may also want to consider adding the QCAR_onUpdate function (see ImageTargets.cpp) in case you want to be able to swap datasets.

It will be a bit of work, but that should be doable.

I hope this helps.

 

 
