Log in or register to post comments

How QCAR detect the trackable image

March 19, 2013 - 5:00am #1

Hi, 

I want to know about the code when the application trying to recognize the ImageTarget.

Does QCAR take the camera frames and run detection on each frame?

In the sample app ImageTarget,ImageTarget is being detected by the below method.

JNIEXPORT void JNICALL
// Per-frame render callback invoked from the Java GL thread.
// Queries the QCAR tracking state, draws the camera background, then renders
// a textured teapot on top of every trackable detected in the current frame.
// Note: detection itself happens on Vuforia's background tracker thread; this
// function only *reads* the already-computed state via Renderer::begin().
Java_com_example_vfuoria_myapp_ImageTargetsRenderer_renderFrame(JNIEnv *, jobject)
{
	//LOG("Java_com_example_vfuoria_myapp_ImageTargetsRenderer_renderFrame");

	// Clear color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Get the state from QCAR and mark the beginning of a rendering section
	QCAR::State state = QCAR::Renderer::getInstance().begin();

	// Explicitly render the Video Background
	QCAR::Renderer::getInstance().drawVideoBackground();

#ifdef USE_OPENGL_ES_1_1
	// Set GL11 flags:
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_NORMAL_ARRAY);
	glEnableClientState(GL_TEXTURE_COORD_ARRAY);

	glEnable(GL_TEXTURE_2D);
	glDisable(GL_LIGHTING);

#endif

	glEnable(GL_DEPTH_TEST);

	// We must detect if background reflection is active and adjust the culling direction.
	// If the reflection is active, this means the post matrix has been reflected as well,
	// therefore standard counter clockwise face culling will result in "inside out" models.
	glEnable(GL_CULL_FACE);
	glCullFace(GL_BACK);
	if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
	glFrontFace(GL_CW); //Front camera
	else
	glFrontFace(GL_CCW); //Back camera


	// Iterate over the trackables detected/tracked in this frame:
	for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
	{
		// Get the trackable and its pose (converted to a GL model-view matrix):
		const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
		const QCAR::Trackable& trackable = result->getTrackable();
		QCAR::Matrix44F modelViewMatrix =
		QCAR::Tool::convertPose2GLMatrix(result->getPose());

		// Choose the texture based on the target name.
		// BUG FIX: textureIndex was previously left uninitialized when the
		// target name did not match, causing an undefined (possibly
		// out-of-bounds) read of textures[] below. Default to 0.
		int textureIndex = 0;
		if (strcmp(trackable.getName(), "leaves_original") == 0)
		{
			textureIndex = 0;
		}

		const Texture* const thisTexture = textures[textureIndex];

#ifdef USE_OPENGL_ES_1_1
		// Load projection matrix:
		glMatrixMode(GL_PROJECTION);
		glLoadMatrixf(projectionMatrix.data);

		// Load model view matrix and place/scale the object on the target:
		glMatrixMode(GL_MODELVIEW);
		glLoadMatrixf(modelViewMatrix.data);
		glTranslatef(0.f, 0.f, kObjectScale);
		glScalef(kObjectScale, kObjectScale, kObjectScale);

		// Draw object:
		glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
		glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
		glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
		glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &teapotNormals[0]);
		glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
				(const GLvoid*) &teapotIndices[0]);
#else
		// ES 2.0 path: build MVP = projection * (modelView * translate * scale)
		QCAR::Matrix44F modelViewProjection;

		SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
				&modelViewMatrix.data[0]);
		SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
				&modelViewMatrix.data[0]);
		SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
				&modelViewMatrix.data[0] ,
				&modelViewProjection.data[0]);

		glUseProgram(shaderProgramID);

		glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotVertices[0]);
		glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotNormals[0]);
		glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotTexCoords[0]);

		glEnableVertexAttribArray(vertexHandle);
		glEnableVertexAttribArray(normalHandle);
		glEnableVertexAttribArray(textureCoordHandle);

		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
		glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
		glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
				(GLfloat*)&modelViewProjection.data[0] );
		glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
				(const GLvoid*) &teapotIndices[0]);

		SampleUtils::checkGlError("ImageTargets renderFrame");
#endif

	}

	glDisable(GL_DEPTH_TEST);

	// Restore GL state modified above:
#ifdef USE_OPENGL_ES_1_1        
	glDisable(GL_TEXTURE_2D);
	glDisableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_NORMAL_ARRAY);
	glDisableClientState(GL_TEXTURE_COORD_ARRAY);
#else
	glDisableVertexAttribArray(vertexHandle);
	glDisableVertexAttribArray(normalHandle);
	glDisableVertexAttribArray(textureCoordHandle);
#endif

	// Mark the end of the rendering section (pairs with begin() above).
	QCAR::Renderer::getInstance().end();
}

 

All I want to know is whether the above method takes the camera frame in real time and compares it. If yes, is the method below the main function for detection?

 

 

// Configures how the camera video feed is rendered behind the 3D content.
//
// @param zoom  Scale factor applied to the background quad; 0 means "first
//              execution / no zoom" and uses the unscaled, aspect-fitted size.
//
// The base background size is computed so the video fills the screen while
// preserving the video aspect ratio, then optionally multiplied by `zoom`.
// NOTE(review): scaling mSize alone zooms the *displayed* background only —
// the frames handed to the tracker are unaffected, and the projection matrix
// is not adjusted to match; confirm this is the intended behavior.
void configureVideoBackground(int zoom) {
	//MODE_DEFAULT & MODE_OPTIMIZE_QUALITY
	// Get the default video mode:
	QCAR::CameraDevice& cameraDevice = QCAR::CameraDevice::getInstance();
	QCAR::VideoMode videoMode = cameraDevice.getVideoMode(
			QCAR::CameraDevice::MODE_DEFAULT);

	// Configure the video background
	QCAR::VideoBackgroundConfig config;
	config.mEnabled = true;
	config.mSynchronous = true;
	config.mPosition.data[0] = 0.0f;
	config.mPosition.data[1] = 0.0f;

	// Compute the unscaled, aspect-preserving background size for the
	// current orientation. (Previously this computation was duplicated in
	// both the zoom and no-zoom branches of each orientation.)
	if (isActivityInPortraitMode) {
		if (zoom == 0) {
			LOG("configureVideoBackground PORTRAIT MODE > FIRST EXECUTION");
		}
		// Portrait: video width maps to screen height.
		config.mSize.data[0] = videoMode.mHeight * (screenHeight / (float) videoMode.mWidth);
		config.mSize.data[1] = screenHeight;
	} else {
		if (zoom == 0) {
			LOG("configureVideoBackground LANDSCAPE MODE > FIRST EXECUTION");
		}
		// Landscape: video width maps to screen width.
		config.mSize.data[0] = screenWidth;
		config.mSize.data[1] = videoMode.mHeight * (screenWidth / (float) videoMode.mWidth);
	}

	// Apply the zoom factor (skipped on first execution, zoom == 0).
	if (zoom != 0) {
		config.mSize.data[0] = config.mSize.data[0] * zoom;
		config.mSize.data[1] = config.mSize.data[1] * zoom;
	}

	LOG(
			"Configure Video Background : Video (%d,%d), Screen (%d,%d), mSize (%d,%d),ZOOM PERCENTAGE (%d)",
			videoMode.mWidth, videoMode.mHeight, screenWidth, screenHeight, config.mSize.data[0], config.mSize.data[1], zoom);

	// Set the config:
	QCAR::Renderer::getInstance().setVideoBackgroundConfig(config);
}

Is it possible to pass config.mSize.data[0] and config.mSize.data[1] to the target-tracking function for the detection?

 

Thanks

How QCAR detect the trackable image

March 25, 2013 - 11:56pm #12

To replace the teapot with another model, you can have a look at this step-by-step guide:

https://developer.vuforia.com/forum/faq/android-how-do-i-replace-teapot

 

How QCAR detect the trackable image

March 25, 2013 - 10:01pm #11

AlessandroB wrote:

Hi, all the changes should be done in C++ in ImageTargets.cpp (no need to touch the Java code);

the relevant functions for rendering and scaling are in the _renderFrame function, where you already find some code about translating and scaling the modelview matrix.

My strong advice is to take your time to read the BackgroundTextureAccess example, to understand how the background texture is rendered using a custom created mesh.

 

 

 

AlessandroB, thank you for your answer. I have implemented the zoom, and image detection is almost successful even when I zoom. I want to know how I can create and show my own custom 3D object (i.e., not the teapot) and display it when the image is recognized.

Where do I have to edit the code to do this extra work? Can you guide me on this?

Thanks in advance waiting for your response

 

 

How QCAR detect the trackable image

March 25, 2013 - 1:00am #10

Hi, all the changes should be done in C++ in ImageTargets.cpp (no need to touch the Java code);

the relevant functions for rendering and scaling are in the _renderFrame function, where you already find some code about translating and scaling the modelview matrix.

My strong advice is to take your time to read the BackgroundTextureAccess example, to understand how the background texture is rendered using a custom created mesh.

 

 

 

How QCAR detect the trackable image

March 24, 2013 - 10:37pm #9

AlessandroB can you give me more clear idea how to achieve it,Where i have to make changes in my jni file of ImageTarget.cpp or The Java code.

Should i have to call this piece of code on updateFrames in java code 

                              // Query display dimensions:
				storeScreenDimensions();

				// Update viewport via renderer:
				mRenderer.updateRendering(mScreenWidth, mScreenHeight);

				// Update projection matrix:
				setProjectionMatrix();

				// Cache last rotation used for setting projection matrix:
				mLastScreenRotation = currentScreenRotation;

 

Waiting for your reply Thanks

How QCAR detect the trackable image

March 22, 2013 - 1:46am #8

Hi, providing the sample code to do that is a bit beyond the scope of what I could do.

However, I would encourage you to have a look at the code in the renderFrame function of ImageTargets.cpp; in particular, there are some functions starting with "SampleUtils::" that are meant to set the scale and other transformations.

 

How QCAR detect the trackable image

March 21, 2013 - 9:42pm #7

AlessandroB wrote:

Ok, thanks for the clarification.

Actually if you impleemnt the "zoom" that way, you will introduce a discrepancy between what the real camera sees and what you see on the screen;

not saying that it is incorrect to setup the cofig.mSize that way, but if you do that, you will also need to adjust your projection matrix accordingly, which is going to be tricky (and we don't have a specific tutorial for that).

A simpler option could be to just keep the config parameters (mSize, mPosition) like in the "original" sample code, but scale the entire 3D world by the zoom factor, i.e.:

- render the background texture by yourself, following the example code given in the BackgroundTextureAccess sample

- apply a scale factor (corresponding to your zoom) to the background mesh that you render in previous point

- apply the same scale also to the modelview matrix that comes from the trackable Pose

That should work (although we never tested such a thing).

 

AlessandroB, thank you for your answer. Can you provide me a sample example? I am new to JNI and Vuforia, and I would really appreciate your help with sample code for my function or my idea. I am unable to understand how to do it properly.

How QCAR detect the trackable image

March 21, 2013 - 6:21am #6

Ok, thanks for the clarification.

Actually, if you implement the "zoom" that way, you will introduce a discrepancy between what the real camera sees and what you see on the screen;

not saying that it is incorrect to set up config.mSize that way, but if you do that, you will also need to adjust your projection matrix accordingly, which is going to be tricky (and we don't have a specific tutorial for that).

A simpler option could be to just keep the config parameters (mSize, mPosition) like in the "original" sample code, but scale the entire 3D world by the zoom factor, i.e.:

- render the background texture by yourself, following the example code given in the BackgroundTextureAccess sample

- apply a scale factor (corresponding to your zoom) to the background mesh that you render in previous point

- apply the same scale also to the modelview matrix that comes from the trackable Pose

That should work (although we never tested such a thing).

 

How QCAR detect the trackable image

March 20, 2013 - 11:42pm #5

AlessandroB wrote:

Hi, as I said, the processing occurs in the background; there is not a specific "core" function that you can call to detect targets. In practice, Vuforia analyzes every frame from the camera, eventually detects a target, and then tracks it.

Could you explain a bit better what you are after and why would you need such a "core" function ?

 

what i want is that when I do zoom in camera preview ,it should detects the zoomed camera frames. Below is my code how i created zoom effect using your sample project.

if (isActivityInPortraitMode) {
	
		if (zoom != 0) {
			config.mSize.data[0] = videoMode.mHeight * (screenHeight / (float) videoMode.mWidth);
			config.mSize.data[1] = screenHeight;

			config.mSize.data[0] = config.mSize.data[0] * zoom;
			config.mSize.data[1] = config.mSize.data[1] * zoom;
		} else {
			LOG("configureVideoBackground PORTRAIT MODEs > FIRST EXECUTION");
			config.mSize.data[0] = videoMode.mHeight * (screenHeight / (float) videoMode.mWidth);
			config.mSize.data[1] = screenHeight;
		}

		
	} else {
	
		if (zoom != 0) {
			config.mSize.data[0] = screenWidth;
			config.mSize.data[1] = videoMode.mHeight * (screenWidth / (float) videoMode.mWidth);

			config.mSize.data[0] = config.mSize.data[0] * zoom;
			config.mSize.data[1] = config.mSize.data[1] * zoom;
		} else {
			LOG("configureVideoBackground LANDSCAPE MODE > FIRST EXECUTION");
			config.mSize.data[0] = screenWidth;
			config.mSize.data[1] = videoMode.mHeight * (screenWidth / (float) videoMode.mWidth);
		}

	
	}

	LOG(
			"Configure Video Background : Video (%d,%d), Screen (%d,%d), mSize (%d,%d),ZOOM PERCENTAGE (%d)",
			videoMode.mWidth, videoMode.mHeight, screenWidth, screenHeight, config.mSize.data[0], config.mSize.data[1], zoom);

	// Set the config:
	QCAR::Renderer::getInstance().setVideoBackgroundConfig(config);

 

When I zoom, my TargetImage is not detected as it should be. When I completely zoom out (take the camera back to its original state), the TargetImage is detected successfully. Can you help me make detection work on the zoomed camera preview?

How QCAR detect the trackable image

March 20, 2013 - 10:59pm #4

Hi, as I said, the processing occurs in the background; there is not a specific "core" function that you can call to detect targets. In practice, Vuforia analyzes every frame from the camera, eventually detects a target, and then tracks it.

Could you explain a bit better what you are after and why would you need such a "core" function ?

 

How QCAR detect the trackable image

March 20, 2013 - 10:08pm #3

AlessandroB wrote:

Hi, the Java_com_example_vfuoria_myapp_ImageTargetsRenderer_renderFrame function is just the rendering function;

in that function we also query the State object, in this line:

QCAR::State state = QCAR::Renderer::getInstance().begin();

and then we iterate over the currently detected/tracked trackables in this code:

for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
        const QCAR::Trackable& trackable = result->getTrackable();
        QCAR::Matrix44F modelViewMatrix =
        QCAR::Tool::convertPose2GLMatrix(result->getPose());

However, this does not mean that the trackables are detected in that function; Vuforia uses a multi-threaded model, so the processing happens in the background; in the function above we simply query the state to know what is currently available.

 

 

AlessandroB can you tell me which function is the core function which detect the targetImage. In my application I have create a zoom camera effect and now i want QCAR to detect that zoom camera preview I am using latest SDK for Android

How QCAR detect the trackable image

March 19, 2013 - 8:18am #2

Hi, the Java_com_example_vfuoria_myapp_ImageTargetsRenderer_renderFrame function is just the rendering function;

in that function we also query the State object, in this line:

QCAR::State state = QCAR::Renderer::getInstance().begin();

and then we iterate over the currently detected/tracked trackables in this code:

for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
        const QCAR::Trackable& trackable = result->getTrackable();
        QCAR::Matrix44F modelViewMatrix =
        QCAR::Tool::convertPose2GLMatrix(result->getPose());

However, this does not mean that the trackables are detected in that function; Vuforia uses a multi-threaded model, so the processing happens in the background; in the function above we simply query the state to know what is currently available.

 

 

Log in or register to post comments