
Using UIView instead of 3D Model

August 16, 2013 - 1:26pm #1

I'm trying to alter the ImageTargets sample to display a UIView over the tracked image target rather than the teapot. Any direction would be greatly appreciated. I've searched the forums for hours trying to find a post similar to mine, but no luck.

Using UIView instead of 3D Model

October 20, 2013 - 1:49am #5

This definitely helps me get a better understanding. How would I set up the UIView to transform with the target?

Using UIView instead of 3D Model

August 20, 2013 - 9:10am #4

Great - thanks for sharing. If you can format the code, that would be even better!


Using UIView instead of 3D Model

August 20, 2013 - 8:08am #3

I got it working the way I wanted after reading some of the knowledge base articles. I've attached the working code, which belongs in EAGLView.mm.

I created a UIView property in EAGLView.h named annotation to demonstrate it working. Enjoy!
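
For reference, a minimal sketch of what that declaration in EAGLView.h might look like; only the annotation property comes from this post, and the modifiers are assumptions about the sample:

// EAGLView.h (sketch) - only the annotation property is the addition here;
// the rest of the class is as in the ImageTargets sample
@interface EAGLView : UIView

// The view to overlay on the tracked target instead of the teapot;
// strong assumes ARC - under MRC this would be retain
@property (nonatomic, strong) UIView *annotation;

@end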


- (id)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];

    if (self)
    {
        // Create the list of textures we want loaded - ARViewController will do this for us
        int nTextures = sizeof(textureFilenames) / sizeof(textureFilenames[0]);
        for (int i = 0; i < nTextures; ++i)
            [textureList addObject:[NSString stringWithUTF8String:textureFilenames[i]]];

        // Create the annotation view that will be drawn over the target
        _annotation = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 40, 40)];
        _annotation.backgroundColor = [UIColor blueColor];
    }
    return self;
}

- (void)renderFrameQCAR
{
    [self setFramebuffer];

    // Clear colour and depth buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Render video background and retrieve tracking state
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    QCAR::Renderer::getInstance().drawVideoBackground();

    //NSLog(@"active trackables: %d", state.getNumActiveTrackables());

    if (QCAR::GL_11 & qUtils.QCARFlags) {
        glEnable(GL_TEXTURE_2D);
        glDisable(GL_LIGHTING);
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_NORMAL_ARRAY);
        glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    }

    glEnable(GL_DEPTH_TEST);
    // We must detect if background reflection is active and adjust the culling direction.
    // If the reflection is active, this means the pose matrix has been reflected as well,
    // therefore standard counter-clockwise face culling will result in "inside out" models.
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
    if (QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
        glFrontFace(GL_CW);  // Front camera
    else
        glFrontFace(GL_CCW); // Back camera

    for (int i = 0; i < state.getNumTrackableResults(); ++i) {
        // Get the trackable
        const QCAR::TrackableResult* result = state.getTrackableResult(i);
        const QCAR::Trackable& trackable = result->getTrackable();
        QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());

        // Choose the texture based on the target name
        int targetIndex = 0; // "stones"
        if (!strcmp(trackable.getName(), "chips"))
            targetIndex = 1;
        else if (!strcmp(trackable.getName(), "tarmac"))
            targetIndex = 2;

        Object3D *obj3D = [objects3D objectAtIndex:targetIndex];

        // UIKit must only be touched on the main thread, so add the
        // annotation as a subview from the main queue
        dispatch_async(dispatch_get_main_queue(), ^(void) {
            if (!self.annotation.superview) {
                [self addSubview:self.annotation];
            }
        });

        QCAR::Matrix34F pose = result->getPose();

        dispatch_async(dispatch_get_main_queue(), ^(void) {
            // Only the projected centre is used by calcScreenCoordsOf: below,
            // so targetSize is effectively a placeholder here
            CGSize targetSize = CGSizeMake(pose.data[0], pose.data[1]);
            CGPoint targetCenter = [self calcScreenCoordsOf:targetSize inPose:pose];
            self.annotation.center = targetCenter;
        });

        // The original teapot rendering is commented out below so that only
        // the UIView is shown over the target
        // Render using the appropriate version of OpenGL
//        if (QCAR::GL_11 & qUtils.QCARFlags) {
//            // Load the projection matrix
//            glMatrixMode(GL_PROJECTION);
//            glLoadMatrixf(qUtils.projectionMatrix.data);
//
//            // Load the model-view matrix
//            glMatrixMode(GL_MODELVIEW);
//            glLoadMatrixf(modelViewMatrix.data);
//            glTranslatef(0.0f, 0.0f, -kObjectScale);
//            glScalef(kObjectScale, kObjectScale, kObjectScale);
//
//            // Draw object
//            glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
//            glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*)obj3D.texCoords);
//            glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*)obj3D.vertices);
//            glNormalPointer(GL_FLOAT, 0, (const GLvoid*)obj3D.normals);
//            glDrawElements(GL_TRIANGLES, obj3D.numIndices, GL_UNSIGNED_SHORT, (const GLvoid*)obj3D.indices);
//        }
#ifndef USE_OPENGL1
//        else {
//            // OpenGL 2
//            QCAR::Matrix44F modelViewProjection;
//
//            ShaderUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale, &modelViewMatrix.data[0]);
//            ShaderUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale, &modelViewMatrix.data[0]);
//            ShaderUtils::multiplyMatrix(&qUtils.projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);
//
//            glUseProgram(shaderProgramID);
//
//            glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.vertices);
//            glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.normals);
//            glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.texCoords);
//
//            glEnableVertexAttribArray(vertexHandle);
//            glEnableVertexAttribArray(normalHandle);
//            glEnableVertexAttribArray(textureCoordHandle);
//
//            glActiveTexture(GL_TEXTURE0);
//            glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
//            glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (const GLfloat*)&modelViewProjection.data[0]);
//            glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
//            glDrawElements(GL_TRIANGLES, obj3D.numIndices, GL_UNSIGNED_SHORT, (const GLvoid*)obj3D.indices);
//
//            ShaderUtils::checkGlError("EAGLView renderFrameQCAR");
//        }
#endif
    }

//    glDisable(GL_DEPTH_TEST);
//    glDisable(GL_CULL_FACE);
//
//    if (QCAR::GL_11 & qUtils.QCARFlags) {
//        glDisable(GL_TEXTURE_2D);
//        glDisableClientState(GL_VERTEX_ARRAY);
//        glDisableClientState(GL_NORMAL_ARRAY);
//        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
//    }
#ifndef USE_OPENGL1
//    else {
//        glDisableVertexAttribArray(vertexHandle);
//        glDisableVertexAttribArray(normalHandle);
//        glDisableVertexAttribArray(textureCoordHandle);
//    }
#endif

    QCAR::Renderer::getInstance().end();
    [self presentFramebuffer];
}
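
// Note: a sketch, not from the original post. renderFrameQCAR above only ever
// adds and re-centres the annotation, so it stays on screen after the target
// is lost. One assumed fix is to drive its visibility from the tracking
// state, e.g. right after QCAR::Renderer::getInstance().begin():
//
//     int numResults = state.getNumTrackableResults();
//     dispatch_async(dispatch_get_main_queue(), ^(void) {
//         self.annotation.hidden = (numResults == 0);
//     });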

- (CGPoint)projectCoord:(CGPoint)coord inView:(const QCAR::CameraCalibration&)cameraCalibration andPose:(QCAR::Matrix34F)pose withOffset:(CGPoint)offset andScale:(CGFloat)scale
{
    CGPoint converted;
    QCAR::Vec3F vec(coord.x, coord.y, 0);
    QCAR::Vec2F sc = QCAR::Tool::projectPoint(cameraCalibration, pose, vec);
    converted.x = sc.data[0] * scale - offset.x;
    converted.y = sc.data[1] * scale - offset.y;
    return converted;
}

- (CGPoint)calcScreenCoordsOf:(CGSize)target inPose:(QCAR::Matrix34F)pose
{
    // 0,0 is at the centre of the target, so the extremities are at w/2,h/2
    CGFloat w = target.width / 2;
    CGFloat h = target.height / 2;

    // Need to account for the orientation on view size
    CGFloat viewWidth = self.frame.size.height;  // Portrait
    CGFloat viewHeight = self.frame.size.width;  // Portrait
    UIInterfaceOrientation orientation = [UIApplication sharedApplication].statusBarOrientation;
    if (UIInterfaceOrientationIsLandscape(orientation))
    {
        viewWidth = self.frame.size.width;
        viewHeight = self.frame.size.height;
    }

    // Calculate any mismatch of screen to video size
    QCAR::CameraDevice& cameraDevice = QCAR::CameraDevice::getInstance();
    const QCAR::CameraCalibration& cameraCalibration = cameraDevice.getCameraCalibration();
    QCAR::VideoMode videoMode = cameraDevice.getVideoMode(QCAR::CameraDevice::MODE_DEFAULT);
    CGFloat scale = viewWidth / videoMode.mWidth;
    if (videoMode.mHeight * scale < viewHeight)
        scale = viewHeight / videoMode.mHeight;
    CGFloat scaledWidth = videoMode.mWidth * scale;
    CGFloat scaledHeight = videoMode.mHeight * scale;
    CGPoint margin = {(scaledWidth - viewWidth) / 2, (scaledHeight - viewHeight) / 2};

    // Now project the 4 corners of the target
//    CGPoint s0 = [self projectCoord:CGPointMake(-w, h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];
//    CGPoint s1 = [self projectCoord:CGPointMake(-w, -h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];
//    CGPoint s2 = [self projectCoord:CGPointMake(w, -h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];
//    CGPoint s3 = [self projectCoord:CGPointMake(w, h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];

    CGPoint targetCenter = [self projectCoord:CGPointMake(0, 0) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];

    return targetCenter;
}
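
The commented-out corner projections (s0 through s3) hint at how the annotation could also be rotated and scaled with the target rather than just re-centred, which is what the October question above asks about. A minimal sketch, assuming the projectCoord: helper and the margin/scale values computed in calcScreenCoordsOf:. The method name and parameters are hypothetical, and the real target extents should come from the trackable itself (e.g. the ImageTarget's size) rather than from the pose data:

// Hypothetical helper (not in the sample): derive a transform for the
// annotation from the projected target corners. target holds the target's
// full extents, as in calcScreenCoordsOf:; margin and scale are the values
// computed there.
- (CGAffineTransform)annotationTransformOf:(CGSize)target
                                    inPose:(QCAR::Matrix34F)pose
                                    inView:(const QCAR::CameraCalibration&)cameraCalibration
                                withOffset:(CGPoint)margin
                                  andScale:(CGFloat)scale
{
    // 0,0 is the target centre, so the extremities are at w/2,h/2 (as above)
    CGFloat w = target.width / 2;
    CGFloat h = target.height / 2;

    // Project the top-left and top-right corners; the segment between them
    // is the target's top edge as it appears on screen
    CGPoint s0 = [self projectCoord:CGPointMake(-w, h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];
    CGPoint s3 = [self projectCoord:CGPointMake(w, h) inView:cameraCalibration andPose:pose withOffset:margin andScale:scale];

    CGFloat dx = s3.x - s0.x;
    CGFloat dy = s3.y - s0.y;
    CGFloat edgeLength = sqrt(dx * dx + dy * dy);
    CGFloat angle = atan2(dy, dx);

    // Match the annotation's width to the projected edge, then rotate it to
    // follow the target's in-plane rotation
    CGFloat viewScale = edgeLength / self.annotation.bounds.size.width;
    return CGAffineTransformRotate(CGAffineTransformMakeScale(viewScale, viewScale), angle);
}

The result would be applied on the main queue alongside the centre update (self.annotation.transform = ...). Note that a CGAffineTransform only captures in-plane rotation and scale; matching the full perspective of a tilted target would need a CATransform3D built from the pose matrix instead.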


Using UIView instead of 3D Model

August 16, 2013 - 4:10pm #2