/* Joshua Stough
   October 14, 2002
   COMP 238, Assignment 3
   Glut skeleton code written by Gary Bishop for COMP 236

   This is a basic lightfield renderer. An input file is expected in an
   MIT-like format, as follows:

     DataCameraFocalLength ____
     DataCameraSpacing ____
     DesiredCameraFocalLength ____
     DesiredRenderingDistance ____
     DesiredCameraPosition ___ ___ ___
     width __
     height __
     file [column] [row] filename
     ...

   Images are simple TGA format. Numbers are in meters.

   This program assumes all cameras point in the direction normal to the
   plane along which all centers of projection lie. So all cameras were on
   the x-y plane, facing in the negative z direction. Thus, this is a
   plane-at-infinity setting. During rendering, the user is allowed to move
   the eye around, zoom, twist, and move the rendering distance. Rays that
   do not exist are rendered black. */

#pragma warning(disable:4786) // MSVC6: long template names from std::map

// The angle-bracket header names were lost in formatting; this set is
// reconstructed from what the code actually uses.
#include <map>        // std::map, the image table
#include <string>     // std::string keys for the image table
#include "Buffer.h"
#include <math.h>     // sqrt, pow, fabs, sin, cos, acos, floor
#include <stdio.h>    // printf, fscanf, sprintf, fopen
#include <stdlib.h>   // exit, srand
#include <time.h>     // time, clock
#include <GL/glut.h>  // GLUT windowing and the GL calls

// some types for later use
typedef unsigned char byte;
typedef byte rgb[3]; // these are 3 byte pixels in RGB order

// a few colors to use
static rgb red = { 255, 0, 0 };
static rgb blue = { 0, 0, 255 };

// an object to represent the framebuffer where I draw
struct FrameBuffer
{
  int width, height;
  rgb *pix;
  FrameBuffer(int w, int h)
  {
    width = w; height = h;
    pix = new rgb[width*height];
  }
  void Clear(const rgb p);
} *fb; // global framebuffer object for use in callbacks

void FrameBuffer::Clear(const rgb p)
{
  register byte r = p[0];
  register byte g = p[1];
  register byte b = p[2];
  register byte *p0 = pix[0];
  register byte *p1 = p0 + sizeof(rgb)*width*height;
  while (p0 < p1) {
    p0[0] = r; p0[1] = g; p0[2] = b;
    p0 += 3;
  }
}

// A map to keep all of the images in. The template arguments were lost in
// formatting; string keys and Buffer* values match how the map is used below.
std::map<std::string, Buffer*> ImageMap;
Buffer *ill, *ilr, *iul, *iur, *curimage; // the relevant images, ur = upper right

// Variables I want global -- their size is minimal compared to the images, I'm sure.
double cop[3];       // the center of projection
double dov[3];       // the direction of view, lookp - eyep of lightfield cameras
double cdov[3];      // and the current dov
double up_dir[3];    // the up-direction vector, picked up during file input
double foc, cfoc;    // the focal length of the lightfield cameras, and current
double spacing;      // the spacing between rows and columns
double distance;     // the distance of the parallel rendering plane
int width, height;   // the number of columns and rows in the lightfield
int xres = 100;
int yres = 100;
int image_resx, image_resy;

// These are modifiable variables concerning the current eye orientation.
double foc_change = .005;         // default 5 mm increments on focal length
double cop_change = .1;           // 100 mm movement of the camera
double angle_change = 0.0872665;  // about 5 degrees
double cur_phi = 3.141592654/2.0; // the angle between up_dir and cdov

struct Projection
{
  // This is sort of the current projection matrix.
  double mat[3][3];

  Projection(double dov[], double foc);
  // Initializes the world-from-image projection matrix.

  void Invert();
  // Inverts mat.

  void Convert(double v[]);
  // Applies mat to v, where v is image coords [i j 1], returning
  // v = mat*v, a vector in the direction of that image plane point,
  // such that cop + v is on the image plane.

  void Convert_I(double x[], double c[], double p[]);
  // Recognizes that we are getting [t*i t*j t] from multiplying
  // mat and x - c. This way, we convert from a point on the
  // rendering plane to the pixel on the image quickly.
} *Pwfi; // global projection matrix for the eye,
         // and for four relevant cops from the lightfield; pur = upper right

Projection *ImageProj; // the projection matrix for the lightfield sample images
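// A minimal sketch (not part of the original pipeline) of how the projection
// matrix is meant to be used: pixel (i,j) maps to the world-space ray
// cop + t * (mat * [i j 1]'), t >= 0. This is exactly the pattern the inner
// loop of myDraw() follows; the helper name PixelToWorldRay is illustrative.
static void PixelToWorldRay(Projection *proj, int i, int j, double ray[3])
{
  ray[0] = 1.0*i; ray[1] = 1.0*j; ray[2] = 1.0;
  proj->Convert(ray); // ray now points from cop toward pixel (i,j) on the image plane
}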
Projection::Projection(double dov[], double foc)
/*** This function initializes a projection matrix for a pinhole camera
     model using the One True Way (see UNC's Bishop). Meters are the unit
     here, and the 36mm x 36mm film is modeled. ***/
{
  double dov_norm = sqrt(double(pow(dov[0],2) + pow(dov[1],2) + pow(dov[2],2)));
  double foc_v[3];
  foc_v[0] = foc*dov[0]/dov_norm;
  foc_v[1] = foc*dov[1]/dov_norm;
  foc_v[2] = foc*dov[2]/dov_norm;
  double foc_norm = sqrt(double(pow(foc_v[0],2) + pow(foc_v[1],2) + pow(foc_v[2],2)));

  // Unit vector in the foc_v direction.
  double foc_vu[3];
  foc_vu[0] = foc_v[0]/foc_norm;
  foc_vu[1] = foc_v[1]/foc_norm;
  foc_vu[2] = foc_v[2]/foc_norm;

  double up_foc_dot = up_dir[0]*foc_vu[0] + up_dir[1]*foc_vu[1] + up_dir[2]*foc_vu[2];

  // The b image plane vector.
  double b[3];
  b[0] = up_dir[0] - up_foc_dot*foc_vu[0];
  b[1] = up_dir[1] - up_foc_dot*foc_vu[1];
  b[2] = up_dir[2] - up_foc_dot*foc_vu[2];
  double b_norm = sqrt(double(pow(b[0],2) + pow(b[1],2) + pow(b[2],2)));
  b[0] = 0.036*b[0]/b_norm;
  b[1] = 0.036*b[1]/b_norm;
  b[2] = 0.036*b[2]/b_norm;

  // The a image plane vector, the cross of foc_vu and b.
  double a[3];
  a[0] = foc_vu[1]*b[2] - b[1]*foc_vu[2];
  a[1] = b[0]*foc_vu[2] - foc_vu[0]*b[2];
  a[2] = foc_vu[0]*b[1] - b[0]*foc_vu[1];
  double a_norm = sqrt(double(pow(a[0],2) + pow(a[1],2) + pow(a[2],2)));
  a[0] = 0.036*a[0]/a_norm;
  a[1] = 0.036*a[1]/a_norm;
  a[2] = 0.036*a[2]/a_norm;

  // o, a vector pointing to the lower right hand corner of the image plane.
  double o[3];
  o[0] = foc_v[0] - 0.5*b[0] - 0.5*a[0];
  o[1] = foc_v[1] - 0.5*b[1] - 0.5*a[1];
  o[2] = foc_v[2] - 0.5*b[2] - 0.5*a[2];

  // Resize for the resolution, and off we go...
  a[0] /= xres; a[1] /= xres; a[2] /= xres;
  b[0] /= yres; b[1] /= yres; b[2] /= yres;
  for (int i = 0; i < 3; i++) {
    mat[i][0] = a[i];
    mat[i][1] = b[i];
    mat[i][2] = o[i];
  }
  // Now, a ray going out into the world from the cop through the pixel (i,j)
  // in the image plane is cop + mat*(i j 1)'.
}

void Projection::Invert()
// Invert the matrix.
{
  int i;
  double A[3][3];
  double D = mat[0][0]*(mat[1][1]*mat[2][2] - mat[2][1]*mat[1][2]);
  D -= mat[0][1]*(mat[1][0]*mat[2][2] - mat[2][0]*mat[1][2]);
  D += mat[0][2]*(mat[1][0]*mat[2][1] - mat[2][0]*mat[1][1]);
  for (i = 0; i < 3; i++) {
    A[i][0] = mat[i][0];
    A[i][1] = mat[i][1];
    A[i][2] = mat[i][2];
  }
  if (fabs(D) < 1.0e-10)
    printf("Noninvertible projection matrix...D'oh!\n");
  // Now reset the matrix to its inverse (cofactors over the determinant).
  mat[0][0] = (A[1][1]*A[2][2] - A[2][1]*A[1][2])/D;
  mat[0][1] = (A[0][2]*A[2][1] - A[2][2]*A[0][1])/D;
  mat[0][2] = (A[0][1]*A[1][2] - A[1][1]*A[0][2])/D;
  mat[1][0] = (A[1][2]*A[2][0] - A[2][2]*A[1][0])/D;
  mat[1][1] = (A[0][0]*A[2][2] - A[2][0]*A[0][2])/D;
  mat[1][2] = (A[0][2]*A[1][0] - A[1][2]*A[0][0])/D;
  mat[2][0] = (A[1][0]*A[2][1] - A[2][0]*A[1][1])/D;
  mat[2][1] = (A[0][1]*A[2][0] - A[2][1]*A[0][0])/D;
  mat[2][2] = (A[0][0]*A[1][1] - A[1][0]*A[0][1])/D;
  // whew...
}

void Projection::Convert(double v[])
/*** Simply apply the projection matrix to the referenced vector v,
     and return the new v. ***/
{
  int i, j;
  double mat_b[3] = {0.0, 0.0, 0.0};
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      mat_b[i] += mat[i][j]*v[j];
  v[0] = mat_b[0]; v[1] = mat_b[1]; v[2] = mat_b[2];
}

void Projection::Convert_I(double x[], double c[], double p[])
// Apply mat to x - c and return the values in p.
{
  int i, j;
  double q[3], mat_b[3] = {0.0, 0.0, 0.0};
  q[0] = x[0] - c[0];
  q[1] = x[1] - c[1];
  q[2] = x[2] - c[2];
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      mat_b[i] += mat[i][j]*q[j];
  p[0] = mat_b[0]; p[1] = mat_b[1]; p[2] = mat_b[2];
}
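// A quick consistency check for Invert(), assuming nothing beyond the code
// above: save a copy of mat, invert in place, and multiply the two back
// together -- the product should be the identity to within roundoff. This
// sketch is illustrative only and is never called by the renderer.
static int CheckInverse(Projection *proj)
{
  double A[3][3];
  int i, j, k;
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      A[i][j] = proj->mat[i][j]; // save the original
  proj->Invert();                // note: mutates proj
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      double s = 0.0;
      for (k = 0; k < 3; k++) s += proj->mat[i][k]*A[k][j];
      if (fabs(s - (i == j ? 1.0 : 0.0)) > 1.0e-8) return 0; // not the identity
    }
  }
  return 1;
}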
double Intersect_plane(double v[], int which)
// In this ray retrieval scheme, this isn't a general plane and vector: it's
// the cop and a provided v intersecting the x-y plane, whose normal points in
// the negative z direction. That is, we solve cop_z + t*v_z = 0 for t; for
// the rendering plane (which != 0) the right-hand side is -distance, so we
// simply adjust the numerator.
{
  double numer = -1.0*cop[2];
  if (which != 0)
    numer += -1.0*distance;
  double denom = v[2];
  if (fabs(denom) < 1.0e-14)
    return -1.0;
  else
    return (numer/denom);
}

void RetrieveColor(double v[], double color[])
// Here, using the current center of projection and the provided ray v,
// compute from the lightfield images the color returned for this ray.
// The way we're going to do it is intersect this cop and ray with the
// plane of samples, find the closest four (or two) sample images, and in
// them the four closest pixels. Do the bilinear weighting in the image and
// then between the images to get the best color for this ray/cop pair.
{
  int i, j;
  double t = Intersect_plane(v, 0), x, y;
  double uvp[3], stp[3], p[3];
  Vec3f c, ll, lr, ur, ul, temp1, temp2;

  uvp[0] = cop[0] + t*v[0];
  uvp[1] = cop[1] + t*v[1];
  uvp[2] = cop[2] + t*v[2];
  if (fabs(uvp[2]) > 1.0e-8)
    printf("Projected point not on x-y plane.\n");

  // So we have the point where the ray intersects the lightfield plane.
  // Now, get the point where the ray hits the rendering plane.
  t = Intersect_plane(v, 1);
  stp[0] = cop[0] + t*v[0];
  stp[1] = cop[1] + t*v[1];
  stp[2] = cop[2] + t*v[2];

  double uind, vind; // for the bilinear interpolation on the camera plane
  char curfile[32];
  int interpx = 1, interpy = 1; // whether to interpolate between camera images

  x = uvp[0]/spacing;
  y = uvp[1]/spacing;   // if the cameras were on the unit grid, this is where the ray we want is
  i = (int) floor(x);
  j = (int) floor(y);   // image coords, as stored
  uind = x - i;
  vind = y - j;
  uvp[0] = i*spacing; uvp[1] = j*spacing; uvp[2] = 0.0;
  // Now the cops lie on integer coordinates. This may be prone to numerical error.

  // Bilinearly interpolate between the 4 closest camera images.
  // This will be the ll image.
  sprintf(&curfile[0], "image_%d_%d", i, j); // this camera's tag
  // These next lines get the relevant pixel coordinates.
  ImageProj->Convert_I(stp, uvp, p);
  // So now p should be of the form [t*i t*j t].
  if (fabs(p[2]) < 1.0e-10)
    printf("the t parameter is close to zero.\n");
  p[0] /= p[2]; p[1] /= p[2];
  // Pick up this camera's relevant color.
  if (i >= 0 && i < height && j >= 0 && j < width &&
      p[0] < image_resx && p[1] < image_resy && p[0] >= 0 && p[1] >= 0) {
    curimage = ImageMap[curfile];
    ll = curimage->get_bilin_color(p[0], p[1]);
  } else {
    ll.x = 0.0; ll.y = 0.0; ll.z = 0.0;
    interpx = 0;
  }

  // Now pick up the color for lr.
  i++; uvp[0] += spacing;
  sprintf(&curfile[0], "image_%d_%d", i, j);
  ImageProj->Convert_I(stp, uvp, p);
  if (fabs(p[2]) < 1.0e-10)
    printf("the t parameter is close to zero.\n");
  p[0] /= p[2]; p[1] /= p[2];
  if (i >= 0 && i < height && j >= 0 && j < width &&
      p[0] < image_resx && p[1] < image_resy && p[0] >= 0 && p[1] >= 0) {
    curimage = ImageMap[curfile];
    lr = curimage->get_bilin_color(p[0], p[1]);
    if (interpx)
      temp1 = (1.0 - uind)*ll + uind*lr;
    else
      temp1 = lr;
  } else {
    lr.x = 0.0; lr.y = 0.0; lr.z = 0.0;
    if (interpx) temp1 = ll;
    else interpy = 0;
  }

  // Now pick up the color for ur.
  j++; uvp[1] += spacing;
  interpx = 1;
  sprintf(&curfile[0], "image_%d_%d", i, j);
  ImageProj->Convert_I(stp, uvp, p);
  if (fabs(p[2]) < 1.0e-10)
    printf("the t parameter is close to zero.\n");
  p[0] /= p[2]; p[1] /= p[2];
  if (i >= 0 && i < height && j >= 0 && j < width &&
      p[0] < image_resx && p[1] < image_resy && p[0] >= 0 && p[1] >= 0) {
    curimage = ImageMap[curfile];
    ur = curimage->get_bilin_color(p[0], p[1]);
  } else {
    ur.x = 0.0; ur.y = 0.0; ur.z = 0.0;
    interpx = 0;
  }

  // Finally, get the ul color.
  i--; uvp[0] -= spacing;
  sprintf(&curfile[0], "image_%d_%d", i, j);
  ImageProj->Convert_I(stp, uvp, p);
  if (fabs(p[2]) < 1.0e-10)
    printf("the t parameter is close to zero.\n");
  p[0] /= p[2]; p[1] /= p[2];
  if (i >= 0 && i < height && j >= 0 && j < width &&
      p[0] < image_resx && p[1] < image_resy && p[0] >= 0 && p[1] >= 0) {
    curimage = ImageMap[curfile];
    ul = curimage->get_bilin_color(p[0], p[1]);
    if (interpx)
      temp2 = (1.0 - uind)*ul + uind*ur;
    else
      temp2 = ul;
    if (interpy)
      c = (1.0 - vind)*temp1 + vind*temp2;
    else
      c = temp2;
  } else {
    ul.x = 0.0; ul.y = 0.0; ul.z = 0.0;
    if (interpx) temp2 = ur;
    else if (interpy) c = temp1;
  }

  // Something still has to be done for when some of the cameras weren't
  // there; otherwise we get color falloff on the edges.
  color[0] = c.x;
  color[1] = c.y;
  color[2] = c.z;
}
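// The interpolation above, written out flat for reference: with u,v in [0,1)
// the fractional position inside a camera cell, the two-stage lerp reduces to
// the standard bilinear weights (1-u)(1-v), u(1-v), (1-u)v, uv. A minimal
// sketch, never called, and assuming all four samples exist (RetrieveColor's
// interpx/interpy flags handle the missing-camera cases).
static Vec3f BilinearBlend(Vec3f ll, Vec3f lr, Vec3f ul, Vec3f ur,
                           double u, double v)
{
  return (1.0 - v)*((1.0 - u)*ll + u*lr)   // bottom edge lerp
       +        v *((1.0 - u)*ul + u*ur);  // top edge lerp
}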
void Phi(double angle)
// Look up by the angle amount. Thus, we change cdov here, based on up_dir
// and cdov. We do this with a spherical linear interpolation between up_dir
// and cdov, then we write cdov.
{
  double u = (cur_phi + angle)/cur_phi;
  cur_phi += angle;
  double excess1 = 1.0 - u;
  double dot, gamma, temp1, temp2;
  dot = up_dir[0]*cdov[0] + up_dir[1]*cdov[1] + up_dir[2]*cdov[2];
  // gamma is the angle between up_dir and cdov.
  gamma = acos(dot);
  temp1 = sin(excess1*gamma)/sin(gamma);
  temp2 = sin(u*gamma)/sin(gamma);
  cdov[0] = temp1*up_dir[0] + temp2*cdov[0];
  cdov[1] = temp1*up_dir[1] + temp2*cdov[1];
  cdov[2] = temp1*up_dir[2] + temp2*cdov[2];
}

void Theta(double angle)
// This function, like Phi above, changes the cdov. Basically, rotate the
// cdov about the up_dir by angle.
{
  // We take the x,z of the cdov and rotate clockwise in the plane.
  double ndov[3], c = cos(angle), s = sin(angle);
  ndov[0] = c*cdov[0] + s*cdov[2];
  ndov[2] = -s*cdov[0] + c*cdov[2];
  cdov[0] = ndov[0];
  cdov[2] = ndov[2];
  // Now, renormalize.
  c = sqrt(cdov[0]*cdov[0] + cdov[1]*cdov[1] + cdov[2]*cdov[2]);
  cdov[0] /= c; cdov[1] /= c; cdov[2] /= c;
}
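// Phi is an instance of spherical linear interpolation: for unit vectors a
// and b separated by gamma = acos(a.b),
//   slerp(a, b, t) = sin((1-t)*gamma)/sin(gamma) * a + sin(t*gamma)/sin(gamma) * b.
// A minimal standalone sketch of that formula; the helper name Slerp is
// illustrative, and the renderer itself only uses the special case in Phi.
static void Slerp(const double a[3], const double b[3], double t, double out[3])
{
  double dot = a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
  double gamma = acos(dot);
  if (fabs(sin(gamma)) < 1.0e-10) { // nearly parallel: fall back to a copy of b
    out[0] = b[0]; out[1] = b[1]; out[2] = b[2];
    return;
  }
  double wa = sin((1.0 - t)*gamma)/sin(gamma);
  double wb = sin(t*gamma)/sin(gamma);
  out[0] = wa*a[0] + wb*b[0];
  out[1] = wa*a[1] + wb*b[1];
  out[2] = wa*a[2] + wb*b[2];
}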
void myDisplay(void)
{
  // copy the framebuffer to the screen
  glDrawPixels(fb->width, fb->height, GL_RGB, GL_UNSIGNED_BYTE, fb->pix);
  glFlush();
}

void myDraw()
{
  int i, j;
  double v[3];
  double color[3];
  register byte *p;

  // The current projection matrix. This need only be done if the dov or
  // focal length has changed. (Freeing the old one; Pwfi starts out null.)
  delete Pwfi;
  Pwfi = new Projection(cdov, cfoc);

  for (i = 0; i < xres; i++) {
    for (j = 0; j < yres; j++) {
      v[0] = 1.0*i; v[1] = 1.0*j; v[2] = 1.0;
      Pwfi->Convert(v);
      color[0] = 0.0; color[1] = 0.0; color[2] = 0.0;
      // Retrieve the color of this ray.
      RetrieveColor(v, color);
      p = fb->pix[j*xres + i];
      p[0] = (byte)(255*color[0]);
      p[1] = (byte)(255*color[1]);
      p[2] = (byte)(255*color[2]);
    }
  }

  // Orientation markers in three corners.
  p = fb->pix[0];       p[0] = 255; p[1] = 0;   p[2] = 0;
  p = fb->pix[99];      p[0] = 0;   p[1] = 255; p[2] = 0;
  p = fb->pix[99*xres]; p[0] = 0;   p[1] = 0;   p[2] = 255;

  // Then transfer to the screen. I can do this as often as I like, even
  // after every pixel for debugging purposes.
  myDisplay();
}
void myInit()
{
  // initialize GL stuff for speed
#if 1 // this makes quite a difference on the PC with software GL
  glDisable(GL_DITHER);
#endif
  // SGI claims some of these might produce speedups for some architectures
#if 0
  glDisable(GL_ALPHA_TEST);
  glDisable(GL_BLEND);
  glDisable(GL_DEPTH_TEST);
  glDisable(GL_FOG);
  glDisable(GL_LIGHTING);
  glDisable(GL_LOGIC_OP);
  glDisable(GL_STENCIL_TEST);
  glDisable(GL_TEXTURE_1D);
  glDisable(GL_TEXTURE_2D);
  glPixelTransferi(GL_MAP_COLOR, GL_FALSE);
  glPixelTransferi(GL_RED_SCALE, 1);
  glPixelTransferi(GL_RED_BIAS, 0);
  glPixelTransferi(GL_GREEN_SCALE, 1);
  glPixelTransferi(GL_GREEN_BIAS, 0);
  glPixelTransferi(GL_BLUE_SCALE, 1);
  glPixelTransferi(GL_BLUE_BIAS, 0);
  glPixelTransferi(GL_ALPHA_SCALE, 1);
  glPixelTransferi(GL_ALPHA_BIAS, 0);
#endif
}

void KeyInputHandler(unsigned char Key, int x, int y)
{
  switch (Key) {
  case 27: exit(0); break;
  case 'X': cop[0] -= cop_change; break;
  case 'x': cop[0] += cop_change; break;
  case 'Y': cop[1] -= cop_change; break;
  case 'y': cop[1] += cop_change; break;
  case 'Z': cop[2] -= cop_change; break;
  case 'z': cop[2] += cop_change; break;
  case 'p': Phi(angle_change); break;
  case 'P': Phi(-angle_change); break;
  case 't': Theta(angle_change); break;
  case 'T': Theta(-angle_change); break;
  case 'F': cfoc -= foc_change; break;
  case 'f': cfoc += foc_change; break;
  case 's': spacing += .1; break;
  case 'S': spacing -= .1; break;
  case 'd': distance += .5; break;
  case 'D': distance -= .5; break;
  case 'h': // home: reset the eye
    cop[0] = 0.0; cop[1] = 0.0; cop[2] = 0.0;
    cfoc = foc;
    break;
  case 'q': // query: report the current viewing parameters
    printf("position x y z %f %f %f\n", cop[0], cop[1], cop[2]);
    printf("distance %f, spacing %f, focal %f\n", distance, spacing, cfoc);
    break;
  default: break;
  }
  myDraw();
}

void ReadRayDataBase(char *filename)
// Read in lightfield information, and set default values of variables.
{
  int i, j;
  char curfile[20], *rayfile = new char[100];

  up_dir[0] = 0; up_dir[1] = 1; up_dir[2] = 0;
  dov[0] = 0; dov[1] = 0; dov[2] = -1;
  cdov[0] = dov[0]; cdov[1] = dov[1]; cdov[2] = dov[2];

  FILE *fin = fopen(filename, "rb");
  fscanf(fin, "DataCameraFocalLength %lf\n", &foc);
  printf("Focal length is %lf\n", foc);
  fscanf(fin, "DataCameraSpacing %lf\n", &spacing);
  printf("Spacing is %lf\n", spacing);
  fscanf(fin, "DesiredCameraFocalLength %lf\n", &cfoc);
  printf("Desired focal length %lf\n", cfoc);
  fscanf(fin, "DesiredRenderingDistance %lf\n", &distance);
  printf("Rendering Plane at %lf\n", distance);
  fscanf(fin, "DesiredCameraPosition %lf %lf %lf\n", &cop[0], &cop[1], &cop[2]);
  printf("Desired camera position %lf %lf %lf\n", cop[0], cop[1], cop[2]);
  fscanf(fin, "width %d\n", &width);
  fscanf(fin, "height %d\n", &height);
  printf("camera field width and height %d %d\n", width, height);

  // Now we have to read in the images. All data cameras share one
  // projection matrix: build it and invert it.
  double vecz[3] = {0.0, 0.0, -1.0};
  ImageProj = new Projection(vecz, foc);
  ImageProj->Invert();

  // fscanf returns the number of converted fields; stop when a full
  // "file [column] [row] filename" line can no longer be read.
  while (fscanf(fin, "file %d %d %s\n", &j, &i, &rayfile[0]) == 3) {
    // Build a tag for easy image retrieval.
    sprintf(&curfile[0], "image_%d_%d", i, j);
    printf("\tHashing filename %s\n", curfile);
    curimage = new Buffer();
    ImageMap[curfile] = curimage;
    // Read the image file.
    curimage->ReadTGA(rayfile);
  }
  image_resx = curimage->get_height();
  image_resy = curimage->get_width();
  fclose(fin);
}
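/* An illustrative input file matching the parser above. The values here are
   made up; the real shapes4.txt ships with the assignment data.

     DataCameraFocalLength 0.05
     DataCameraSpacing 0.5
     DesiredCameraFocalLength 0.05
     DesiredRenderingDistance 5.0
     DesiredCameraPosition 0.0 0.0 0.0
     width 16
     height 16
     file 0 0 image_1.tga
     file 1 0 image_2.tga
     ...
*/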
int main(int argc, char* argv[])
{
  char filename[] = "shapes4.txt";

  // Now read in the ray database.
  ReadRayDataBase(filename);

  fb = new FrameBuffer(xres, yres);

  glutInit(&argc, argv); // GLUT wants this before any other glut call
  glutInitDisplayMode(GLUT_RGB);
  glutInitWindowPosition(50, 50);
  glutInitWindowSize(xres, yres);
  glutCreateWindow("OpenGL skeleton");
  glutKeyboardFunc(KeyInputHandler);
  glutDisplayFunc(myDisplay);

  myInit();
  srand(time(NULL));
  glutMainLoop();
  return 0;
}