I have reinstalled my compiler successfully (finally) and got right back to work. I have created a glModel class as follows:

class glModel
{
    private:
    int numv;
    int numn;
    unsigned int model;
    unsigned int textu;
    unsigned int norma;
    float *vars;
    float *norm;
    public:
    glModel():numv(0),numn(0){vex=new float[1];tex=new float[1];nex=new float[1];}
    glModel& Vertex(float tx, float ty, float x, float y, float z);
    glModel& Compile();
    glModel& Draw();
    glModel& Move(float x, float y, float z){glTranslatef(x,y,z);}
};
glModel &glModel::Vertex(float tx, float ty, float x, float y, float z)
{
    float *temp=new float[numv*5];
    for (int i=0; i<numv*5; i++)
        temp[i]=vars[i];
    delete[]vars;vars=NULL;
    vars=new float[numv*5+5];
    for (int i=0; i<numv*5; i++)
        vars[i]=temp[i];
    vars[numv*5]=tx;
    vars[numv*5+1]=ty;
    vars[numv*5+2]=x;
    vars[numv*5+3]=y;
    vars[numv*5+4]=z;
    delete[]temp;temp=NULL;
    numv++;
    if (numv%3==0)
    {
        temp=new float[numn*3];
        for (int i=0; i<numn*3; i++)
            temp[i]=norm[i];
        delete[]norm;norm=NULL;
        norm=new float[numn*3+3];
        for (int i=0; i<numn*3; i++)
            norm[i]=temp[i];
        delete[]temp;temp=NULL;
        float x1,x2,x3,y1,y2,y3,z1,z2,z3;
        z3=vars[numn*5-1];
        y3=vars[numn*5-2];
        x3=vars[numn*5-3];
        z2=vars[numn*5-6];
        y2=vars[numn*5-7];
        x2=vars[numn*5-8];
        z1=vars[numn*5-11];
        y1=vars[numn*5-12];
        x1=vars[numn*5-13];
        float dx1,dx2,dy1,dy2,dz1,dz2;
        dx1=x2-x1;
        dx2=x3-x1;
        dy1=y2-y1;
        dy2=y3-y1;
        dz1=z2-z1;
        dz2=z3-z1;
        float ox,oy,oz;
        ox=dy1*dz2-dz1*dy2;
        oy=dz1*dx2-dx1*dz2;
        oz=dx1*dy2-dy1*dx2;
        float mag=sqrt(ox*ox+oy*oy+oz*oz);
        ox/=mag;
        oy/=mag;
        oz/=mag;
        norm[numn*3]=ox;
        norm[numn*3+1]=oy;
        norm[numn*3+2]=oz;
        numn++;
    }
    return *this;
}
glModel &glModel::Compile()
{
    float *temp=new float[numv*3];
    for (int i=0; i<numv; i++)
    {
        temp[i*3]=vars[i*5+2];
        temp[i*3+1]=vars[i*5+3];
        temp[i*3+2]=vars[i*5+4];
    }
    glGenBuffers(1,&model);
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glBufferData(GL_ARRAY_BUFFER, numv*3*sizeof(float), temp, GL_STATIC_DRAW);
    delete[]temp;temp=NULL;
    temp=new float[numv*2];
    for (int i=0; i<numv; i++)
    {
        temp[i*2]=vars[i*5];
        temp[i*2+1]=vars[i*5+1];
    }
    glGenBuffers(1,&textu);
    glBindBuffer(GL_ARRAY_BUFFER, textu);
    glBufferData(GL_ARRAY_BUFFER, numv*2*sizeof(float), temp, GL_STATIC_DRAW);
    delete[]temp;temp=NULL;
    glGenBuffers(1,&norma);
    glBindBuffer(GL_ARRAY_BUFFER, norma);
    glBufferData(GL_ARRAY_BUFFER, numn*3*sizeof(float), norm, GL_STATIC_DRAW);
    return *this;
}
glModel& glModel::Draw()
{
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glVertexPointer(3, GL_FLOAT, 0, NULL);
    glBindBuffer(GL_ARRAY_BUFFER, textu);
    glTexCoordPointer(2, GL_FLOAT, 0, NULL);
    glBindBuffer(GL_ARRAY_BUFFER, norma);
    glNormalPointer(GL_FLOAT, 0, NULL);
    glDrawArrays(GL_TRIANGLE,0,numv);
    return *this;
}

This class is designed to be used simply by the 3d model designers that are helping me out (I suck at making 3D model, they suck at programming), therefore it can be used simply like this:

glModel model;
model.Vertex(a,b,c,d,e).Vertex(a,b,c,d,e).Vertex(a,b,c,d,e).Compile().Draw();

The problem is that this format and the use of VBOs makes it dang near impossible to debug. I am getting a problem (Windows Error Report) and I would like to see what is wrong with my code. Thank you in advance.

There are several things wrong with your code.

First, you have completely redundant pieces of code that clearly abuse dynamic memory allocation. As in this snippet:

float *temp=new float[numv*5];
    for (int i=0; i<numv*5; i++)
        temp[i]=vars[i];
    delete[]vars;vars=NULL;
    vars=new float[numv*5+5];
    for (int i=0; i<numv*5; i++)
        vars[i]=temp[i];
    delete[]temp;temp=NULL;

You allocate, copy, delete, allocate, copy and delete. One of those repetition is useless. Here is the proper way to do it:

float *temp=new float[numv*5 + 5]; //allocate with new size.
    for (int i=0; i<numv*5; i++)
        temp[i]=vars[i];               //copy.
    delete[]vars;vars=NULL;            //delete old data.
    vars = temp;                       //swap pointers.
    //then, fill the values of the new element.

Additionally, the use of a C-style array is very inefficient. You should use std::vector instead, or, at least, preallocate enough memory ahead of time.

Secondly, in Compile(), you don't need to build two new arrays for the vertices and tex-coords that are already interleaved in your "vars" array. OpenGL already has functionality to handle this kind of interleaved arrays (which is standard practice too). This is handled using the "stride" parameter in all the functions. So, all you need in Compile() to send your vertices and tex-coords is the following:

glGenBuffers(1,&model);
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glBufferData(GL_ARRAY_BUFFER, numv * 5 * sizeof(float), vars, GL_STATIC_DRAW); //notice '5' and 'vars'

And then, in your Draw() function, you can simply do:

glBindBuffer(GL_ARRAY_BUFFER, model);
    glVertexPointer(3, GL_FLOAT, 5 * sizeof(float), 0);
    glTexCoordPointer(2, GL_FLOAT, 5 * sizeof(float), 3 * sizeof(float) );

The last two parameters first tell OpenGL to skip 5 floats between each vertex, and then tell it to start at an offset of 3 floats from the start of the array (for the tex-coords). This will avoid you any unnecessary creation of temporary arrays and copying, then sending to OpenGL and then deleting, which is incredibly wasteful (which is why this "stride" functionality is standard everywhere in OpenGL and many other libraries that do similar things).

Finally, lets get to the real issue here, the one that makes your program crash. When using OpenGL VBOs, you have two choices about things like normals and colors, either you have one for all vertices, or you have one for each vertex. What you did was give OpenGL one normal vector for each triangle. That's not how it goes. When you do DrawArrays(), OpenGL will expect to find 'numv' normal vectors and will try to read 'numv' normal vectors, even though you actually have 'numn' normals. You will end up reading significantly past the size of the normal vector array, and it will crash the program. You need to have one normal vector per vertex in your model (even if you simply set the normal vectors of all the points on one triangle to the same vector-value).

With all those things in mind, this is probably going to work much better:

class glModel
{
  private:
    int numv;
    int numn;
    unsigned int model;
    
    struct VertDef {
      float v[3];
      float t[2];
      float n[3];
    };
    VertDef *vars;
  public:
    glModel() : numv(0), numn(0), model(0), vars(NULL) { }
    glModel& Vertex(float tx, float ty, float x, float y, float z);
    glModel& Compile();
    glModel& Draw();
    glModel& Move(float x, float y, float z){glTranslatef(x,y,z);} //IMO, this function is entirely useless.
};

glModel& glModel::Vertex(float tx, float ty, float x, float y, float z)
{
    VertDef* temp = new VertDef[numv + 1];
    for (int i = 0; i < numv; ++i)
        temp[i] = vars[i];
    delete[] vars; vars = temp; temp = NULL;
    vars[numv].t[0] = tx;
    vars[numv].t[1] = ty;
    vars[numv].v[0] = x;
    vars[numv].v[1] = y;
    vars[numv].v[2] = z;
    vars[numv].n[0] = 0.0;
    vars[numv].n[1] = 0.0;
    vars[numv].n[2] = 0.0;
    ++numv;
    if (numv%3==0)
    {
        float z3 = vars[numv-1].v[2];
        float y3 = vars[numv-1].v[1];
        float x3 = vars[numv-1].v[0];
        float z2 = vars[numv-2].v[2];
        float y2 = vars[numv-2].v[1];
        float x2 = vars[numv-2].v[0];
        float z1 = vars[numv-3].v[2];
        float y1 = vars[numv-3].v[1];
        float x1 = vars[numv-3].v[0];
        float dx1 = x2 - x1; float dx2 = x3 - x1;
        float dy1 = y2 - y1; float dy2 = y3 - y1;
        float dz1 = z2 - z1; float dz2 = z3 - z1;
        float ox = dy1 * dz2 - dz1 * dy2;
        float oy = dz1 * dx2 - dx1 * dz2;
        float oz = dx1 * dy2 - dy1 * dx2;
        float mag=sqrt(ox*ox+oy*oy+oz*oz);
        ox/=mag;
        oy/=mag;
        oz/=mag;
        vars[numv-1].n[0] = ox;
        vars[numv-1].n[1] = oy;
        vars[numv-1].n[2] = oz;
        vars[numv-2].n[0] = ox;
        vars[numv-2].n[1] = oy;
        vars[numv-2].n[2] = oz;
        vars[numv-3].n[0] = ox;
        vars[numv-3].n[1] = oy;
        vars[numv-3].n[2] = oz;
        numn++;
    }
    return *this;
}
glModel &glModel::Compile()
{
    glGenBuffers(1, &model);
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glBufferData(GL_ARRAY_BUFFER, numv*sizeof(VertDef), vars, GL_STATIC_DRAW);
    return *this;
}
glModel& glModel::Draw()
{
    if(numn == 0)
      return *this;
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, sizeof(VertDef), 0);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glTexCoordPointer(2, GL_FLOAT, sizeof(VertDef), &(vars[0].t[0]) - &(vars[0].v[0]));
    glEnableClientState(GL_NORMAL_ARRAY);
    glNormalPointer(GL_FLOAT, sizeof(VertDef), &(vars[0].n[0]) - &(vars[0].v[0]));
    glDrawArrays(GL_TRIANGLE, 0, 3 * numn); //draw only vertices that are part of a triangle.
    return *this;
}

Thank you, still working on the extra implementation so haven't tested yet, but I am hopeful.

P.S.: IMO line 19 is useless but as a quote from my model makers "Model.Draw().Move().Draw().Move().Draw() will make drawing recurring shapes easier and faster!"

I got a problem, it seems we are using different headers as my headers define glTexCoord as:

GLAPI void APIENTRY glTexCoordPointer( GLint size, GLenum type, GLsizei stride, const GLvoid *ptr );

This means that your code uses invalid arguments. How do I modify your code to fix this?

I was just illustrating the idea. When I throw together a piece of code in 5 min. to illustrate "how to" fix it, don't expect the code to compile directly, you have to use your own wits too. I guess you would have to define the last parameters to the gl**Pointer() functions as so for example:

glTexCoordPointer(2, GL_FLOAT, sizeof(VertDef), 
      (char*)(NULL) + sizeof(float) * (&(vars[0].t[0]) - &(vars[0].v[0])));
    glNormalPointer(GL_FLOAT, sizeof(VertDef), 
      (char*)(NULL) + sizeof(float) * (&(vars[0].n[0]) - &(vars[0].v[0])));

The above doesn't fundamentally change anything, this was just a small omission on my part, you should learn to fix those kinds of trivial problems on your own, otherwise you will have a long way to go.

BTW: "it seems we are using different headers" ... I am not "using" anything, I'm not compiling your stuff and trying it out, nor did I do that with the code I posted, I'm just suggesting fixes and improvements, you're the one who has to do 99% of the work. Which includes reading all the required resources, and being able to fix small issues like this one by yourself.

I understand that, the issue is that my glTexCoordPointer does not seem to take info from the GL_ARRAY_BUFFER as it requires I specify an array. Also it does not allow me to give it an offset. I cannot think of how to do this without using multiple temporary arrays.

Read the documentation carefully. For instance, under glTexCoordPointer, you will find:

If a non-zero named buffer object is bound to the GL_ARRAY_BUFFER target (see glBindBuffer) while a texture coordinate array is specified, pointer is treated as a byte offset into the buffer object's data store.

In plain english, it means that the last parameter to glTexCoordPointer is treated not as a pointer (even though that is its type) but as an offset from the start of the array in the buffer that is currently bound (in your case, the 'model' buffer).

And:

To enable and disable a texture coordinate array, call glEnableClientState and glDisableClientState with the argument GL_TEXTURE_COORD_ARRAY.

And:

glTexCoordPointer updates the texture coordinate array state of the active client texture unit, specified with glClientActiveTexture.

glInterleavedArrays is also an option.

Edited 5 Years Ago by mike_2000_17: n/a

Ok, I understand (I think and hope) I got rid of all compiling errors, but I am still plagued by the error report. What is wrong now?

class glModel
{
    private:
    int numv;
    unsigned int model;
    struct glVertexStructure
    {
        float vert[3];
        float text[2];
        float norm[3];
    };
    glVertexStructure *vars;
    public:
    glModel():numv(0),model(0),vars(0){}
    glModel &Vertex(float tx, float ty, float x, float y, float z);
    glModel &Compile();
    glModel &Draw();
    glModel &Move(float x, float y, float z){glTranslatef(x,y,z);return *this;}
};




glModel &glModel::Vertex(float tx, float ty, float x, float y, float z)
{
    glVertexStructure *temp=new glVertexStructure[numv+1];
    for (int i=0; i<numv; i++)
        temp[i]=vars[i];
    delete[]vars;
    vars=temp;
    temp=NULL;
    vars[numv].vert[0]=x;
    vars[numv].vert[1]=y;
    vars[numv].vert[2]=z;
    vars[numv].text[0]=tx;
    vars[numv].text[1]=ty;
    vars[numv].norm[0]=0.0;
    vars[numv].norm[1]=0.0;
    vars[numv].norm[2]=0.0;
    numv++;
    if (numv%3==0)
    {
        float &zz0=vars[numv-3].vert[2];
        float &zz1=vars[numv-2].vert[2];
        float &zz2=vars[numv-1].vert[2];
        float &yy0=vars[numv-3].vert[1];
        float &yy1=vars[numv-2].vert[1];
        float &yy2=vars[numv-1].vert[1];
        float &xx0=vars[numv-3].vert[0];
        float &xx1=vars[numv-2].vert[0];
        float &xx2=vars[numv-1].vert[0];
        float ox=(yy1-yy0)*(zz2-zz0)-(zz1-zz0)*(yy2-yy0);
        float oy=(zz1-zz0)*(xx2-xx0)-(xx1-xx0)*(zz2-zz0);
        float oz=(xx1-xx0)*(yy2-yy0)-(yy1-yy0)*(xx2-xx0);
        float mag=sqrt(ox*ox+oy*oy+oz*oz);
        ox/=mag;
        oy/=mag;
        oz/=mag;
        vars[numv-1].norm[0]=vars[numv-2].norm[0]=vars[numv-3].norm[0]=ox;
        vars[numv-1].norm[1]=vars[numv-2].norm[1]=vars[numv-3].norm[1]=oy;
        vars[numv-1].norm[2]=vars[numv-2].norm[2]=vars[numv-3].norm[2]=oz;
    }
    return *this;
}
glModel &glModel::Compile()
{
    glGenBuffers(1,&model);
    glBindBuffer(GL_ARRAY_BUFFER, model);
    glBufferData(GL_ARRAY_BUFFER, numv*sizeof(glVertexStructure), vars, GL_STATIC_DRAW);
    return *this;
}
glModel &glModel::Draw()
{
    int numt=numv-numv%3;
    if (numt==0)
        return *this;
    glBindBuffer(GL_ARRAY_BUFFER,model);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3,GL_FLOAT,sizeof(glVertexStructure),0);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glTexCoordPointer(2,GL_FLOAT,sizeof(glVertexStructure),(void*)((char*)(NULL)+sizeof(float)*(&(vars[0].text[0])-&(vars[0].vert[0]))));
    glEnableClientState(GL_NORMAL_ARRAY);
    glNormalPointer(GL_FLOAT,sizeof(glVertexStructure),(void*)((char*)(NULL)+sizeof(float)*(&(vars[0].norm[0])-&(vars[0].vert[0]))));
    glDrawArrays(GL_TRIANGLES, 0, 3*numt);
    return *this;
}
This article has been dead for over six months. Start a new discussion instead.