I have been having a serious problem with memory allocation and arrays/vectors in C++, using the g++ compiler on both a Windows machine (Dev-C++) and Linux (Debian). I would post all the code, but it is proprietary technology (I'm a graduate student), so I can't share it openly without permission.

That said, the part of the code that is having a problem is not the proprietary part, so I will give a redacted version of what I am doing and explain the error I am getting.


Basically, the program has 2 classes, which we will call Trainer and Runner. The main function merely creates one instance of Trainer with certain parameters, which in turn creates one instance of Runner with certain parameters.


The redacted header files for each look something like this:

#include <iostream>
#include <fstream>
#include <cstdlib>
#include <iomanip>
#include <string.h>
#include <time.h>
#include <math.h>
#include <vector>

using namespace std;

class Runner {

private:
   int num_gates;
   int num_models;
   int num_trainings;
   int total;

   vector< vector<float> > training_data;            //array of training pairs


public:

   Runner();
   Runner(int n_gates, int n_models, int n_trainings);   //constructor
   ~Runner();                                            //destructor

   float compute_output();

   vector< vector<float> > load_training_data();         //loads training data

   void setsigmas(vector<float> sig);

   // ... (remaining members redacted)
};

#include "Runner.h"


class Trainer {

private:
   vector< vector<float> > sig_data;

   int population_size;     //size of population
   int num_generations;     //number of generations

   Runner g;
   int num_trainings;       //number of training pairs
   int num_gates;           //number of gate variables
   int num_models;          //number of models

   void save_pop();
   vector< vector<float> > load_pop();

   vector<float> mutcross_sig;

   void mutcross();

   // ... (public section redacted)
};

Basically, each class has a 2D vector, which holds the training-set data and the parameter data respectively. These do not change in size once filled, and the training data never changes once loaded.

Here are the constructors and the data-loading function for the Runner class:

#include "Runner.h"

Runner::Runner(){
}

Runner::Runner(int n_gates, int n_models, int n_trainings){

   num_gates = n_gates;
   num_models = n_models;
   num_trainings = n_trainings;
   total = n_gates + n_models + 1;

   try{
      training_data = vector<vector<float> >(n_trainings, vector<float> (total) );
   }
   catch (bad_alloc) {
      cout << "PROBLEM";
   }

   training_data = load_training_data();
}


vector<vector<float> > Runner::load_training_data()
{
   ifstream c("test1.txt");
   float abc;

   vector< vector<float> > t_data;
   vector<float> def;

   for(int i = 0; i < num_trainings; i++){
      for(int j = 0; j < total; j++){
         c >> abc;
         def.push_back(abc);
      }
      t_data.push_back(def);
      def.clear();
   }
   return t_data;
}

This is the constructor and data-loading function for the Trainer class:

#include "Trainer.h"
Trainer::Trainer(int pop_size, int num_gen, int n_trainings, int n_gates, int n_models){
      
      


     total = n_gates +1;  
     float t_sig[total-1];

     top=1000;
   
   
char savefile[40];         
float holdB;
      int nogo=0;

   //set the parameters
   population_size = pop_size;
   num_generations = num_gen;
   num_trainings = n_trainings;
   num_gates = n_gates;
   num_models = n_models;

   
   cout <<"Save To Filename" << endl;
   cin >> savefile; 
   pop.open(savefile);
  gen_report.open("abctre4.txt");


 
    g = Runner(num_gates, num_models, num_trainings);
	 
   
   int load=0;
   cout << "Load Data? (1=YES, 0=NO)";
   cin >> load;
   
   if(load == 1){

           sig_data = load_pop();
              gen_best=0;
              gen_avg=0;
           
           }
   else{
   
   for(int t=0;t<pop_size;t++){
   sig_data.push_back(vector<float>());
}
   for(int y=0; y < pop_size; y++){

           for(int j=0; j < num_gates; j++){
                   holdB=random(min_value, max_value);

                              try{
                 sig_data[y].push_back(holdB);
                 }
                 catch(bad_alloc){
                                  cout << " MEMLIMIT ";
                                  try{
                 sig_data[y].push_back(holdB);
                 }
                 catch(bad_alloc){
                                  cout << " MEMLIMIT_AGAIN ";
                                  nogo=1;
                                  }
                                  }
                 t_sig[j]=holdB;
                 }

                g.setsigmas(t_sig);
                holdB=g.crossvalidation_run(0);

             sig_data[y].push_back(holdB);

                       
             if(holdB <= top){
                    gen_best = y;
                    top = holdB;
                    }   
                    
                 if(nogo == 0){
                       gen_avg = gen_avg + holdB;                                        
                    }
                    else{
                    gen_avg = gen_avg + (gen_avg/y+1);
                       nogo = 0;        
                    }
                    }

}

                    for(int x=0; x < num_gates; x++){
                              try{
                 mutcross_sig.push_back(0);
                 }
                 catch(bad_alloc){
                                  cout << " MUTMEMLIMIT ";
                                  try{
                 mutcross_sig.push_back(0);
                 }
                 catch(bad_alloc){
                                  cout << " MUTMEMLIMIT_AGAIN ";
                                  }
                                  }
                                  }
              

   
}
/***************************************************************/

vector<vector<float> > Trainer::load_pop()
{
   char loadfile[40];

   cout << "Load From Filename" << endl;
   cin >> loadfile;

   ifstream inpop(loadfile);
   float abc;
   vector< vector<float> > s_data;
   vector<float> indef;

   for(int i = 0; i < population_size; i++){
      for(int j = 0; j < total; j++){
         inpop >> abc;
         indef.push_back(abc);
      }
      s_data.push_back(indef);
      indef.clear();
   }
   inpop.close();
   return s_data;
}

The Trainer then goes through iterations of setting certain parameters in the Runner and running it, then mutating the parameters based on the output of that process.

Unfortunately, regardless of whether I try to load the data from a file or generate it randomly, it will often fire off bad_alloc errors or crash the program altogether. The same thing happens if I use float ** and "new" to create the 2D array. Any ideas?

Before I tell you what I think is wrong, I am going to tell you what I think has led you to being unable to solve this.

First: the layout. It is a mess. Code has to be tidy. I understand you are most likely not a CS grad, but please take some pride in your code. It should be held to the same standard as your lab work.

Second: the try blocks. Put ONE around the whole lot; you can refine it later.
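
For example, one block around the whole construction loop is enough to tell you whether allocation is really the problem (a minimal sketch using the member names from your own code):

try{
   sig_data.assign(pop_size, vector<float>());
   for(int y = 0; y < pop_size; y++){
      for(int j = 0; j < num_gates; j++){
         sig_data[y].push_back(random(min_value, max_value));
      }
   }
}
catch(bad_alloc&){
   //nothing has been freed, so retrying is pointless; report and bail out
   cerr << "out of memory building the population" << endl;
   throw;
}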

Third:

catch(bad_alloc)
{
    cout << " MUTMEMLIMIT ";
    try{
        mutcross_sig.push_back(0);
    }
}

Are you insane? What do you think is going to happen now? You have freed no large block of memory (the code above is a single small addition), so the retry will fail in exactly the same way. This is just a 100% mess.

Fourth: including string.h/math.h is about ten years out of date.
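
The C++ forms of those headers have been standard since 1998:

#include <cstring>   //instead of <string.h>
#include <ctime>     //instead of <time.h>
#include <cmath>     //instead of <math.h>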

Fifth: using namespace std; at global scope in a header is asking for trouble.
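
Qualify names in the header, and keep any using-declarations narrow and inside the .cpp file, e.g.:

//Runner.h: fully qualified, no using directive
std::vector< std::vector<float> > training_data;

//Runner.cpp: pull in only what this file actually uses
using std::cout;
using std::endl;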

Sixth: this doesn't compile, and it is not just that you couldn't be bothered to copy whole sections of the classes with the mouse. Trainer refers directly to members of Runner and to variables that are declared nowhere.

Seventh: what is this? You have <string> available, yet you use a char[40] to read a filename...
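
A std::string has no fixed size limit and still works with the C++98 file streams through c_str() (a sketch; pop is your ofstream member):

#include <string>

std::string savefile;
std::cout << "Save To Filename" << std::endl;
std::cin >> savefile;
pop.open(savefile.c_str());   //ofstream::open() takes a const char* in C++98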

Eighth: how many times do you create a large 2D vector and THEN create another version of it in a copy? e.g. vector<vector<float> > Runner::load_training_data() returns the whole table by value (see the sketch after the next point).

Ninth:

training_data = vector<vector<float> >(n_trainings, vector<float> (total) );
training_data = load_training_data();

So, excellent: you create something in training_data, then forget it and overwrite it with something new.
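
One way to fix the eighth and ninth points together is to size the member once and have load_training_data() fill it in place (a sketch based on your own function; note the return type becomes void):

void Runner::load_training_data()
{
   ifstream c("test1.txt");

   //size it once; no temporary table, no return-by-value copy
   training_data.assign(num_trainings, vector<float>(total));

   for(int i = 0; i < num_trainings; i++){
      for(int j = 0; j < total; j++){
         c >> training_data[i][j];
      }
   }
}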

Tenth: you are using std::vector (why, I don't know) and then decide to mix in float t_sig[total-1], a variable-length array that is not even standard C++.
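
A vector does the same job without the non-standard array (a sketch against your Trainer constructor; setsigmas() already takes a vector<float>):

vector<float> t_sig(total - 1);   //instead of float t_sig[total-1]

for(int j = 0; j < num_gates; j++){
   t_sig[j] = random(min_value, max_value);
}
g.setsigmas(t_sig);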

Eleventh: you couldn't even be bothered to write a short int main() so we can see how the code is used.

Twelfth: you decided not to show us Runner's destructor, yet you are having memory allocation/deallocation/corruption problems??

Anyway, now that you have read this far: I had no problem running it after I fixed all of the above. If you post better code, with a complete example that we can all run and that shows the problem, you might have more success. The problem is not directly in what you posted.

In short: stop treating a community that is normally more than happy to help, particularly with interesting stuff like genetic learning code, with so little respect.

FINALLY: DON'T DOUBLE-POST ON TWO FORUMS AT THE SAME TIME.
http://www.neowin.net/forum/index.php?showtopic=734718&pid=590566894&st=0&#entry590566894

Thanks for the help. As for why the code won't compile or anything like that: as I wrote, "I would post all the code, but it is proprietary technology." I basically had to cut a lot out. Seriously, about half the code deals with or references something that no one outside my department and a few people at a certain cancer research center knows about. As for why it looks a little shoddy/old: the original code was written in 1998-99, and I have had to rewrite a lot of it to get it to work. Originally it was just one long file with crap going everywhere; this is a lot cleaner than before.

As for double-posting, I didn't realize it was an issue. I just assumed more forums would give more people a chance to look at it.

Sorry, but the "it is proprietary technology" excuse is complete junk.
What made me irritated was that you couldn't be bothered to post a working example of the problem. Sure, if you don't want to post proprietary code, then just base your example on it. BUT MAKE IT WORK. MAKE IT COMPLETE.

You have learned very little about your problem, and we have gained no new insight into STL problems, interesting code, etc.

By using code that old, especially in the field of genetic algorithms, you are going to suffer, since a lot has been discovered since 2000.

Additionally, if you are using proprietary code for your PhD thesis, expect two things: (a) an external examiner will want to see the code -- and then, because he is old etc., will want to hand it to his grad students; (b) the next person who hires you will want you to educate his/her group in your techniques. Both of these are going to be very, very unpleasant experiences with your proprietary code.

Sorry it wasn't complete. The code was theorized by my adviser and tested around 1999, but computers of the time weren't fast enough AND the problems weren't big enough for this method to gain notice. Now the theory, along with some useful additions, is both plausible and there are problems that can use its full capabilities. There is nothing like it out there now, and the professor I work with is one of the founders of the field of machine intelligence (he is quite old :) ).

It isn't actually a GA at all, though it uses Differential Evolution as a training method. To give a very basic idea without giving away too much: it is a method that combines various classifiers (BPNNs, SVMs, etc.) in a way that is far better than simply averaging their results. There is a lot more to it than that, but we are planning to publish several papers on it soon, hence not giving away too many details.

I am only a first-year graduate student (going for my Master's, then a PhD after that). My undergrad degree is in bioengineering, and my CS background is mostly self-taught. The code itself is merely a means to test the theory, after which CS students could rewrite it to be cleaner, faster, and give it a GUI. My term "proprietary" is more in the sense that we don't want people to beat us to publication by disseminating the theory right now. It is the first of many things I will work on, and in a year or two I imagine this theory/code will be available if someone wants to use it.

Sorry to be such a pain, though; I will try to clean things up better next time I post a question.

I have gone back to 2D dynamic arrays and replaced the proprietary stuff with a simple mathematical equation. This should compile fine, and if you run it a couple of times it will crash. Please help if you can!

Project.cpp

#include "Trainer.h"


int main(int argc, char * argv[])
{
   int n_gates = 14;
   int n_models = 3;
   int n_trainings = 89;

   int pop_size = 500;
   int num_gen = 100;
   float min = 0;
   float max = 50;
   float cross_factor = .5;
   float mut_factor = .5;

   Trainer d(pop_size, num_gen, min, max, cross_factor, mut_factor, n_trainings, n_gates, n_models);

   d.run();
}

Trainer.cpp

#include "Trainer.h"
Trainer::Trainer(int pop_size, int num_gen, float min, float max, float cross_factor, float mut_factor, 
int n_trainings, int n_gates, int n_models){
      
      


     total = n_gates +1;  
     float t_sig[total-1];

     top=1000;
   gen_best=0;
   gen_avg=0;
   
   
   char savefile[40];         
   float holdB;
      int nogo=0;
   //set the parameters
   population_size = pop_size;
   num_generations = num_gen;
   min_value = min;
   max_value = max;   
   crossover_factor = cross_factor;
   mutation_factor = mut_factor;

   
   cout <<"Save To Filename" << endl;
   cin >> savefile; 
   pop.open(savefile);
  gen_report.open("abctre4.txt");
   //set the Runner and training data
   num_trainings = n_trainings;
   num_gates = n_gates;
   num_models = n_models;

g = Runner(num_gates, num_models, num_trainings);

try{
   sig_data= (float **) malloc (pop_size * sizeof(float *));
   for(int i=0;i<pop_size;i++){
           sig_data[i] = (float *) malloc (total * sizeof(float));
           }
}
catch(bad_alloc){
                 "SIGNOOOO";
                 }       




   initialize_random_generator(); 
   

  
     
           for(int y=0; y < pop_size; y++){
                 for(int j=0; j < num_gates; j++){
                      holdB=random(min_value, max_value);
                      t_sig[j]=holdB;
                      sig_data[y][j] = holdB;   
                 }
                

                 g.setparameters(t_sig);

                 
                 holdB=g.crossvalidation_run(0);


                 sig_data[y][total-1] = holdB;
                                    
                 if(holdB <= top){
                    gen_best = y;
                    top = holdB;
                    }   
                 gen_avg = gen_avg + holdB;                                      
                 }

          

}
/***************************************************************/

void Trainer::save_pop(){

   cout << " SAVING ";
   cout << "SAVE STREAM OPEN ";
   for(int v = 0; v < population_size; v++){
      for(int f = 0; f < total; f++){
         pop << sig_data[v][f] << " ";
      }
      pop << endl;
      cout << "SAVED";
   }
}

/****************************************************************************/

Trainer::~Trainer()
{
   delete sig_data;
}

/****************************************************************************/

void Trainer::run(){

   mut_data = (float *) malloc(total * sizeof(float));

   float replace = 1000;
   cout << "GEN: 0 " << endl;
   report(0, gen_best, gen_avg);

   for(int q = 1; q < num_generations + 1; q++){
      gen_best = 0;
      gen_avg = 0;
      cout << "GEN:" << q << endl;
      for(int i = 0; i < population_size; i++){
         mutcross(i);
         g.setparameters(mut_data);
         replace = g.crossvalidation_run(0);

         if(sig_data[i][num_gates] > replace){
            cout << "MUT" << endl;
            for(int p = 0; p < num_gates; p++){
               sig_data[i][p] = mut_data[p];
            }
            sig_data[i][num_gates] = replace;
         }
         if(sig_data[i][num_gates] < sig_data[gen_best][num_gates]){
            gen_best = i;
            cout << "BEST SWITCH: " << gen_best << endl;
         }
         gen_avg = gen_avg + sig_data[i][num_gates];
      }

      report(q, gen_best, gen_avg);
   }
   save_pop();
   gen_report.close();
}


/****************************************************************************/

void Trainer::mutcross(int cur){
   int base, diff1, diff2;
   float x = 0;
   float y = 1;

   random(0, population_size-1, cur, base, diff1, diff2);
   for(int r = 0; r < num_gates; r++){
      if(random(x, y) < crossover_factor){
         mut_data[r] = sig_data[cur][r];
      }
      else{
         mut_data[r] = sig_data[base][r] + (mutation_factor * (sig_data[diff1][r] - sig_data[diff2][r]));
      }
   }
}

/****************************************************************************/

void Trainer::report(int gen, int best, float avg){

   float outAz = 0;
   float temp_sig[num_gates];

   if(gen == 0){
      gen_report << "Gen     Avg-mse     Smallest-mse     Az" << endl;
   }
   for(int s = 0; s < num_gates; s++){
      temp_sig[s] = sig_data[best][s];
   }

   g.setparameters(temp_sig);

   outAz = g.crossvalidation_run(1);

   gen_report << setw(3) << gen << "  ";
   gen_report << setw(10) << (avg / population_size) << "  ";
   gen_report << setw(10) << sig_data[best][num_gates] << "  ";
   gen_report << setw(10) << outAz << endl;
   gen_report << setw(10) << "BEST: " << best << endl;
}

/****************************************************************************/

void Trainer::initialize_random_generator()
{
   for (unsigned long i = 0; i < 500000; i++) ; //start the system clock
   srand((unsigned)time(NULL));                  //initialize random seed 
}

/****************************************************************************/

float Trainer::random()
{
   return rand()/(float)32767;          //32767 is the maximum random integer
}

/****************************************************************************/

float Trainer::random(float low, float high)
{
   return (low + (high - low)*random());
}

/****************************************************************************/

int Trainer::random(int low, int high)
{
   return (low + (int)((high - low + 1) * random()));
}

/***************************************************************************/

int Trainer::random(int low, int high, int exclude, int &one, int &two, int &three)
{
   do
   {
      one = random((int)low, (int)high);   
   }
   while (one == exclude);

   do
   {
      two = random((int)low, (int)high);   
   }
   while (two == exclude || two == one);

   do
   {
      three = random((int)low, (int)high);   
   }
   while (three == exclude || three == one || three == two);
   
   return 0;			
}

Trainer.h

#include "Runner.h"


class Trainer {

private:
                   
   float **sig_data;

   int population_size;     //size of population
   int num_generations;     //number of generations
   float min_value;         //minimum string value
   float max_value;         //maximum string value
   float crossover_factor;  //crossover factor
   float mutation_factor;   //mutation factor



   ofstream pop;   
   ofstream gen_report;
   
   float top;
   int gen_best;
   float gen_avg;
   int total;
   Runner g;          //Runner Project

   int num_trainings;       
   int num_gates;          
   int num_models;         

   void save_pop();
   void load_pop();
   void initialize_random_generator(); //initializes the random seed
   float random();                     //returns a real between 0 and 1
   float random(float low, float high);//returns a real between low and high
   int random(int low, int high);      //returns an int between low and high
   int random(int low, int high, int exclude, int &one, int &two, int &three);
                                       //returns three integers beween low and
   float *mut_data;                                       //high excluding exclude
   void mutcross(int cur);


public:
       

       
   Trainer(int pop_size, int num_gen, float min, float max, float cross_factor, float mut_factor, int n_trainings, int n_gates, int n_models);   //constructor
       
   ~Trainer();                                  //destructor



   void run();    //generates successive generations

   void report(int gen, int best, float avg);
};

Runner.cpp

#include "Runner.h"

Runner::Runner(){         

                           }

Runner::Runner(int n_gates, int n_models, int n_trainings){

   num_gates = n_gates;
   num_models = n_models;
   num_trainings = n_trainings;

   total = n_gates + n_models + 1;
try{
   training_data = (float **) malloc (n_trainings * sizeof (float *));
   for(int i=0;i<n_trainings;i++){
           training_data[i] = (float *) malloc (total * sizeof(float));
           }}
           catch(bad_alloc){
                            
                            "WHOOPS";}



   load_training_data();



   parameters = (float *) malloc (num_gates * sizeof(float));             
   errors = (float *) malloc (num_gates * sizeof(float));              
   weights = (float *) malloc (num_gates * sizeof(float));          


}



/**************************************************************************/

Runner::~Runner()
{
   //delete []parameters;
   delete []errors;
   delete []weights;
}

/**************************************************************************/

float Runner::compute_output(int exclude)
{
   float temp = 0;
   temp = training_data[exclude][num_gates + num_models] - training_data[exclude][num_gates]/parameters[exclude];

   return temp;
}


/***************************************************************************/

void Runner::load_training_data()
{
   ifstream c("abc.txt");

   for(int i = 0; i < num_trainings; i++){
      for(int j = 0; j < num_gates + num_models + 1; j++){
         c >> training_data[i][j];
      }
   }
}


/***************************************************************************/


void Runner::setparameters(float *sig){

   for (int i = 0; i < num_gates; i++)
   {
      parameters[i] = sig[i];
   }
}

/***************************************************************************/



float Runner::getAz(float *Z_pred, float *Z_actual, int NumTrain){

   float Az = 0.0;

   //*** Calculate Hit Rate and FAR

   int maxbinsize = NumTrain;

   int numofcuts = 40;
   int cutoff = 0;
   float truepos = 0;
   float trueneg = 0;
   float falsepos = 0;
   float falseneg = 0;
   float ROCStore[numofcuts+1][2];
   float cutsize = 0;
   float delta = 20;

   while(cutsize <= 1){

      truepos = 0;
      trueneg = 0;
      falsepos = 0;
      falseneg = 0;

      //We find the false positive, true positive, false negative, and true negative
      //counts for each cutoff point, in this case increments of .05 from -1 to 1
      for(int p = 0; p < maxbinsize; p++){

         if (Z_actual[p] == 1){
            if(Z_pred[p] > cutsize){
               truepos++;
            }
            else if(Z_pred[p] < cutsize){
               falseneg++;
            }
            else if(Z_pred[p] == cutsize){ //err on the side of positive
               truepos++;
            }
         }
         else if (Z_actual[p] == -1){
            if(Z_pred[p] > cutsize){
               falsepos++;
            }
            else if(Z_pred[p] < cutsize){
               trueneg++;
            }
            else if(Z_pred[p] == cutsize){ //err on the side of positive
               falsepos++;
            }
         }
      }

      float sensitivity = 0;
      float specificity = 0;
      float FAR = 0; //False Alarm Rate

      //We find the sensitivity and specificity for each cutoff point
      //and then put them into an array
      if((truepos + falseneg) > 0){
         sensitivity = (truepos / (truepos + falseneg));
      }
      else{
         sensitivity = 1;
      }

      if((trueneg + falsepos) > 0){
         specificity = (trueneg / (trueneg + falsepos));
      }
      else{
         specificity = 1;
      }

      FAR = 1 - specificity;
      ROCStore[cutoff][1] = sensitivity;
      ROCStore[cutoff][0] = FAR;

      cutoff++;
      cutsize = (cutoff / delta) - 1;
   }

   Az = 0;
   int notset = 1;
   int partAzStart = 0;
   float getslope = 0;

   //Sort by FAR (insertion sort)

   int sortB;
   float index = 0;
   float indexB = 0;

   for(int sortA = 1; sortA <= numofcuts+1; sortA++){

      index = ROCStore[sortA][1];
      indexB = ROCStore[sortA][0];

      sortB = sortA;

      while ((sortB > 0) && (ROCStore[sortB-1][0] >= indexB))
      {
         ROCStore[sortB][1] = ROCStore[sortB-1][1];
         ROCStore[sortB][0] = ROCStore[sortB-1][0];
         sortB = sortB - 1;
      }

      ROCStore[sortB][1] = index;
      ROCStore[sortB][0] = indexB;
   }
   //End Sort

   //Trapezoid-rule integration of the sorted ROC points
   for(int h = 0; h < numofcuts+1; h++){
      Az = Az + (((ROCStore[h+1][0] - ROCStore[h][0])*(ROCStore[h][1] + ROCStore[h+1][1]))/2);
   }

   //Partial Az disabled to save computation time
   /*
   if (ROCStore[h+1][1] > .9 && notset == 1){
      partAzStart = h;
      notset = 0;
   }

   float partROC[numofcuts-partAzStart][2];

   getslope = ((ROCStore[partAzStart+1][1]-ROCStore[partAzStart][1])/(ROCStore[partAzStart+1][0]-ROCStore[partAzStart][0]));
   partROC[0][0] = ROCStore[partAzStart][0] + ((.9-ROCStore[partAzStart][1])/getslope);
   partROC[0][1] = .9;

   for(int q = 0; q < (numofcuts - partAzStart); q++){
      partROC[q][0] = ROCStore[partAzStart+q][0];
      partROC[q][1] = ROCStore[partAzStart+q][1];
   }

   for(int r = 0; r < (numofcuts - partAzStart - 1); r++){
      partAz = partAz + ((partROC[r+1][0] - partROC[r][0]) * ((partROC[r][1] + partROC[r+1][1])/2));
   }
   */

   if(Az > 1){
      return 0;
   }
   return Az;
}



/***************************************************************************/
/****** 1-holdout CrossValidation -- By Clayton T. Morrison - 5/14/99 *******/

float Runner::crossvalidation_run(int doAz)
{
   float predict[num_trainings];
   float actual[num_trainings];
   float perform;

   for(int i = 0; i < num_trainings; i++)
   {
      predict[i] = compute_output(i);
      actual[i] = training_data[i][num_gates + num_models];
   }

   if (doAz == 0){
      perform = getMSE(predict, actual, num_trainings);
   }
   else{
      perform = getAz(predict, actual, num_trainings);
   }

   return perform;

} // end 1-holdout CrossValidation method

float Runner::getMSE(float *Z_pred, float *Z_actual, int NumTrain){

   float error = 0.0;

   for(int i = 0; i < NumTrain; i++){
      error = error + ((Z_actual[i]-Z_pred[i]) * (Z_actual[i]-Z_pred[i]));
   }

   return (error / NumTrain);
}

Runner.h

#include <iostream>
#include <fstream>
#include <cstdlib>
#include <iomanip>
#include <string.h>
#include <time.h>
#include <math.h>


using namespace std;

class Runner {

private:
   int num_gates;
   int num_models;
   int num_trainings;

   float *parameters;
   float *errors;
   float *weights;
   int total;

   float** training_data;


public:

   Runner();
   Runner(int n_gates, int n_models, int n_trainings);
   ~Runner();

   float compute_output(int exclude);

   void load_training_data();

   float crossvalidation_run(int doAz);

   void setparameters(float * sig);
   float getMSE(float *Z_pred, float *Z_actual, int NumTrain);

   float getAz(float *Z_pred, float *Z_actual, int NumTrain);

};

The input file, with the data randomized, is included as:

I-Oracle-View1.txt

Well, first off, thanks for actually posting a proper program.

The following code crashes for at least four reasons, all easily discoverable in 20 seconds with the debugger (which is why you should learn to use ddd/gdb, why you should compile with full warnings, and why you should post complete code).

The first error: in Runner::Runner(int, int, int) you allocate memory with malloc. Then in Runner::~Runner() you deallocate that memory with delete []. This is not correct and will result in memory corruption; what happens after that, I have no idea.
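
Whatever allocates must be matched by its partner when you free, and every row needs freeing too (a sketch of both consistent options):

//C style throughout:
training_data = (float **) malloc (n_trainings * sizeof(float *));
for(int i = 0; i < n_trainings; i++)
   training_data[i] = (float *) malloc (total * sizeof(float));
//...and in the destructor:
for(int i = 0; i < num_trainings; i++)
   free(training_data[i]);
free(training_data);

//or C++ style throughout:
training_data = new float*[n_trainings];
for(int i = 0; i < n_trainings; i++)
   training_data[i] = new float[total];
//...and in the destructor:
for(int i = 0; i < num_trainings; i++)
   delete [] training_data[i];
delete [] training_data;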

Second: Trainer::random() does not work. rand() gives a number between 0 and RAND_MAX, BUT you have hard-coded the maximum random integer of a 16-bit machine (32767). So you do not get a number between 0 and 1, and that messes up many other things, e.g. base in mutcross() ends up far out of range.
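
The portable fix is the RAND_MAX constant from <cstdlib>:

float Trainer::random()
{
   //dividing by RAND_MAX + 1.0f keeps the result in [0, 1), which also
   //keeps random(int low, int high) inside [low, high]
   return rand() / ((float)RAND_MAX + 1.0f);
}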

Third: initialization. You do none, so you end up deleting memory locations that were never allocated. If you write a default constructor, then write a proper initializer list.
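
A default-constructed Runner should leave its pointers in a state that is safe to free (a sketch using your member names; delete[] and free are both no-ops on a null pointer):

Runner::Runner()
 : num_gates(0), num_models(0), num_trainings(0), total(0),
   parameters(NULL), errors(NULL), weights(NULL), training_data(NULL)
{
}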

Fourth: assignment operators. operator=(const Runner&) is compiler-generated, BUT you have allocated memory, so it is 99.99% certain to fail. Write one, OR, if you think it is not used, declare it private; then you will see that g = Runner(...) uses it (and is a pointless create/copy/delete sequence).
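
The quickest way to see every place the compiler-generated copy is used is the C++98 idiom of declaring it private with no definition:

class Runner {
private:
   Runner(const Runner&);              //not defined: any copy now fails to compile or link
   Runner& operator=(const Runner&);   //same for assignment
   //...
};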

That fixes just about everything. I didn't worry about the results, but it runs without memory corruption.
