I've made some functions which can insert file inside a file but only with a small text files and they're very slowly. I will thank you if someone know a better way. Here are my functions:

1st

struct Tree {
    char Symbol;
    Tree* next;
} ;


Tree* GetTreeHandler(unsigned long num) ;


bool SavFil(char* mafile, char* infile, unsigned long ptr, unsigned long size) {
    Tree Cache;
    std::fstream maFile(mafile, std::fstream::in | std::fstream::out | std::fstream::binary);
    std::fstream inFile(infile, std::fstream::in | std::fstream::out | std::fstream::binary);
    std::fstream cacFile("Data.dat", std::fstream::in | std::fstream::out | std::fstream::trunc);
    inFile.seekg(0, std::ios::end);
    maFile.seekg(0, std::ios::end);
    const unsigned long endin = inFile.tellg();
    const unsigned long endma = maFile.tellg();
    unsigned long NumTree(0);
    for(unsigned long Curr(0); Curr<ptr; ++Curr) {
        cacFile.seekp(Curr, std::ios::beg);
        maFile.seekg(Curr, std::ios::beg);
        cacFile.put(maFile.get());
    }
    Tree* CurrSym=&Cache;
    unsigned long Currptr(0);
    for(; Currptr<=endma-ptr+(endin-size); ++Currptr) {
        maFile.seekg(Currptr+ptr, std::ios::beg);
        if(Currptr<endin) {
            if(Currptr>=size) {
                CurrSym->Symbol=maFile.get();
                CurrSym=(CurrSym->next=new Tree);
            }
            inFile.seekg(Currptr, std::ios::beg);
            cacFile.seekp(Currptr+ptr, std::ios::beg);
            cacFile.put(inFile.get());
        }
        else if(Currptr>=endin) {
            if(Currptr<size) {
                maFile.seekg(size+ptr+NumTree, std::ios::beg);
                cacFile.seekp(Currptr+ptr, std::ios::beg);
            }
            else {
                maFile.seekg(Currptr+ptr, std::ios::beg);
                cacFile.seekp(Currptr+ptr, std::ios::beg);
            }
            CurrSym->Symbol=maFile.get();
            CurrSym=(CurrSym->next=new Tree);
            cacFile.put(GetTreeHandler(NumTree++)->Symbol);
        }
    }
    maFile.close();
    maFile.open(mafile, std::fstream::in | std::fstream::out | std::fstream::binary | std::fstream::trunc);
    cacFile.seekg(0, std::ios::end);
    const unsigned long endcac = (int)cacFile.tellg()-1;
    for(unsigned long Curr(0); Curr<endcac; ++Curr) {
        cacFile.seekg(Curr, std::ios::beg);
        maFile.seekp(Curr, std::ios::beg);
        maFile.put(cacFile.get());
    }
    cacFile.close();
    remove("Data.dat");
    return (Currptr+1==endcac);
}

This function is too slow it will takes an ages for inserting an 24mb for example.

2st

bool SavFil(char* mafile, char* infile, unsigned long ptr, unsigned long size) {
    std::fstream maFile(mafile, std::fstream::in | std::fstream::out | std::fstream::binary);
    std::fstream inFile(infile, std::fstream::in | std::fstream::out | std::fstream::binary);
    inFile.seekg(0, std::ios::end);
    maFile.seekg(0, std::ios::end);
    const unsigned long endin = inFile.tellg();
    const unsigned long endma = maFile.tellg();
    char* PTRFL=new char[endma+endin-size+1];
    maFile.seekg(0, std::ios::beg);
    maFile.get(PTRFL, ptr+1);
    inFile.seekg(0, std::ios::beg);
    inFile.get(&PTRFL[ptr], endin+1);
    maFile.seekg(ptr+size, std::ios::beg);
    maFile.get(&PTRFL[ptr+endin], endma);
    maFile.close();
    std::ofstream maFileO(mafile);
    maFileO.write(PTRFL, endma+endin-size);
    delete[] PTRFL;
    return (1);
}

This one is giving an errors while working with large files( it can copy the full file in the string ).

I'll be vary happy for an advice!

Your functions represent two extremes:

  1. Reading and writing character-by-character
  2. Reading and writing whole files as a string

You can get performance benefits without excessive memory usage by reading and writing the files in blocks. This is a happy medium from the two extremes, look into the read() and write() member functions for doing block I/O.

lol I'm extremist! Thanks for the advice but how can I sort the memory into blocks?

Edited 4 Years Ago by sasho648

This article has been dead for over six months. Start a new discussion instead.