Single-Threaded Memory Pooling

Calling the global new and delete operators frequently is not cheap. These operators are general purpose: they have to serve requests of any size, and they have to be safe in both single-threaded and multithreaded applications. That generality carries a performance cost. Let's get concrete. If we run the code below, it completes in 375 ms:

class Rational {
public:
    Rational(int a = 0, int b = 1) : n(a), d(b) {}

private:
    int n; // Numerator
    int d; // Denominator
};

int main()
{
    Rational *array[1000];
    …
    // Start timing here
    for (int j = 0; j < 500; j++) {
        for (int i = 0; i < 1000; i++) {
            array[i] = new Rational(i);
        }
        for (int i = 0; i < 1000; i++) {
            delete array[i];
        }
    }
    // Stop timing here
    …
}

Since we allocate and deallocate Rational objects this frequently, why not give the class its own memory manager? Consider the code below:

#include <windows.h>
#include <iostream>

class NextOnFreeList {
public:
    NextOnFreeList *next;
};

class Rational {
public:
    Rational(int a = 0, int b = 1) : n(a), d(b) {}

    inline void *operator new(size_t size);
    inline void operator delete(void *doomed, size_t size);

    static void newMemPool() { expandTheFreeList(); }
    static void deleteMemPool();

private:
    static NextOnFreeList *freeList; // A free list of Rational objects.
    static void expandTheFreeList();
    enum { EXPANSION_SIZE = 32 };

    int n; // Numerator
    int d; // Denominator
};

inline void *Rational::operator new(size_t size)
{
    if (0 == freeList) { // If the list is empty, fill it up.
        expandTheFreeList();
    }
    NextOnFreeList *head = freeList;
    freeList = head->next;
    return head;
}

inline void Rational::operator delete(void *doomed, size_t size)
{
    NextOnFreeList *head = static_cast<NextOnFreeList *>(doomed);
    head->next = freeList;
    freeList = head;
}

void Rational::expandTheFreeList()
{
    // We must allocate an object large enough to contain the next pointer.
    size_t size = (sizeof(Rational) > sizeof(NextOnFreeList *)) ?
        sizeof(Rational) : sizeof(NextOnFreeList *);

    NextOnFreeList *runner = (NextOnFreeList *) new char[size];

    freeList = runner;

    for (int i = 0; i < EXPANSION_SIZE; i++) {
        runner->next = (NextOnFreeList *) new char[size];
        runner = runner->next;
    }
    runner->next = 0;
}

void Rational::deleteMemPool()
{
    NextOnFreeList *nextPtr;
    for (nextPtr = freeList; nextPtr != NULL; nextPtr = freeList) {
        freeList = freeList->next;
        // Each block was allocated as a char array, so release it the same way.
        delete [] (char *) nextPtr;
    }
}

NextOnFreeList *Rational::freeList = 0;

int main()
{
    DWORD time1, time2;
    Rational *array[1000];
    Rational::newMemPool();

    time1 = GetTickCount();

    for (int j = 0; j < 500; j++) {
        for (int i = 0; i < 1000; i++) {
            array[i] = new Rational(i);
        }
        for (int i = 0; i < 1000; i++) {
            delete array[i];
        }
    }

    time2 = GetTickCount();

    Rational::deleteMemPool();

    std::cout << "Performance: " << time2 - time1 << "\n";

    return 0;
}

Surprisingly, it now runs in 62 ms! The speedup comes from replacing a general-purpose allocator call with a constant-time push and pop on a free list of fixed-size blocks. If we make the pool more generic by turning it into a class template, it takes 94 ms. See the code below:

#include <windows.h>
#include <iostream>

class NextOnFreeList {
public:
    NextOnFreeList *next;
};

template <class T>
class MemoryPool {
public:
    MemoryPool(size_t size = EXPANSION_SIZE);
    ~MemoryPool();

    // Allocate a T element from the free list.
    inline void *alloc(size_t size);

    // Return a T element to the free list.
    inline void free(void *someElement);

private:
    // Next element on the free list.
    MemoryPool<T> *next;

    // If the free list is empty, expand it by this amount.
    enum { EXPANSION_SIZE = 32 };

    // Add free elements to the free list.
    void expandTheFreeList(int howMany = EXPANSION_SIZE);
};

template <class T>
MemoryPool<T>::MemoryPool(size_t size)
{
    expandTheFreeList(size);
}

template <class T>
MemoryPool<T>::~MemoryPool()
{
    MemoryPool<T> *nextPtr = next;
    for (nextPtr = next; nextPtr != NULL; nextPtr = next) {
        next = next->next;
        // Each block was allocated as a char array, so release it the same way.
        delete [] (char *) nextPtr;
    }
}

template <class T>
inline void *MemoryPool<T>::alloc(size_t)
{
    if (!next) {
        expandTheFreeList();
    }
    MemoryPool<T> *head = next;
    next = head->next;
    return head;
}

template <class T>
inline void MemoryPool<T>::free(void *doomed)
{
    MemoryPool<T> *head = static_cast<MemoryPool<T> *>(doomed);
    head->next = next;
    next = head;
}

template <class T>
void MemoryPool<T>::expandTheFreeList(int howMany)
{
    // We must allocate an object large enough to contain the next pointer.
    size_t size = (sizeof(T) > sizeof(MemoryPool<T> *)) ?
        sizeof(T) : sizeof(MemoryPool<T> *);

    MemoryPool<T> *runner = (MemoryPool<T> *) new char[size];

    next = runner;
    for (int i = 0; i < howMany; i++) {
        runner->next = (MemoryPool<T> *) new char[size];
        runner = runner->next;
    }
    runner->next = 0;
}

class Rational {
public:
    Rational(int a = 0, int b = 1) : n(a), d(b) {}

    void *operator new(size_t size) { return memPool->alloc(size); }
    void operator delete(void *doomed, size_t size) { memPool->free(doomed); }

    static void newMemPool() { memPool = new MemoryPool<Rational>; }
    static void deleteMemPool() { delete memPool; }

private:
    int n; // Numerator
    int d; // Denominator

    static MemoryPool<Rational> *memPool;
};

MemoryPool<Rational> *Rational::memPool = 0;

int main()
{
    DWORD time1, time2;
    Rational *array[1000];
    Rational::newMemPool();

    time1 = GetTickCount();

    for (int j = 0; j < 500; j++) {
        for (int i = 0; i < 1000; i++) {
            array[i] = new Rational(i);
        }
        for (int i = 0; i < 1000; i++) {
            delete array[i];
        }
    }

    time2 = GetTickCount();

    Rational::deleteMemPool();

    std::cout << "Performance: " << time2 - time1 << "\n";

    return 0;
}
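The real payoff of the template version is reuse: any class that allocates many small, fixed-size objects can plug into the same MemoryPool. As a minimal sketch of that reuse (the Complex class below is hypothetical and not part of the measured example), another class would simply forward its operators to its own pool:

class Complex {
public:
    Complex(double r = 0.0, double i = 0.0) : re(r), im(i) {}

    // Forward allocation to the class's private pool, exactly as Rational does.
    void *operator new(size_t size) { return memPool->alloc(size); }
    void operator delete(void *doomed, size_t size) { memPool->free(doomed); }

    static void newMemPool() { memPool = new MemoryPool<Complex>; }
    static void deleteMemPool() { delete memPool; }

private:
    double re; // Real part
    double im; // Imaginary part

    static MemoryPool<Complex> *memPool;
};

MemoryPool<Complex> *Complex::memPool = 0;

The pool code itself never changes; only the template argument does.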

Single-Threaded Variable-Size Memory Manager

Typical implementations of a Web server are gigantic string crunchers. If you peek into the code you'll find that the handling of a single HTTP request requires a large number of string surgeries. Many of those calls require space allocation to create new or duplicated strings. Furthermore, you cannot tell in advance how many string allocations will be necessary or of what size. Potentially, strings are unbounded. The nature of string manipulations is dictated by the content of the particular HTTP request. A fixed-size memory manager would fall short of the requirement. Relying on the global new() and delete(), however, is out of the question. The global new() and delete() are expensive: they consume hundreds of instructions and, moreover, they contain mutually exclusive critical sections that prevent concurrent thread execution and consequently hurt scalability. They would damage the speed and scalability of a Web server. This is a perfect place for a home-grown, variable-size memory manager.

After implementing a single-threaded variable-size memory manager and hooking it into the Rational class, the execution time was 200 ms. A sketch of the idea follows.
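The variable-size manager that produced the 200 ms figure is not shown in this post, but the usual approach is to carve each request sequentially out of a large chunk of raw bytes and to chain the chunks together so the whole pool can be released in one sweep. Below is a minimal sketch of that idea; the MemoryChunk and ByteMemoryPool names, the 4096-byte default chunk size, and the no-op free are illustrative assumptions rather than the measured code, and alignment handling is omitted for brevity:

#include <cstddef>

// A chunk of raw bytes. Requests are carved off the top; nothing is
// tracked per allocation, so a chunk can only be freed as a whole.
class MemoryChunk {
public:
    MemoryChunk(MemoryChunk *nextChunk, size_t reqSize)
        : next(nextChunk), chunkSize(reqSize),
          bytesAlreadyAllocated(0), mem(new char[reqSize]) {}
    ~MemoryChunk() { delete [] mem; }

    bool roomFor(size_t size) const { return bytesAlreadyAllocated + size <= chunkSize; }

    void *alloc(size_t size) {
        void *addr = mem + bytesAlreadyAllocated;
        bytesAlreadyAllocated += size;
        return addr;
    }

    MemoryChunk *nextMemChunk() const { return next; }

private:
    MemoryChunk *next;
    size_t chunkSize;
    size_t bytesAlreadyAllocated;
    char *mem;
};

// A variable-size pool: a linked list of chunks, grown on demand and
// released all at once in the destructor.
class ByteMemoryPool {
public:
    ByteMemoryPool(size_t initChunkSize = 4096)
        : chunkSize(initChunkSize),
          listOfMemoryChunks(new MemoryChunk(0, initChunkSize)) {}

    ~ByteMemoryPool() {
        while (listOfMemoryChunks) {
            MemoryChunk *doomed = listOfMemoryChunks;
            listOfMemoryChunks = listOfMemoryChunks->nextMemChunk();
            delete doomed;
        }
    }

    // Serve a request of any size; start a new chunk when the current one is full.
    void *alloc(size_t size) {
        if (!listOfMemoryChunks->roomFor(size)) {
            size_t newSize = (size > chunkSize) ? size : chunkSize;
            listOfMemoryChunks = new MemoryChunk(listOfMemoryChunks, newSize);
        }
        return listOfMemoryChunks->alloc(size);
    }

    // Individual frees are deliberately a no-op in this sketch; memory
    // comes back when the whole pool is destroyed.
    void free(void * /*someElement*/) {}

private:
    size_t chunkSize;
    MemoryChunk *listOfMemoryChunks;
};

A class such as Rational would route its operator new and operator delete through a ByteMemoryPool instance, just as it routed them through MemoryPool<Rational> above: individual deletes become cheap, and all the memory returns when the pool itself is destroyed.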

Key Points:

Flexibility trades off with speed. As the power and flexibility of memory management increases, execution speed decreases.

The global memory manager (implemented by new() and delete()) is general purpose and consequently expensive.

Specialized memory managers can improve performance significantly.
