
/ray/src/lib/salloc.cc

00001 /* 
00002  * lib/salloc.cc
00003  * 
00004  * Routines for memory allocation which limit the amount of memory 
00005  * being used at the same time as well as support for allocation 
00006  * checking/debugging. 
00007  * 
00008  * Copyright (c) 2000--2004 by Wolfgang Wieser ] wwieser (a) gmx <*> de [ 
00009  * 
00010  * This file may be distributed and/or modified under the terms of the 
00011  * GNU General Public License version 2 as published by the Free Software 
00012  * Foundation. (See COPYING.GPL for details.)
00013  * 
00014  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
00015  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
00016  * 
00017  */
00018 
00019 #include "salloc.h"
00020 
00021 #include <stdio.h>
00022 
00023 #if HAVE_MALLOC_H
00024 #include <malloc.h>
00025 #endif
00026 
00027 
00028 // This is the low-level interface to the system allocator to be used: 
00029 inline void *_malloc(size_t size)  {  return(malloc(size));  }
00030 inline void *_realloc(void *ptr,size_t size)  {  return(realloc(ptr,size));  }
00031 inline void _free(void *ptr)  {  return(free(ptr));  }
00032 
00033 
00034 //------------------------------------------------------------------------------
00035 // Internal code, mostly for allocation debugging and allocation limitation. 
00036 
00037 // We need a mutex to protect the statistics and also the B-tree in 
00038 // case allocation debugging is switched on. 
00039 #include <lib/threads/mutex.h>
00040 static FastMutex mutex;
00041 
00042 static struct LMallocUsage lmu=
00043 {
00044     INIT_FIELD(alloc_limit) 0,
00045     INIT_FIELD(curr_used) 0,
00046     INIT_FIELD(max_used) 0,
00047     INIT_FIELD(malloc_calls) 0,
00048     INIT_FIELD(realloc_calls) 0,
00049     INIT_FIELD(free_calls) 0,
00050     INIT_FIELD(used_chunks) 0,
00051     INIT_FIELD(max_used_chunks) 0,
00052     INIT_FIELD(real_failures) 0,
00053     INIT_FIELD(limit_failures) 0
00054 };
00055 
00056 /* Set the limit: maximum amount of memory to acquire using LMalloc(). */
00057 void LMallocSetLimit(size_t limit)
00058 {  lmu.alloc_limit=limit;  }
00059 
00060 /* Get the LMallocUsage content: */
00061 void LMallocGetUsage(struct LMallocUsage *dest)
00062 {
00063     if(dest)
00064     {  memcpy(dest,&lmu,sizeof(lmu));  }
00065 }
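// Usage sketch (illustration only, not part of salloc.cc): how a caller might
// combine the limit and usage-query interface above.  It only uses
// LMallocSetLimit(), LMallocGetUsage(), LMalloc()/LFree() as declared via
// salloc.h and the LMallocUsage fields initialized in this file. 
#if 0   /* illustration only */
static void _ExampleUsageQuery()
{
    LMallocSetLimit(64*1024*1024);   // allow at most 64 MB via LMalloc()
    
    void *buf=LMalloc(1024);
    
    struct LMallocUsage u;
    LMallocGetUsage(&u);
    fprintf(stderr,"in use: %u bytes in %d chunks (peak: %u bytes)\n",
        u.curr_used,u.used_chunks,u.max_used);
    
    buf=LFree(buf);   // LFree() always returns NULL. 
}
#endif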
00066 
00067 
00068 static void _LMallocPrintStats()
00069 {
00070     fprintf(stderr,
00071         "  Alloc: %u bytes in %d chunks; Peak: %u by,%d chks; (%u/%u/%u)\n",
00072         lmu.curr_used,lmu.used_chunks,lmu.max_used,lmu.max_used_chunks,
00073         lmu.malloc_calls,lmu.realloc_calls,lmu.free_calls);
00074     fprintf(stderr,
00075         "  Limit: %u bytes;  USE_MALLOC_USABLE_SIZE=%s, LMALLOC_DEBUGGING=%s\n",
00076         lmu.alloc_limit,
00077 #if USE_MALLOC_USABLE_SIZE
00078         "yes",
00079 #else
00080         "no",
00081 #endif
00082 #if LMALLOC_DEBUGGING
00083         "yes"
00084 #else
00085         "no"
00086 #endif
00087         );
00088 }
00089 
00090 
00091 // Called whenever we exit due to allocation problems, so that the exit 
00092 // is somewhat more verbose (the statistics are printed first). 
00093 static void _LMallocExit()
00094 {
00095     _LMallocPrintStats();
00096     
00097     abort();   // This aids in debugging. 
00098     exit(-1);
00099 }
00100 
00101 #if LMALLOC_DEBUGGING
00102 
00103 struct _packed_ LMEntry
00104 {
00105     void *ptr;
00106 #if !USE_MALLOC_USABLE_SIZE
00107     size_t _size;
00108 #endif
00109     
00110     inline size_t size()
00111     {
00112 #if USE_MALLOC_USABLE_SIZE
00113         return(malloc_usable_size(ptr));
00114 #else
00115         return(_size);
00116 #endif
00117     }
00118     
00119     // These are needed for the TLBTree to operate properly. 
00120     inline bool operator==(const LMEntry &e) const  {  return(ptr==e.ptr);  }
00121     inline bool operator!=(const LMEntry &e) const  {  return(ptr!=e.ptr);  }
00122     inline bool operator<(const LMEntry &e) const   {  return(ptr<e.ptr);  }
00123     inline bool operator<=(const LMEntry &e) const  {  return(ptr<=e.ptr);  }
00124     inline bool operator>(const LMEntry &e) const   {  return(ptr>e.ptr);  }
00125     inline bool operator>=(const LMEntry &e) const  {  return(ptr>=e.ptr);  }
00126     // For the search: 
00127     inline bool operator==(void *ptr2) const  {  return(ptr==ptr2);  }
00128     inline bool operator!=(void *ptr2) const  {  return(ptr!=ptr2);  }
00129     inline bool operator<(void *ptr2) const   {  return(ptr<ptr2);  }
00130     inline bool operator<=(void *ptr2) const  {  return(ptr<=ptr2);  }
00131     inline bool operator>(void *ptr2) const   {  return(ptr>ptr2);  }
00132     inline bool operator>=(void *ptr2) const  {  return(ptr>=ptr2);  }
00133     
00134     // Constructor: Same API independent of USE_MALLOC_USABLE_SIZE. 
00135 #if USE_MALLOC_USABLE_SIZE
00136     inline LMEntry(void *pptr,size_t /*psize*/) : ptr(pptr) {}
00137 #else
00138     inline LMEntry(void *pptr,size_t psize) : ptr(pptr),_size(psize) {}
00139 #endif
00140     
00141     // Default constructor does nothing. 
00142     inline LMEntry() {}
00143     
00144     // Destructor: 
00145     inline ~LMEntry(){}
00146     
00147     // Copy constructor and assignment: use default. 
00148 };
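// Illustration (only; not part of salloc.cc): the void* comparison overloads
// above allow an ordered container to compare stored LMEntry objects directly
// against a bare pointer, which is all that LFree()/LRealloc() have at hand.
// This hypothetical helper mimics the kind of lookup a container like TLBTree
// performs; the real TLBTree implementation lives in lib/tl/tlbtree.h. 
#if 0   /* illustration only */
static LMEntry *_ExampleFindByPtr(LMEntry *arr,int n,void *key)
{
    // Plain binary search over an array sorted by LMEntry::ptr. 
    int lo=0,hi=n;
    while(lo<hi)
    {
        int mid=(lo+hi)/2;
        if(arr[mid]==key)      return(&arr[mid]);   // LMEntry == void* overload
        else if(arr[mid]<key)  lo=mid+1;            // LMEntry <  void* overload
        else                   hi=mid;
    }
    return(NULL);
}
#endif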
00149 
00150 
00151 #include <lib/tl/defop.h>
00152 struct LMEntryOperators : TLDefaultOperators_CDT<LMEntry>
00153 {
00154     // Private versions of the (de)allocation routines: 
00155     // Because of course, here in the allocation routines, we cannot use 
00156     // our own allocation routines recursively! 
00157     static inline void *alloc(size_t size)
00158     {
00159         void *ptr=_malloc(size);
00160         if(!ptr)  _AllocFailure(size);
00161         return(ptr);
00162     }
00163     static inline void *free(void *ptr)
00164         {  _free(ptr);  return(NULL);  }
00165 };
00166 
00167 
00168 // We use a TLBTree to efficiently store the pointers. 
00169 #include <lib/tl/tlbtree.h>
00170 typedef TLBTree<LMEntry,LMEntryOperators> LMTree;
00171 static LMTree *InitializeAllocDebugging();
00172 // This will automatically call the init function on startup. 
00173 static LMTree *chunks = InitializeAllocDebugging();
00174 
00175 static LMTree *InitializeAllocDebugging()
00176 {
00177     // Okay, be careful here inside the alloc debugging code. 
00178     // We cannot simply call operator new or LMalloc(). 
00179     
00180     void *space=_malloc(sizeof(LMTree));
00181     if(!space)  _AllocFailure(sizeof(LMTree));
00182     // Use operator new with explicit placement.
00183     LMTree *chunks = new(space) LMTree(/*m=*/32);
00184     
00185     fprintf(stderr,"LMalloc debugging initialized "
00186         "(sizeof(LMEntry)=%d, MUS=%s).\n",
00187         sizeof(LMEntry),
00188 #if USE_MALLOC_USABLE_SIZE
00189         "yes"
00190 #else
00191         "no"
00192 #endif
00193         );
00194     
00195     // Arrange for the statistics to be printed on exit. 
00196     atexit(&_LMallocPrintStats);
00197     
00198     return(chunks);
00199 }
00200 
00201 #endif  /* LMALLOC_DEBUGGING */
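// Illustration (only; not part of salloc.cc): the initialization idiom used
// above -- a file-scope pointer whose initializer is a function call -- makes
// the setup run during static initialization, i.e. before main(), and the
// atexit() registration makes the statistics appear at program end.  A
// self-contained analogue of the pattern (all names made up): 
#if 0   /* illustration only */
#include <stdio.h>
#include <stdlib.h>

static int *_InitExampleTable();
// The initializer call runs during static initialization, before main(). 
static int *example_table=_InitExampleTable();

static void _ExampleAtExit()
{  fprintf(stderr,"example table: shutting down\n");  }

static int *_InitExampleTable()
{
    fprintf(stderr,"example table: initialized before main()\n");
    int *t=(int*)malloc(16*sizeof(int));
    if(!t)  abort();
    atexit(&_ExampleAtExit);   // make the shutdown message appear at exit
    return(t);
}

int main()
{
    example_table[0]=42;
    fprintf(stderr,"main() running, table[0]=%d\n",example_table[0]);
    return(0);
}
#endif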
00202 
00203 // NOTE: It is crucial that we do this here and not above in the 
00204 //       alloc debug code. 
00205 #if !USE_MALLOC_USABLE_SIZE
00206 // We do not know in realloc/free how big the block is and we have no 
00207 // alloc debugging in place to get the size by other means, so assume 
00208 // a size of 1 for all chunks so that the byte counters actually count 
00209 // *chunks*, i.e. balanced calls to LMalloc()/LRealloc()/LFree(), not bytes. 
00210 #define malloc_usable_size(x)  1
00211 #endif 
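// Illustration (only; not part of salloc.cc): on systems that provide
// malloc_usable_size() (e.g. glibc, via <malloc.h>), the usable size of a
// chunk may be larger than what was requested, which is why the accounting
// below credits the usable size and not the requested size. 
#if 0   /* illustration only */
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main()
{
    void *p=malloc(100);
    // Typically prints a value >= 100 (e.g. 104 or 112, allocator-dependent).
    printf("requested 100, usable %zu\n",malloc_usable_size(p));
    free(p);
    return(0);
}
#endif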
00212 
00213 
00214 static inline void *_LMalloc(size_t size)
00215 {
00216     MutexLock<FastMutex> lock(mutex);
00217     
00218     if(lmu.alloc_limit)
00219     {
00220         if(lmu.curr_used+size>lmu.alloc_limit)
00221         {
00222             ++lmu.limit_failures;
00223             //if(lmalloc_failure_handler)
00224             //{  (*lmalloc_failure_handler)(size);  }
00225             return(NULL);
00226         }
00227     }
00228     
00229     void *ptr=_malloc(size);     // <-- DO IT
00230     ++lmu.malloc_calls;
00231     
00232     if(!ptr)
00233     {
00234         ++lmu.real_failures;
00235         //if(lmalloc_failure_handler)
00236         //{  (*lmalloc_failure_handler)(size);  }
00237         return(ptr);   // ...which is NULL
00238     }
00239     
00240 #if LMALLOC_DEBUGGING
00241     LMEntry e(ptr,size);
00242     int rv=chunks->store(e,/*allow_update=*/0);
00243     if(rv)
00244     {
00245         fprintf(stderr,
00246             "OOPS(alloc): Chunk %p (size=%u) was already allocated.\n",
00247             ptr,size);
00248         _LMallocExit();
00249     }
00250     lmu.curr_used+=e.size();  // <-- NOT the size var on stack!
00251 #else
00252     lmu.curr_used+=malloc_usable_size(ptr);
00253 #endif
00254     
00255     if(lmu.max_used<lmu.curr_used)
00256     {  lmu.max_used=lmu.curr_used;  }
00257     
00258     ++lmu.used_chunks;
00259     if(lmu.max_used_chunks<lmu.used_chunks)
00260     {  lmu.max_used_chunks=lmu.used_chunks;  }
00261     
00262     /*#if AllocDebugging
00263     if(hlib_allocdebug_trace)
00264     {
00265         if(hlib_allocdebug_trace>1)
00266         {  fprintf(stderr,"@HLAT->malloc[%u](%u)=%p\n",
00267             hlib_allocdebug_seq,size,ptr);  }
00268         if(hlib_allocdebug_seq==hlib_allocdebug_abort_seq)
00269         {  abort();  }
00270         ++hlib_allocdebug_seq;
00271     }
00272     #endif*/
00273     
00274     return(ptr);
00275 }
00276 
00277 
00278 static inline void _LFree(void *ptr)
00279 {
00280     MutexLock<FastMutex> lock(mutex);
00281     
00282     // ptr!=NULL here. 
00283     
00284     /*#if AllocDebugging
00285     if(hlib_allocdebug_trace)
00286     {
00287         if(hlib_allocdebug_trace>1)
00288         {  fprintf(stderr,"@HLAT->free[%u](%p)\n",hlib_allocdebug_seq,ptr);  }
00289         if(hlib_allocdebug_seq==hlib_allocdebug_abort_seq)
00290         {  abort();  }
00291         ++hlib_allocdebug_seq;
00292     }
00293     #endif*/
00294     
00295 #if LMALLOC_DEBUGGING
00296     LMEntry e;
00297     int rv=chunks->remove(ptr,&e);
00298     if(rv)
00299     {
00300         fprintf(stderr,"OOPS(free): Chunk %p was never allocated.\n",ptr);
00301         _LMallocExit();
00302     }
00303     size_t size=e.size();
00304 #else
00305     size_t size=malloc_usable_size(ptr);
00306 #endif
00307     if(lmu.curr_used>=size)
00308     {  lmu.curr_used-=size;  }
00309     else  /* should never happen... */
00310     {  lmu.curr_used=0;  }
00311     
00312     _free(ptr);   // <-- DO IT
00313     ++lmu.free_calls;
00314     
00315     --lmu.used_chunks;
00316 }
00317 
00318 
00319 static inline void *_LRealloc(void *ptr,size_t size)
00320 {
00321     MutexLock<FastMutex> lock(mutex);
00322     
00323     // size!=0 and ptr!=NULL here. 
00324     
00325     /*#if AllocDebugging
00326     void *_old_ptr=ptr;
00327     #endif*/
00328     
00329 #if 0   /* <-- Use malloc() and free() instead of realloc(). */
00330 #warning "<--NO REALLOC--!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!-->"
00331     {
00332         /* This depends on a working malloc_usable_size(). */
00333         void *nptr=LMalloc(size);
00334         size_t osize=malloc_usable_size(ptr);
00335         if(!nptr)  return(NULL);
00336         memcpy(nptr,ptr,osize<size ? osize : size);
00337         LFree(ptr);
00338         return(nptr);
00339     }
00340 #endif
00341     
00342 #if LMALLOC_DEBUGGING
00343     LMEntry e;
00346     int rv=chunks->remove(ptr,&e);
00347     if(rv)
00348     {
00349         fprintf(stderr,"OOPS(realloc): Chunk %p was never allocated.\n",ptr);
00350         _LMallocExit();
00351     }
00352     size_t oldsize=e.size();
00353 #else
00354     size_t oldsize=malloc_usable_size(ptr);
00355 #endif
00356     
00357     if(lmu.curr_used>=oldsize)
00358     {  lmu.curr_used-=oldsize;  }
00359     else  /* should never happen... */
00360     {  lmu.curr_used=0;  }
00361     
00362     if(lmu.alloc_limit && lmu.curr_used+size>lmu.alloc_limit)
00363     {
00364         // FAIL: The old chunk stays allocated!
00365         ptr=NULL;
00366         ++lmu.limit_failures;
00367         
00368         // Decrement here since we will increment afterwards...
00369         --lmu.real_failures;
00370     }
00371     else
00372     {
00373         ptr=realloc(ptr,size);     // <-- DO IT
00374         ++lmu.realloc_calls;
00375     }
00376         
00377     if(!ptr)
00378     {
00379         lmu.curr_used+=oldsize;
00380         //lmu.max_used does not need to be updated here. 
00381         ++lmu.real_failures;
00382         
00383 #if LMALLOC_DEBUGGING
00384         rv=chunks->store(e,/*allow_update=*/0);
00385         if(rv)
00386         {
00387             fprintf(stderr,"OOPS: Chunk %p again in B-tree?!\n",ptr);
00388             _LMallocExit();
00389         }
00390 #endif
00391         
00392         //if(lmalloc_failure_handler)
00393         //{  (*lmalloc_failure_handler)(size);  }
00394         return(NULL);
00395     }
00396     
00397 #if LMALLOC_DEBUGGING
00398     e=LMEntry(ptr,size);
00399     rv=chunks->store(e,/*allow_update=*/0);
00400     if(rv)
00401     {
00402         fprintf(stderr,
00403             "OOPS(realloc): Chunk %p (size=%u) again in B-tree?!\n",
00404             ptr,size);
00405         _LMallocExit();
00406     }
00407     lmu.curr_used+=e.size();  // <-- ...and NOT the size var on the stack. 
00408 #else
00409     lmu.curr_used+=malloc_usable_size(ptr);
00410 #endif
00411     if(lmu.max_used<lmu.curr_used)
00412     {  lmu.max_used=lmu.curr_used;  }
00413     
00414     /*#if AllocDebugging
00415     if(hlib_allocdebug_trace)
00416     {
00417         if(hlib_allocdebug_trace>1)
00418         {  fprintf(stderr,"@HLAT->realloc[%u](%p,%u)=%p\n",hlib_allocdebug_seq,
00419             _old_ptr,size,ptr);  }
00420         if(hlib_allocdebug_seq==hlib_allocdebug_abort_seq)
00421         {  abort();  }
00422         ++hlib_allocdebug_seq;
00423     }
00424     #endif*/
00425     
00426     return(ptr);
00427 }
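// Worked example (illustration only, not part of salloc.cc) of the failure
// bookkeeping in _LRealloc() above, with made-up numbers: assume
// alloc_limit=1000, curr_used=800 and a chunk of usable size 300 being grown
// to 600 bytes. 
//   1. The old size is un-accounted first:  curr_used = 800-300 = 500. 
//   2. 500+600 > 1000, so the limit check fails: ptr is set to NULL, 
//      ++limit_failures, and real_failures is pre-decremented. 
//   3. The !ptr branch re-credits the old size (curr_used back to 800), 
//      increments real_failures (net change: 0), re-inserts the old entry 
//      into the B-tree when debugging is enabled, and returns NULL -- the 
//      old chunk stays allocated and still accounted for. 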
00428 
00429 
00430 //------------------------------------------------------------------------------
00431 // "High-level" routines: The frontend to the "user": 
00432 // The called _L* functions are declared static inline. 
00433 
00435 void _AllocFailure(size_t size)
00436 {
00437     fprintf(stderr,"Failed to allocate %u bytes of memory.\n",size);
00438     
00439     _LMallocExit();
00440 }
00441 
00442 
00443 void *LMalloc(size_t size)
00444 {
00445     if(!size)  return(NULL);
00446     void *ptr=_LMalloc(size);
00447 //fprintf(stderr,"LMalloc(%d)=%p \n",size,ptr);
00448     if(!ptr)  _AllocFailure(size);
00449     return(ptr);
00450 }
00451 
00452 
00453 void *LFree(void *ptr)
00454 {
00455     if(ptr)  _LFree(ptr);
00456 //fprintf(stderr,"LFree(%p) \n",ptr);
00457     return(NULL);
00458 }
00459 
00460 
00461 void *LRealloc(void *ptr,size_t size)
00462 {
00463     if(!ptr)  return(LMalloc(size));
00464     if(!size)  return(LFree(ptr));
00465     void *nptr=_LRealloc(ptr,size);
00466 //fprintf(stderr,"LRealloc(%p,%d)=%p \n",ptr,size,nptr);
00467     if(!nptr)  _AllocFailure(size);
00468     return(nptr);
00469 }
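
// Usage sketch (illustration only, not part of salloc.cc): putting the
// frontend together.  It assumes the declarations from salloc.h as used in
// this file; note that a failing allocation does not return NULL to the
// caller but goes through _AllocFailure() and terminates the program. 
#if 0   /* illustration only */
static void _ExampleFrontend()
{
    LMallocSetLimit(1024*1024);        // enforce a 1 MB ceiling
    
    char *buf=(char*)LMalloc(4096);    // counted against the limit
    buf=(char*)LRealloc(buf,8192);     // grow; old usable size is re-credited
    buf=(char*)LRealloc(buf,0);        // size 0 behaves like LFree(): returns NULL
    
    char *tmp=(char*)LRealloc(NULL,256);   // NULL ptr behaves like LMalloc(256)
    tmp=(char*)LFree(tmp);                 // LFree() always returns NULL
}
#endif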
