Memory Management---Slab mechanism allocation object

Source: Internet
Author: User

Allocating an object from a cache always follows these steps:

1. If the per-CPU local cache has free objects, take one from there.
2. Otherwise, refill the local cache with free objects taken from the slab lists in kmem_list3, then allocate from the local cache.
3. If no slab has any free object left, grow the cache by creating a new slab, then allocate from it.



From: http://blog.csdn.net/vanbreaker/article/details/7671211

 In the Linux kernel, allocating memory from the slab allocator goes through either kmalloc() or kmem_cache_alloc().

Call chain: kmalloc() -> __kmalloc() -> __do_kmalloc()

/** * __do_kmalloc-allocate Memory * @size: How many bytes of memory is required. * @flags: The type of memory to allocate (see KMALLOC).  * @caller: Function caller for debug tracking of the caller */static __always_inline void *__do_kmalloc (size_t size, gfp_t  Flags,  void *caller) {struct Kmem_cache *cachep;void *ret;/* If you want to save a few bytes. Text Space:replace * __ With Kmem_. * Then Kmalloc uses the uninlined functions instead of the inline * functions. */* Finds the generic cache of the specified size, about the sizes array, has been parsed in the previous initialization */Cachep = __FIND_GENERAL_CACHEP (size, flags); if (Unlikely (zero_or_n Ull_ptr (Cachep))) Return Cachep;ret = __cache_alloc (CACHEP, flags, caller); /* Actual Assignment Work */Trace_kmalloc ((unsigned long) caller, ret,      size, cachep->size, flags); return ret;}

The actual allocation work is performed by the call chain: __cache_alloc() -> __do_cache_alloc() -> ____cache_alloc()

static inline void *____cache_alloc (struct kmem_cache *cachep, gfp_t flags) {void *objp;struct array_cache *ac;bool force_ Refill = False;check_irq_off (), ac = Cpu_cache_get (Cachep),  /* get the local cache for this CPU */ if (likely (Ac->avail) {/* If free objects are available in the local cache */ ac->touched = 1;<span style= "White-space:pre" > /* from local Extract the last idle object from the entry array of the cache */&NBSP;&LT;/SPAN&GT;OBJP = Ac_get_obj (Cachep, AC, flags, FALSE);  /* * allow for the POSSIB ility all avail objects is not allowed * by the current flags */if (OBJP) {stats_inc_allochit (Cachep); goto out;}  Force_refill = true; Whether the flag bit requires Refill}stats_inc_allocmiss (CACHEP);  /* extracts the free object from the slab three-strand into the local cache */&NBSP;OBJP = CACHE_ALLOC_ Refill (Cachep, flags, force_refill);/* * the ' AC ' May is updated by Cache_alloc_refill (), * and Kmemleak_erase () requires Its correct value. */ /* Cache_alloc_refill's Cache_grow opened the interrupt, the local cache pointer may have changed and need to regain AC = Cpu_cache_get (CACHEP); out:/* * to avoid A false negative, if an Object-is-in-one of the * PER-CPU caches is leaked, we need-make sure kmemleak doesn ' t * treat the array pointers As a reference to the object. */if (OBJP) kmemleak_erase (&ac->entry[ac->avail]);/* The Assigned object, whose entry pointer points to empty */ return OBJP;}
static inline void *ac_get_obj (struct kmem_cache *cachep,struct array_cache *ac, gfp_t flags, bool Force_refill) {void *obj P;if (Unlikely (Sk_memalloc_socks ())) OBJP = __ac_get_obj (Cachep, AC, Flags, Force_refill); else  <span style= " White-space:pre ">/* First reduce the value of avail by 1, so that avail corresponding idle object is the hottest, that is, recently released, more likely to reside in the CPU cache */</SPAN>OBJP = ac->en Try[--ac->avail];return OBJP;}


Core:

tatic void *cache_alloc_refill (struct Kmem_cache *cachep, gfp_t flags,bool force_refill) {int batchcount;struct kmem_ List3 *l3;struct array_cache *ac;int node;check_irq_off (); node = numa_mem_id ();  /* Gets the memory nodes, UMA has only one node */ if ( Unlikely (Force_refill)) goto FORCE_GROW;RETRY:AC = Cpu_cache_get (Cachep); batchcount = ac->batchcount; /* Get the number of bulk transfers */ if (!ac->touched && batchcount > Batchrefill_limit) {/* * If there was little recent Activ  ity on this cache, then * perform only a partial refill. Otherwise we could generate * refill bouncing. */   /* has not used this local cache recently, there is no need to add too many objects          &NBSP; The number of additions is the default limit */  Batchcount = Batchrefill_limit;} L3 = cachep->nodelists[node];/* Get kmem_list3*/ bug_on (Ac->avail > 0 | |!l3); Spin_lock (&l3->list_ lock);/* See if we can refill from the shared array */<span style= "White-space:pre" > /* shared local cache for multicore systems Share      For all CPUs, if there is a shared local cache     , first bulk handling of idle objects from the shared local cache to the local cache     . Filling work is made easy with the shared local cache. */ </span>if (l3->shared && transfer_objects (AC, l3->shared, Batchcount)) {l3->shared- >touched = 1;goto Alloc_done;} <span style= "White-space:pre" > /* If there is no shared local cache, or there are no idle objects     , allocate from the slab list */  </span>while (Batchcount > 0) {struct List_head *entry;struct slab *slabp;/* Get Slab alloc is to come from . 
*/<span style= "white-space:pre" >/* scan slab linked list, starting with the partial list, if the entire partial list cannot find Batchcount idle objects,        </span> <span style= "White-space:pre" > re-scan free linked list */ </span>entry = l3-> Slabs_partial.next;if (Entry = = &l3->slabs_partial) { /*entry Return to the table header indicates that the partial list has been scanned and begins scanning the free list */  L3->free_touched = 1; /* indicates that the slab empty list has just been accessed */ entry = l3->slabs_free.next;if (Entry = = &l3->slabs_ FREE)/* Empty slab linked list is also empty, you must add slab */  goto Must_grow;} SLABP = list_entry (entry, struct slab, list), CHECK_SLABP (Cachep, SLABP); check_spinlock_acquired (CACHEP);/* * The Slab was either on partial or free list so * There must is at least one object available for * allocation. */bug_on (slabp->inuse >= cachep->num) <span style= "White-space:pre" > /* If there are still idle objects in SLABP and you still need to populate the objects to local cache */ </span>while (Slabp->inuse < Cachep->num && batchcount--) {stats_inc_alloced (CACHEP); Stats_inc_active (Cachep); Stats_set_high (Cachep); <span style= "White-space:pre" > /* the essence of filling is to use the void* array element behind AC to point to a free object </span> <span style= "White-space:pre" > ac->entry[ac->avail++] = Slab_get_obj (Cachep, SLABP, node); </span><span style= "White-space:pre" > */ </span>ac_put_obj (Cachep, AC, Slab_get_obj ( Cachep, Slabp,node));} CHECK_SLABP (Cachep, SLABP);/* Move SLABP to correct SLABP list: */list_del (&slabp->list); if (Slabp->free = = BUF Ctl_end)  /*free equalsBufctl_end indicates that the idle object is exhausted, inserting slab into the full list */ list_add (&slabp->list, &l3->slabs_full); Elselist_add ( &slabp->list, &l3->slabs_partial);} Must_grow:<span style= "White-space:pre" > /* add slab free objects from the avail list to the local cache     , Update the number of 
idle objects in the slab list */ </span>l3->free_objects-= Ac->avail;alloc_done:spin_unlock (&l3->list_ Lock), if (Unlikely (!ac->avail)) { /* The local cache still does not have free objects available, description slab     three chains There are no idle objects, you need to create a new empty slab */ int x;force_grow:x = Cache_grow (Cachep, Flags | Gfp_thisnode, node, null);  /* Create an empty slab */ /* Cache_grow can reenable interrupts, then AC could change. */ac = Cpu_cache_get (Cachep); node = numa_mem_id ();/* No objects in sight? Abort */if (!x && (Ac->avail = = 0 | | force_refill)) return null;if (!ac->avail)/* Objects refilled by Interru Pt? */goto retry;} ac->touched = 1;return ac_get_obj (Cachep, AC, Flags, Force_refill);  /* back to local CachThe virtual address of the last idle object in e OBJP = ac->entry[--ac->avail];*/ } 

Auxiliary functions:

/* * Transfer objects in one arraycache to another. * Locking must is handled by the caller. * * Return The number of entries transferred. */static int transfer_objects (struct array_cache *to,struct array_cache *from, unsigned int max) {/* figure out how many en Tries to transfer */int nr = min (min (from->avail, max), To->limit-to->avail), if (!NR) return 0;/* copy */memcpy (to-& Gt;entry + to->avail, from->entry + from->avail-nr,sizeof (void *) *NR);/* Data update on both sides */from->avail-= nr;to-> Avail + = Nr;to->touched = 1;return nr;}

/* Extract a free object from slab */static void *slab_get_obj (struct kmem_cache *cachep, struct slab *slabp,int nodeid) {/* Get an Idle object, Free is the index of the first idle object in this slab */void *OBJP = index_to_obj (Cachep, SLABP, Slabp->free); kmem_bufctl_t next; /* Update in Object Count */slabp->inuse++; /* Gets the index of the next free object */next = Slab_bufctl (SLABP) [slabp->free]; #if debugslab_bufctl (SLABP) [Slabp->free] = Bufctl_free ; WARN_ON (Slabp->nodeid! = Nodeid); #endif/* Free point to the next idle object */slabp->free = Next;return objp;}

static inline void *index_to_obj (struct kmem_cache *cache, struct slab *slab, unsigned int idx) {/* S_mem is the starting address of the first object in slab, b Uffer_size is the size of each object, where the address of the object is computed based on the object index */return SLAB->S_MEM + cache->buffer_size * IDX;}

Static inline kmem_bufctl_t *slab_bufctl (struct slab *slabp) {return (kmem_bufctl_t *) (SLABP + 1);}
Static inline struct Array_cache *cpu_cache_get (struct Kmem_cache *cachep) {return cachep->array[smp_processor_id () ];}

Analysis of cache_grow (growing a cache with a new slab) is left for a later article.



Memory Management---Slab mechanism allocation object

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.