Memory Management API: kmem_cache_alloc
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) is used to allocate an object from a slab cache. Example usage:

    table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
    if (!table->table) {
        kfree(table);
        table = NULL;
        goto out_unlock;
    }

Source code analysis:

    void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
        /* The core is the call to slab_alloc to perform the allocation */
        void *ret = slab_alloc(cachep, flags, _RET_IP_);

        /* The following two calls are for KASAN checking and tracing */
        kasan_slab_alloc(cachep, ret, flags);
        trace_kmem_cache_alloc(_RET_IP_, ret, cachep->object_size,
                               cachep->size, flags);
        return ret;
    }

slab_alloc is implemented separately by the slab, slub, and slob allocators; here we take the slab implementation as an example:

    static __always_inline void *
    slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
    {
        unsigned long save_flags;
        void *objp;

        flags &= gfp_allowed_mask;
        /* This hook pairs with slab_post_alloc_hook below */
        cachep = slab_pre_alloc_hook(cachep, flags);
        if (unlikely(!cachep))
            return NULL;

        cache_alloc_debugcheck_before(cachep, flags);
        /* Disable interrupts on the local CPU */
        local_irq_save(save_flags);
        /* Allocate the object */
        objp = __do_cache_alloc(cachep, flags);
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
        prefetchw(objp);

        if (unlikely(flags & __GFP_ZERO) && objp)
            memset(objp, 0, cachep->object_size);

        slab_post_alloc_hook(cachep, flags, 1, &objp);
        return objp;
    }