On embedded Linux systems such as ARM, a common headache is that the GPU, camera, HDMI and similar devices need large blocks of physically contiguous memory. This memory sits unused most of the time, yet the usual practice is to reserve it up front. Marek Szyprowski and Michal Nazarewicz have implemented a new Contiguous Memory Allocator (CMA). With this mechanism we no longer have to set the memory aside permanently: it remains available for normal use and is only handed to the camera, HDMI and other devices when they actually need it. The following walks through the basic code flow.
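For context, a driver does not call CMA directly: with CMA enabled, the ordinary DMA API is enough, and the contiguous buffer is only carved out of the CMA area at allocation time. A minimal sketch (the device pointer, helper name and buffer size below are made up for illustration; on ARM the path is roughly dma_alloc_coherent() -> the arch dma_ops -> dma_alloc_from_contiguous()):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void *buf_cpu;
static dma_addr_t buf_dma;

/* Hypothetical helper: "cam_dev" stands for the camera's struct device. */
static int camera_alloc_frame_buffer(struct device *cam_dev)
{
	/* 8 MiB physically contiguous buffer, taken from the CMA area. */
	buf_cpu = dma_alloc_coherent(cam_dev, 8 * SZ_1M, &buf_dma, GFP_KERNEL);
	return buf_cpu ? 0 : -ENOMEM;
}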
Declaring contiguous memory
During the kernel boot process, arm_memblock_init() in arch/arm/mm/init.c calls dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
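The relevant part of arm_memblock_init() looks roughly like this (abridged; details vary by kernel version). Note that it runs after the machine descriptor's ->reserve() callback, so platform code has already had a chance to declare its own areas:

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	...
	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
	...
}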
dma_contiguous_reserve() itself lives in drivers/base/dma-contiguous.c:
/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	unsigned long selected_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n",
			 __func__, selected_size / SZ_1M);
		dma_declare_contiguous(NULL, selected_size, 0, limit);
	}
}
size_bytes is defined as:

static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;

By default CMA_SIZE_MBYTES is 16, derived from CONFIG_CMA_SIZE_MBYTES=16, so the default global area is 16 MiB.
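For completeness, the cma= kernel command-line parameter is what feeds size_cmdline used above; the parsing code in the same file looks roughly like this:

static long size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);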
dma_contiguous_reserve() then calls dma_declare_contiguous():
int __init dma_declare_contiguous(struct device *dev, unsigned long size,
				  phys_addr_t base, phys_addr_t limit)
{
	...
	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			base = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			base = -ENOMEM;
			goto err;
		} else if (addr + size > ~(unsigned long)0) {
			memblock_free(addr, size);
			base = -EINVAL;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	r->start = base;
	r->size = size;
	r->dev = dev;
	cma_reserved_count++;
	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
	return base;
}
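Passing NULL as the device makes this the global (default) CMA area. Platform code can also declare per-device areas from its memblock reserve callback, i.e. before dma_contiguous_reserve() runs in arm_memblock_init(). A hypothetical board-file sketch (the camera_device platform device and the 32 MiB size are made up for illustration):

#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

/* Called from the machine descriptor's ->reserve() hook. */
static void __init my_board_reserve(void)
{
	/* Give the camera its own 32 MiB contiguous area. */
	if (dma_declare_contiguous(&camera_device.dev, 32 * SZ_1M, 0, 0))
		pr_err("my_board: failed to declare camera CMA area\n");
}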
This shows that the contiguous memory area is reserved early in the kernel boot, via __memblock_alloc_base().

In addition, the core_initcall() inside drivers/base/dma-contiguous.c causes cma_init_reserved_areas() to be invoked:
static int __init cma_init_reserved_areas(void)
{
	struct cma_reserved *r = cma_reserved;
	unsigned i = cma_reserved_count;

	pr_debug("%s()\n", __func__);

	for (; i; --i, ++r) {
		struct cma *cma;
		cma = cma_create_area(PFN_DOWN(r->start),
				      r->size >> PAGE_SHIFT);
		if (!IS_ERR(cma))
			dev_set_cma_area(r->dev, cma);
	}
	return 0;
}
core_initcall(cma_init_reserved_areas);
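For reference, the cma_reserved[] bookkeeping array filled in by dma_declare_contiguous() and the struct cma created here are defined in the same file, roughly as:

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;	/* tracks which pages are allocated */
};

static struct cma_reserved {
	phys_addr_t start;
	unsigned long size;
	struct device *dev;
} cma_reserved[MAX_CMA_AREAS] __initdata;

static unsigned cma_reserved_count __initdata;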
cma_create_area() calls cma_activate_area(), and cma_activate_area() invokes, for each pageblock in the area:

init_cma_reserved_pageblock(pfn_to_page(base_pfn));
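For context, cma_activate_area() walks the area one pageblock at a time, checking that every page belongs to the same zone before handing the pageblock over; roughly:

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	return 0;
}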
init_cma_reserved_pageblock() then marks the whole pageblock as MIGRATE_CMA via set_pageblock_migratetype(page, MIGRATE_CMA):
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	totalram_pages += pageblock_nr_pages;
}
#endif
The __free_pages(page, pageblock_order) call made here eventually reaches __free_one_page(page, zone, order, migratetype),
which adds the page to the MIGRATE_CMA free_list:

list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
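This is also why the reserved memory stays usable in the meantime: movable allocations are allowed to fall back into MIGRATE_CMA pageblocks, and CMA migrates them away when a device later claims the space. The fallback table in mm/page_alloc.c looks roughly like this with CONFIG_CMA enabled (abridged):

static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE, MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	/* Only movable allocations may fall back into CMA pageblocks. */
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
	...
};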