The Linux "active memory region" array records the memory ranges from the global e820 map that have been validated and registered for use. It is consulted during memory-zone initialization and in several other places.
Data Structure
/*
 * One active (usable RAM) region belonging to a NUMA node,
 * expressed as a half-open page-frame-number range [start_pfn, end_pfn).
 */
struct node_active_region {
	unsigned long start_pfn;	/* first page frame of the region */
	unsigned long end_pfn;		/* one past the last page frame */
	int nid;			/* NUMA node this region belongs to */
};
Initialization
Active memory regions are registered during boot via the call chain setup_arch() -> initmem_init() -> e820_register_active_regions().
/* Walk the e820 map and register active regions within a node */
Void _ init e820_register_active_regions (int nid, unsigned long start_pfn,
Unsigned long last_pfn)
{
Unsigned long ei_startpfn;
Unsigned long ei_endpfn;
Int I;
For (I = 0; I <e81_nr _ map; I ++)
If (e820_find_active_region (& e81_map [I],/* search for the activity area from the global variable e820 */
Start_pfn, last_pfn,
& Ei_startpfn, & ei_endpfn ))
Add_active_range (nid, ei_startpfn, ei_endpfn);/* Add to activity zone */
}
/*
* Finds an active region in the address range from start_pfn to last_pfn and
* Returns its range in ei_startpfn and ei_endpfn for the e820 entry.
*/
Int _ init e820_find_active_region (const struct e820entry * ei,
Unsigned long start_pfn,
Unsigned long last_pfn,
Unsigned long * ei_startpfn,
Unsigned long * ei_endpfn)
{
U64 align = PAGE_SIZE;
* Ei_startpfn = round_up (ei-> addr, align)> PAGE_SHIFT;
* Ei_endpfn = round_down (ei-> addr + ei-> size, align)> PAGE_SHIFT;
/* Skip map entries smaller than a page */
If (* ei_startpfn> = * ei_endpfn)
Return 0;
/* Skip if map is outside the node */
If (ei-> type! = E820_RAM | * ei_endpfn <= start_pfn |
* Ei_startpfn> = last_pfn)
Return 0;
/* Check for overlaps */
If (* ei_startpfn <start_pfn)
* Ei_startpfn = start_pfn;
If (* ei_endpfn> last_pfn)
* Ei_endpfn = last_pfn;
Return 1;
}
/* Add the activity area and check the original area */
Void _ init add_active_range (unsigned int nid, unsigned long start_pfn,
Unsigned long end_pfn)
{
Int I;
Mminit_dprintk (MMINIT_TRACE, "memory_register ",
"Entering add_active_range (% d, % # lx, % # lx )"
"% D entries of % d used \ n ",
Nid, start_pfn, end_pfn,
Nr_nodemap_entries, MAX_ACTIVE_REGIONS );
// Not set macro
Mminit_validate_memmodel_limits (& start_pfn, & end_pfn );
/* Merge with existing active regions if possible */
For (I = 0; I <nr_nodemap_entries; I ++ ){
If (early_node_map [I]. nid! = Nid)
Continue;
/* Skip if an existing region covers this new one */
If (start_pfn> = early_node_map [I]. start_pfn &&
End_pfn <= early_node_map [I]. end_pfn)
Return;
/* Merge forward if suitable */
If (start_pfn <= early_node_map [I]. end_pfn &&
End_pfn> early_node_map [I]. end_pfn ){
Early_node_map [I]. end_pfn = end_pfn;
Return;
}
/* Merge backward if suitable */
If (start_pfn <early_node_map [I]. end_pfn &&
End_pfn> = early_node_map [I]. start_pfn ){
Early_node_map [I]. start_pfn = start_pfn;
Return;
}
}
/* Check that early_node_map is large enough */
If (I> = MAX_ACTIVE_REGIONS ){
Printk (KERN_CRIT "More than % d memory regions, truncating \ n ",
MAX_ACTIVE_REGIONS );
Return;
}
Early_node_map [I]. nid = nid;
Early_node_map [I]. start_pfn = start_pfn;
Early_node_map [I]. end_pfn = end_pfn;
Nr_nodemap_entries = I + 1;
}
From bullbat's column