Linux buddy system (II): initialization of the buddy system

The initialization of the buddy system mainly consists of two things: initializing the buddy-system data structures described earlier, and releasing to the buddy system both the low memory managed by the bootmem allocator during early boot and the system's high memory. Some of the zone-related fields were already initialized in the earlier article <Linux node and …>.

Along start_kernel() --> setup_arch() --> paging_init() --> zone_sizes_init() --> free_area_init_nodes() --> free_area_init_node() --> free_area_init_core() --> zone_init_free_lists(), the free_area-related fields are initialized:

[cpp]
static void __meminit zone_init_free_lists(struct zone *zone)
{
	int order, t;
	for_each_migratetype_order(order, t) {
		/* each free list is initialized empty */
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		/* the count of free blocks of this order is initialized to 0 */
		zone->free_area[order].nr_free = 0;
	}
}

for_each_migratetype_order(order, t) is simply two nested for loops, iterating over every allocation order and every migrate type:

[cpp]
#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
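For orientation, here is the structure those two loops walk over: each zone keeps one free_area per allocation order, and each free_area keeps one free list per migrate type (abridged from include/linux/mmzone.h of this kernel generation; unrelated fields omitted):

[cpp]
/* abridged: one free list per migrate type; nr_free counts free
 * blocks (not pages) of this order */
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct zone {
	/* ... other fields omitted ... */
	struct free_area	free_area[MAX_ORDER];
	/* ... */
};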
start_kernel() --> mm_init() --> mem_init() gathers all available low memory and high memory and releases it to the buddy system:

[cpp]
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	/* retire the bootmem allocator: release the free page frames it
	 * manages plus the frames holding the bootmem bitmap itself, and
	 * account them all in totalram_pages */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	/* handle the high memory page frames */
	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	/* clear the pgd entries for the low identity mappings used during boot */
	zap_low_mappings(true);
}

The core work of free_all_bootmem() was analyzed in < >. set_highmem_pages_init() walks the highmem zones and feeds their page frames to the buddy system:

[cpp]
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {	/* iterate over every zone */
		unsigned long zone_start_pfn, zone_end_pfn;

		/* only high memory zones are of interest here */
		if (!is_highmem(zone))
			continue;

		/* record the first and last page frame numbers of the zone */
		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		/* add the zone's high memory page frames to the buddy system */
		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	/* the high memory pages released to the buddy system are
	 * accounted in totalram_pages as well */
	totalram_pages += totalhigh_pages;
}

[cpp]
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	/* walk all active regions of the node and apply
	 * add_highpages_work_fn() to each of them */
	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

[cpp]
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
	int i;
	int ret;

	for_each_active_range_index_in_nid(i, nid) {
		ret = work_fn(early_node_map[i].start_pfn,
			      early_node_map[i].end_pfn, data);
		if (ret)
			break;
	}
}

[cpp]
static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	/* clamp the active region to the requested pfn window */
	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);

	if (final_start_pfn >= final_end_pfn)
		return 0;

	/* walk the page frames in the resulting range */
	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		/* hand this page frame over to the buddy system */
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}
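The data cookie threaded through this callback chain is nothing more than the pfn window that may be released; in arch/x86/mm/init_32.c of the same kernel generation it is simply:

[cpp]
struct add_highpages_data {
	unsigned long start_pfn;	/* first pfn that may be freed */
	unsigned long end_pfn;		/* first pfn past the window */
};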
Each valid page frame is then released through add_one_highpage_init():

[cpp]
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);	/* the reference count is initialized to 1 */
	__free_page(page);	/* release the page to the buddy system */
	totalhigh_pages++;	/* one more high memory page */
}

The operations behind __free_page() will be analyzed when the buddy system's page-freeing path is introduced.

free_area_init_core() --> memmap_init() --> memmap_init_zone() --> set_pageblock_migratetype() marks the bits in the zone's pageblock_flags bitmap that correspond to the first page of each pageblock as MIGRATE_MOVABLE; in other words, when the kernel initializes the buddy system, all pages are marked movable:

[cpp]
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;
	struct zone *z;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

	z = &NODE_DATA(nid)->node_zones[zone];
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function.  They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
			if (!early_pfn_valid(pfn))
				continue;
			if (!early_pfn_in_nid(pfn, nid))
				continue;
		}
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		mminit_verify_page_links(page, zone, nid, pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made. Later some blocks near
		 * the start are marked MIGRATE_RESERVE by
		 * setup_zone_migrate_reserve()
		 *
		 * bitmap is created for zone's valid pfn range. but memmap
		 * can be created for invalid pages (for alignment)
		 * check here not to call set_pageblock_migratetype() against
		 * pfn out of zone.
		 */
		/* the pfn must lie inside the zone and be the first page
		 * frame of a pageblock */
		if ((z->zone_start_pfn <= pfn)
		    && (pfn < z->zone_start_pfn + z->spanned_pages)
		    && !(pfn & (pageblock_nr_pages - 1)))
			/* mark the bitmap bits for this pageblock as movable */
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);

		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}
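The !(pfn & (pageblock_nr_pages - 1)) test above is an ordinary power-of-two alignment check. A minimal userspace sketch, assuming the common x86 default of pageblock_order = 10 (so a pageblock spans 1024 page frames; the constant and helper names below are illustrative, not kernel code):

[cpp]
#include <stdio.h>

/* assumption: pageblock_order = 10, i.e. 1024 page frames per block */
#define PAGEBLOCK_NR_PAGES (1UL << 10)

/* same test as in memmap_init_zone(): the low 10 bits of pfn are all
 * zero exactly when pfn is a multiple of the pageblock size */
static int is_pageblock_start(unsigned long pfn)
{
	return !(pfn & (PAGEBLOCK_NR_PAGES - 1));
}

int main(void)
{
	unsigned long pfns[] = { 0, 1023, 1024, 4096, 4100 };
	unsigned int i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
		printf("pfn %4lu: %s\n", pfns[i],
		       is_pageblock_start(pfns[i]) ?
		       "pageblock start" : "inside a pageblock");
	return 0;
}

Only pfns 0, 1024 and 4096 pass the test, so set_pageblock_migratetype() runs once per pageblock rather than once per page.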
[cpp]
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	/* if page mobility grouping is disabled, every pageblock is
	 * marked unmovable instead */
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

[cpp]
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	/* get the start address of the bitmap, i.e. zone->pageblock_flags */
	bitmap = get_pageblock_bitmap(zone, pfn);
	/* get the bit offset in the bitmap corresponding to pfn */
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	/* each pageblock needs three bits, from start_bitidx to
	 * end_bitidx, to record its mobility type */
	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)	/* test whether this bit of flags is 1 */
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}
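To make the bit manipulation concrete, here is a small userspace model of the same scheme: three bits per pageblock, written bit by bit exactly as set_pageblock_flags_group() does. The helpers and the byte-array bitmap are illustrative only; MIGRATE_MOVABLE is 2 in this kernel generation.

[cpp]
#include <stdio.h>

/* model of zone->pageblock_flags: each pageblock owns NR_BITS
 * consecutive bits encoding its migratetype (PB_migrate..PB_migrate_end) */
#define NR_BITS 3

static void set_block_type(unsigned char *bitmap, unsigned long block,
			   unsigned long type)
{
	unsigned long bitidx = block * NR_BITS;	/* like pfn_to_bitidx() */
	unsigned long value = 1, bit;

	/* write the type bit by bit, as set_pageblock_flags_group() does */
	for (bit = 0; bit < NR_BITS; bit++, value <<= 1) {
		unsigned long idx = bitidx + bit;

		if (type & value)
			bitmap[idx / 8] |= 1u << (idx % 8);
		else
			bitmap[idx / 8] &= ~(1u << (idx % 8));
	}
}

static unsigned long get_block_type(const unsigned char *bitmap,
				    unsigned long block)
{
	unsigned long bitidx = block * NR_BITS;
	unsigned long type = 0, bit;

	for (bit = 0; bit < NR_BITS; bit++)
		if (bitmap[(bitidx + bit) / 8] & (1u << ((bitidx + bit) % 8)))
			type |= 1ul << bit;
	return type;
}

int main(void)
{
	unsigned char bitmap[4] = { 0 };

	set_block_type(bitmap, 0, 2);	/* MIGRATE_MOVABLE == 2 */
	set_block_type(bitmap, 1, 5);	/* an arbitrary 3-bit value */
	printf("block 0 -> %lu, block 1 -> %lu\n",
	       get_block_type(bitmap, 0), get_block_type(bitmap, 1));
	return 0;
}

This also shows why memmap_init_zone() can tag a whole pageblock with one call: the migratetype lives in the zone's bitmap, not in each struct page.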
Author: vanbreaker