Memory Management API: get_user_pages
long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) pins user-space pages into memory and returns a pointer to each page's struct page descriptor through the fourth parameter, struct page **pages. The source code analysis is as follows:

long get_user_pages(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages,
                    struct vm_area_struct **vmas)
{
        /* Map user pages of the current process's address space. */
        return __get_user_pages_locked(current, current->mm, start, nr_pages,
                                       pages, vmas, NULL, false,
                                       gup_flags | FOLL_TOUCH);
}

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    struct page **pages,
                                                    struct vm_area_struct **vmas,
                                                    int *locked, bool notify_drop,
                                                    unsigned int flags)
{
        long ret, pages_done;
        bool lock_dropped;

        /* In this call path (via get_user_pages), locked is NULL. */
        if (locked) {
                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
                BUG_ON(vmas);
                /* check caller initialized locked */
                BUG_ON(*locked != 1);
        }

        if (pages)
                flags |= FOLL_GET;

        pages_done = 0;
        lock_dropped = false;
        for (;;) {
                /*
                 * Map nr_pages pages starting at start and store the page
                 * pointers in pages. ret is the number of pages that were
                 * successfully mapped.
                 */
                ret = __get_user_pages(tsk, mm, start, nr_pages, flags,
                                       pages, vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        return ret;

                /* VM_FAULT_RETRY cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
                }

                if (!pages)
                        /* If it's a prefault don't insist harder */
                        return ret;

                /*
                 * ret > 0 means __get_user_pages successfully mapped ret
                 * pages; account for them and stop if none remain.
                 */
                if (ret > 0) {
                        nr_pages -= ret;
                        pages_done += ret;
                        if (!nr_pages)
                                break;
                }
                if (*locked) {
                        /* VM_FAULT_RETRY didn't trigger */
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
                pages += ret;
                start += ret << PAGE_SHIFT;

                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * without FAULT_FLAG_ALLOW_RETRY but with
                 * FAULT_FLAG_TRIED.
                 */
                *locked = 1;
                lock_dropped = true;
                down_read(&mm->mmap_sem);
                /* Retry a single page at start; normally this succeeds. */
                ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, NULL);
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        /* The retry failed as well; exit the endless loop. */
                        break;
                }
                /*
                 * Reaching this point means the first __get_user_pages call
                 * faulted on this page, but the second, single-page call
                 * succeeded. Account for that page, advance start by one
                 * page, and continue mapping the remainder.
                 */
                nr_pages--;
                pages_done++;
                if (!nr_pages)
                        break;
                pages++;
                start += PAGE_SIZE;
        }
        if (notify_drop && lock_dropped && *locked) {
                /*
                 * We must let the caller know we temporarily dropped the lock
                 * and so the critical section protected by it was lost.
                 */
                up_read(&mm->mmap_sem);
                *locked = 0;
        }
        /* pages_done is the number of successfully mapped pages. */
        return pages_done;
}
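To make the calling convention concrete, here is a minimal sketch of how a driver might pin a user buffer with this API. It assumes the era of the source analysed above, where the caller must hold mmap_sem for reading (recent kernels renamed it mmap_lock and prefer pin_user_pages()/unpin_user_pages()); the helper names pin_user_buffer() and release_user_buffer() are hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical helper: pin nr_pages of the calling process's address
 * space starting at uaddr for writing. Written against the ~4.10-era
 * get_user_pages() signature walked through above.
 */
static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                            struct page **pages)
{
        long got;

        down_read(&current->mm->mmap_sem);      /* gup runs under mmap_sem */
        got = get_user_pages(uaddr & PAGE_MASK, nr_pages,
                             FOLL_WRITE, pages, NULL);
        up_read(&current->mm->mmap_sem);
        return got;     /* pages actually pinned, or a negative errno */
}

static void release_user_buffer(struct page **pages, long nr)
{
        long i;

        for (i = 0; i < nr; i++) {
                set_page_dirty_lock(pages[i]);  /* we may have written to it */
                put_page(pages[i]);             /* drop the FOLL_GET reference */
        }
}

Note that get_user_pages() can return fewer pages than requested, so the caller must treat the return value, not nr_pages, as the number of references it owns.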
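The locked parameter is only exercised by the *_locked entry points. The following sketch, again against the same kernel era and with a hypothetical helper name, shows the protocol the walkthrough describes: the caller takes mmap_sem, passes locked = 1, and only unlocks itself if *locked is still set on return, because __get_user_pages_locked() drops the semaphore when a fault returns VM_FAULT_RETRY:

/*
 * Hypothetical helper demonstrating the locked protocol of
 * get_user_pages_locked() (~4.10-era signature).
 */
static long pin_with_retry(unsigned long uaddr, unsigned long nr_pages,
                           struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int locked = 1;
        long got;

        down_read(&mm->mmap_sem);
        got = get_user_pages_locked(uaddr & PAGE_MASK, nr_pages,
                                    FOLL_WRITE, pages, &locked);
        if (locked)
                up_read(&mm->mmap_sem);  /* still held: drop it ourselves */
        /* if !locked, __get_user_pages_locked already released mmap_sem */
        return got;
}

This is why __get_user_pages_locked() checks notify_drop at the end: callers that care about the critical section must learn that the lock was temporarily dropped.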