[cpp]
static int dm_io_async_bvec(unsigned int num_regions,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
                            struct dm_io_region *where,
#else
                            struct io_region *where,
#endif
                            int rw, struct bio_vec *bvec,
                            io_notify_fn fn, void *context)
{
    struct dm_io_request iorq;

    iorq.bi_rw = rw;
    iorq.mem.type = DM_IO_BVEC;
    iorq.mem.ptr.bvec = bvec;
    iorq.notify.fn = fn;
    iorq.notify.context = context;
    iorq.client = flashcache_io_client;
    return dm_io(&iorq, num_regions, where, NULL);
}

The caller must set up an io_region structure to describe the target location of the I/O. Each io_region names a block device along with the starting sector and size of the region. However, the representation of this struct varies with the kernel version. In kernels 2.6.26 and later it is called dm_io_region:

[cpp]
struct dm_io_region {
    struct block_device *bdev;
    sector_t sector;
    sector_t count;    /* If this is zero the region is ignored. */
};

In kernels earlier than 2.6.26 it is called io_region:

[cpp]
struct io_region {
    struct block_device *bdev;
    sector_t sector;
    sector_t count;
};

Although the names differ, the content is the same: a pointer to a block_device, plus the starting sector and the size of the region.

The bio_vec struct:

[cpp]
struct bio_vec {
    struct page *bv_page;      /* pointer to the page descriptor of the page frame holding the segment */
    unsigned int bv_len;       /* length of the segment in bytes */
    unsigned int bv_offset;    /* offset of the segment's data within the page frame */
};

io_notify_fn is a callback function pointer type, defined as follows:

[cpp]
typedef void (*io_notify_fn)(unsigned long error, void *context);

The "error" parameter in this callback is a bitset rather than a simple error value: in the case of a write I/O to multiple regions, the bitset allows dm-io to report success or failure for each individual region.

The dm_io_request structure encapsulates the request type: if notify.fn is set, the I/O is asynchronous; otherwise it is synchronous.

[cpp]
struct dm_io_request {
    int bi_rw;                    /* READ|WRITE - not READA */
    struct dm_io_memory mem;      /* Memory to use for io */
    struct dm_io_notify notify;   /* Synchronous if notify.fn is NULL */
    struct dm_io_client *client;  /* Client memory handler */
};

From the analysis above we can see that dm_io_async_bvec uses io_notify_fn to make the operation asynchronous, and uses bio_vec *bvec to select the dm_io service type. dm_io offers three service types.

The first service type takes a list of memory pages as the data buffer for the I/O, along with an offset into the first page:

[cpp]
struct page_list {
    struct page_list *next;
    struct page *page;
};

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
               struct page_list *pl, unsigned int offset,
               unsigned long *error_bits);
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
                struct page_list *pl, unsigned int offset,
                io_notify_fn fn, void *context);
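To tie these pieces together before looking at the remaining service types, here is a minimal, hypothetical sketch of a synchronous read issued through the same dm_io() entry point that dm_io_async_bvec uses. It assumes a kernel of 2.6.26 or later, an already-created dm_io_client, and a vmalloc'd buffer (the vmalloc-backed memory type is the third service type described below); the helper name my_read_vm is invented for the example:

[cpp]
#include <linux/dm-io.h>    /* dm_io(), dm_io_request, dm_io_region */

/* Hypothetical helper: synchronously read 'count' sectors starting at
 * 'sector' from 'bdev' into the vmalloc'd buffer 'buf'. */
static int my_read_vm(struct dm_io_client *client, struct block_device *bdev,
                      sector_t sector, sector_t count, void *buf)
{
    struct dm_io_region where = {
        .bdev   = bdev,
        .sector = sector,
        .count  = count,
    };
    struct dm_io_request iorq = {
        .bi_rw       = READ,
        .mem.type    = DM_IO_VMA,    /* buffer comes from vmalloc() */
        .mem.ptr.vma = buf,
        .notify.fn   = NULL,         /* NULL notify.fn => synchronous */
        .client      = client,
    };
    unsigned long error_bits = 0;

    /* Blocks until the read completes because notify.fn is NULL. */
    return dm_io(&iorq, 1, &where, &error_bits);
}

Passing a non-NULL notify.fn instead would turn the same request into an asynchronous one, which is exactly the path dm_io_async_bvec takes.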
The second service type takes an array of bio vectors as the data buffer for the I/O. This service can be handy if the caller has a pre-assembled bio but wants to direct different portions of the bio to different devices:

[cpp]
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where,
                    int rw, struct bio_vec *bvec,
                    unsigned long *error_bits);
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where,
                     int rw, struct bio_vec *bvec,
                     io_notify_fn fn, void *context);

The third service type takes a pointer to a vmalloc'd memory buffer as the data buffer for the I/O. This service can be handy if the caller needs to do I/O to a large region but doesn't want to allocate a large number of individual memory pages:

[cpp]
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
                  void *data, unsigned long *error_bits);
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
                   void *data, io_notify_fn fn, void *context);

After dm_io_async_bvec encapsulates the request in a dm_io_request, the request type is fully determined, and the operation is carried out by the dm_io() function:

[cpp]
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
    int r;
    struct dpages dp;

    r = dp_init(io_req, &dp);
    if (r)
        return r;

    if (!io_req->notify.fn)
        return sync_io(io_req->client, num_regions, where,
                       io_req->bi_rw, &dp, sync_error_bits);

    return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                    &dp, io_req->notify.fn, io_req->notify.context);
}

The dpages struct:

[cpp]
struct dpages {
    /* Function pointer: fetch the current page along with its length and
     * offset; what it reads is determined by the context members below. */
    void (*get_page)(struct dpages *dp, struct page **p,
                     unsigned long *len, unsigned *offset);
    /* Function pointer: advance to the next page. */
    void (*next_page)(struct dpages *dp);

    /* Private data members; their meaning depends on the memory type,
     * i.e. on which *_dp_init() set up this iterator. */
    unsigned context_u;
    void *context_ptr;
};

The dpages struct provides an abstract interface for getting the next destination page of an I/O. The dp_init() function sets it up according to the memory type of the request:

[cpp]
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
    /* Set up dpages based on memory type */
    switch (io_req->mem.type) {
    case DM_IO_PAGE_LIST:
        list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
        break;

    case DM_IO_BVEC:    /* our io_req->mem.type is this one */
        bvec_dp_init(dp, io_req->mem.ptr.bvec);
        break;

    case DM_IO_VMA:
        vm_dp_init(dp, io_req->mem.ptr.vma);
        break;

    case DM_IO_KMEM:
        km_dp_init(dp, io_req->mem.ptr.addr);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}

The bvec_dp_init() function:

[cpp]
static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
    dp->get_page = bvec_get_page;    /* fetch the current bio_vec */
    dp->next_page = bvec_next_page;  /* advance to the next bio_vec */
    dp->context_ptr = bvec;          /* the context is the bio_vec array */
}

The bvec_get_page() function:

[cpp]
static void bvec_get_page(struct dpages *dp, struct page **p,
                          unsigned long *len, unsigned *offset)
{
    struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
    *p = bvec->bv_page;
    *len = bvec->bv_len;
    *offset = bvec->bv_offset;
}

The bvec_next_page() function:

[cpp]
static void bvec_next_page(struct dpages *dp)
{
    struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
    dp->context_ptr = bvec + 1;
}

Once the service type has been handled this way, dm_io() checks whether io_req->notify.fn is set to decide whether the operation is synchronous or asynchronous.
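To make the dpages abstraction concrete, the following fragment is a hypothetical illustration (the name consume_pages and the loop are invented, since dpages is private to drivers/md/dm-io.c) of how a bvec-backed iterator is walked, in the same style as the dispatch path:

[cpp]
/* Hypothetical illustration: iterate over the pages described by a
 * dpages iterator, one page per step, as dm-io's dispatch code does. */
static void consume_pages(struct dpages *dp, unsigned num_pages)
{
    struct page *page;
    unsigned long len;
    unsigned offset;
    unsigned i;

    for (i = 0; i < num_pages; i++) {
        /* For a bvec-backed dp this calls bvec_get_page(), which reads
         * bv_page/bv_len/bv_offset from the current bio_vec. */
        dp->get_page(dp, &page, &len, &offset);

        /* ... hand (page, offset, len) to a bio here ... */

        /* For a bvec-backed dp this calls bvec_next_page(), which
         * simply advances context_ptr to the next bio_vec. */
        dp->next_page(dp);
    }
}

Because get_page and next_page are function pointers, the same loop works unchanged for the page-list, vmalloc and kmem memory types; only the *_dp_init() setup differs.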
A synchronous operation calls sync_io(); an asynchronous operation calls async_io().

The sync_io() function:

[cpp]
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
    struct io io;    /* further wraps the dm_io_request for dispatch */

    if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
        /* dm-io cannot read from more than one io_region */
        WARN_ON(1);
        return -EIO;
    }

retry:
    io.error_bits = 0;
    io.eopnotsupp_bits = 0;
    atomic_set(&io.count, 1);    /* see dispatch_io() */
    io.sleeper = current;
    io.client = client;

    dispatch_io(rw, num_regions, where, dp, &io, 1);

    while (1) {
        set_current_state(TASK_UNINTERRUPTIBLE);

        if (!atomic_read(&io.count))
            break;

        io_schedule();
    }
    set_current_state(TASK_RUNNING);

    if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
        rw &= ~(1 << BIO_RW_BARRIER);
        goto retry;
    }

    if (error_bits)
        *error_bits = io.error_bits;

    return io.error_bits ? -EIO : 0;
}

The async_io() function:

[cpp]
static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
    /* struct io re-wraps the dm_io_request so it can be handed to the
     * dispatch path for asynchronous processing */
    struct io *io;

    /* dm-io can read from one io_region, or write to one or more
     * io_regions; writes to multiple regions are specified by an array
     * of io_region structures, but multi-region reads are not allowed */
    if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
        WARN_ON(1);
        fn(1, context);
        return -EIO;
    }

    io = mempool_alloc(client->pool, GFP_NOIO);
    io->error_bits = 0;
    io->eopnotsupp_bits = 0;
    atomic_set(&io->count, 1);    /* see dispatch_io() */
    io->sleeper = NULL;
    io->client = client;
    io->callback = fn;
    io->context = context;

    dispatch_io(rw, num_regions, where, dp, io, 0);
    return 0;
}
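For completeness, an asynchronous caller pairs the request with an io_notify_fn such as the hypothetical sketch below; the names my_request and my_done are invented, and the error argument is the per-region bitset described earlier:

[cpp]
#include <linux/completion.h>

/* Hypothetical per-request state, invented for this example. */
struct my_request {
    struct completion done;      /* init_completion() before submitting */
    unsigned long error_bits;
};

/* Hypothetical io_notify_fn: dm-io invokes it once all regions of an
 * asynchronous request have completed. A nonzero 'error' means at
 * least one region failed. */
static void my_done(unsigned long error, void *context)
{
    struct my_request *req = context;

    req->error_bits = error;
    complete(&req->done);    /* wake up whoever submitted the I/O */
}

The submitter would pass my_done and a pointer to its my_request as the fn/context pair (for example, via dm_io_async_bvec) and can later block with wait_for_completion(&req->done) if it needs to wait for the result.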