Xen network front-end driver code analysis (device initialization)


Whether for a block device or a network device, the front-end driver module is installed and uninstalled through xenbus_register_frontend and xenbus_unregister_driver. netfront_driver is a xenbus_driver structure:

static struct xenbus_driver netfront_driver = {
    .name = "vif",
    .owner = THIS_MODULE,
    .ids = netfront_ids,
    .probe = netfront_probe,
    .remove = __devexit_p(xennet_remove),
    .resume = netfront_resume,
    .otherend_changed = backend_changed,
};
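For reference, a minimal sketch of the registration path. The structure follows the mainline xen-netfront driver of this era, but treat the exact guard conditions as assumptions:

static int __init netif_init(void)
{
    /* Only meaningful when running as a Xen guest (assumption: the
     * mainline driver performs additional feature checks here). */
    if (!xen_domain())
        return -ENODEV;

    printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

    /* Hook netfront_driver into the xenbus "vif" device class. */
    return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
    xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);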

The frontend connects (and reconnects after resume) to the backend through xennet_connect. xennet_connect first calls talk_to_backend, which writes the corresponding backend parameters into xenstore, e.g.

tx-ring-ref

rx-ring-ref

event-channel

request-rx-copy

feature-rx-notify

feature-sg

feature-gso-tcpv4

Among them, request-rx-copy, feature-rx-notify, feature-sg, and feature-gso-tcpv4 are set to 1.

Note that before the xenbus_transaction_start() writes, setup_netfront must have been called to populate the struct netfront_info (ring references and event channel).
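A condensed sketch of what the xenstore transaction in talk_to_backend looks like. Error handling and the transaction-retry loop are trimmed; the key names match the list above, and the rest follows the mainline driver but should be read as an approximation:

static int talk_to_backend_sketch(struct xenbus_device *dev,
                                  struct netfront_info *info)
{
    struct xenbus_transaction xbt;
    int err;

    /* Grant the ring pages and set up the event channel first. */
    err = setup_netfront(dev, info);
    if (err)
        return err;

    err = xenbus_transaction_start(&xbt);
    if (err)
        return err;

    /* Publish the parameters the backend needs to attach. */
    xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", info->tx_ring_ref);
    xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", info->rx_ring_ref);
    xenbus_printf(xbt, dev->nodename, "event-channel", "%u", info->evtchn);
    xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
    xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
    xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
    xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);

    return xenbus_transaction_end(xbt, 0);
}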

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
    struct xen_netif_tx_sring *txs;
    struct xen_netif_rx_sring *rxs;
    int err;
    struct net_device *netdev = info->netdev;

    info->tx_ring_ref = GRANT_INVALID_REF;
    info->rx_ring_ref = GRANT_INVALID_REF;
    info->rx.sring = NULL;
    info->tx.sring = NULL;
    netdev->irq = 0;

    err = xen_net_read_mac(dev, netdev->dev_addr);
    if (err) {
        xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
        goto fail;
    }

xen_net_read_mac obtains the netfront MAC address by reading it from xenstore.

    txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
    if (!txs) {
        err = -ENOMEM;
        xenbus_dev_fatal(dev, err, "allocating tx ring page");
        goto fail;
    }
    SHARED_RING_INIT(txs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

Allocate and initialize the TX I/O ring page.

    err = xenbus_grant_ring(dev, virt_to_mfn(txs));
    if (err < 0) {
        free_page((unsigned long)txs);
        goto fail;
    }

Grant the backend access to the TX I/O ring page; on success, xenbus_grant_ring returns the grant reference.

    info->tx_ring_ref = err;

    rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
    if (!rxs) {
        err = -ENOMEM;
        xenbus_dev_fatal(dev, err, "allocating rx ring page");
        goto fail;
    }
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
    if (err < 0) {
        free_page((unsigned long)rxs);
        goto fail;
    }
    info->rx_ring_ref = err;

Initialize the RX I/O ring page and grant the backend access privilege.

    err = xenbus_alloc_evtchn(dev, &info->evtchn);
    if (err)
        goto fail;

Allocate an event channel connecting the frontend and the backend.

    err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
                                    IRQF_SAMPLE_RANDOM, netdev->name,
                                    netdev);

The IRQ service routine bound to the event channel's hard interrupt is xennet_interrupt, which wakes up the NAPI poll routine (see the sketch after this function).

    if (err < 0)
        goto fail;
    netdev->irq = err;
    return 0;

 fail:
    return err;
}
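To make the NAPI hand-off concrete, here is a sketch of what xennet_interrupt does in drivers of this vintage. Treat the exact locking and the RING_HAS_UNCONSUMED_RESPONSES check as an approximation rather than a verbatim quote:

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct netfront_info *np = netdev_priv(dev);
    unsigned long flags;

    spin_lock_irqsave(&np->tx_lock, flags);

    if (likely(netif_carrier_ok(dev))) {
        /* Reap completed TX slots. */
        xennet_tx_buf_gc(dev);
        /* If the backend produced RX responses, schedule NAPI polling. */
        if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
            napi_schedule(&np->napi);
    }

    spin_unlock_irqrestore(&np->tx_lock, flags);

    return IRQ_HANDLED;
}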

After talk_to_backend, xennet_connect performs a series of RX and TX initialization steps. The code is as follows:

static int xennet_connect(struct net_device *dev)
{
    struct netfront_info *np = netdev_priv(dev);
    int i, requeue_idx, err;
    struct sk_buff *skb;
    grant_ref_t ref;
    struct xen_netif_rx_request *req;
    unsigned int feature_rx_copy;

    err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                       "feature-rx-copy", "%u", &feature_rx_copy);
    if (err != 1)
        feature_rx_copy = 0;

    if (!feature_rx_copy) {
        dev_info(&dev->dev,
                 "backend does not support copying receive path\n");
        return -ENODEV;
    }

    err = talk_to_backend(np->xbdev, np);
    if (err)
        return err;

    xennet_set_features(dev);

    spin_lock_bh(&np->rx_lock);
    spin_lock_irq(&np->tx_lock);

    /* Step 1: Discard all pending TX packet fragments. */
    xennet_release_tx_bufs(np);

xennet_release_tx_bufs releases all resources held by tx_skbs.

    /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
        if (!np->rx_skbs[i])
            continue;

        skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
        ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
        req = RING_GET_REQUEST(&np->rx, requeue_idx);

        gnttab_grant_foreign_access_ref(
            ref, np->xbdev->otherend_id,
            pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
            0);
        req->gref = ref;
        req->id = requeue_idx;

        requeue_idx++;
    }

    np->rx.req_prod_pvt = requeue_idx;

Hand the pages held in the rx_skbs and grant_rx_ref arrays to xen_netif_rx_request entries and grant the backend access to them. This way, when a packet arrives at the backend, its data can be copied into the corresponding page.

    /*
     * Step 3: All public and private state should now be sane.  Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * packets.
     */
    netif_carrier_on(np->netdev);
    notify_remote_via_irq(np->netdev->irq);
    xennet_tx_buf_gc(dev);
    xennet_alloc_rx_buffers(dev);

    spin_unlock_irq(&np->tx_lock);
    spin_unlock_bh(&np->rx_lock);

    return 0;
}
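The "kick" pattern above is worth spelling out. Whenever the frontend advances req_prod_pvt (as xennet_alloc_rx_buffers does when refilling the RX ring), it publishes the requests and notifies the backend only if the backend is actually waiting. A minimal sketch using the standard macros from xen/interface/io/ring.h; the helper name is hypothetical, made up for illustration:

/* Hypothetical helper: publish new RX requests and kick the backend. */
static void publish_rx_requests(struct netfront_info *np)
{
    int notify;

    /* req_prod_pvt has been advanced while filling in requests;
     * push them to the shared ring and check whether the backend
     * asked to be notified. */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
    if (notify)
        notify_remote_via_irq(np->netdev->irq);
}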

The corresponding xennet_disconnect_backend does the opposite: it releases the event channel and the TX and RX I/O ring resources.

static void xennet_disconnect_backend(struct netfront_info *info)
{
    /* Stop old i/f to prevent errors whilst we rebuild the state. */
    spin_lock_bh(&info->rx_lock);
    spin_lock_irq(&info->tx_lock);
    netif_carrier_off(info->netdev);
    spin_unlock_irq(&info->tx_lock);
    spin_unlock_bh(&info->rx_lock);

    if (info->netdev->irq)
        unbind_from_irqhandler(info->netdev->irq, info->netdev);
    info->evtchn = info->netdev->irq = 0;

    /* End access and free the pages. */
    xennet_end_access(info->tx_ring_ref, info->tx.sring);
    xennet_end_access(info->rx_ring_ref, info->rx.sring);

    info->tx_ring_ref = GRANT_INVALID_REF;
    info->rx_ring_ref = GRANT_INVALID_REF;
    info->tx.sring = NULL;
    info->rx.sring = NULL;
}
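xennet_end_access is a small helper. A sketch of what it plausibly looks like; the mainline helper revokes the grant and frees the ring page in one call, but treat the exact body as an assumption:

static void xennet_end_access(int ref, void *page)
{
    /* Revoking the foreign access also frees the page as a
     * side effect, because a non-zero page argument is passed. */
    if (ref != GRANT_INVALID_REF)
        gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}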

netfront_probe detects NIC devices on the xenbus. It first calls xennet_create_dev to create a net_device from the xenbus_device:

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
    int i, err;
    struct net_device *netdev;
    struct netfront_info *np;

    netdev = alloc_etherdev(sizeof(struct netfront_info));

alloc_etherdev creates a net_device with a single RX queue and a single TX queue; its private data area (returned by netdev_priv) holds the netfront_info.

    if (!netdev) {
        printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
               __func__);
        return ERR_PTR(-ENOMEM);
    }

    np = netdev_priv(netdev);
    np->xbdev = dev;

netfront_info->xbdev points back to the xenbus_device.

    spin_lock_init(&np->tx_lock);
    spin_lock_init(&np->rx_lock);

    skb_queue_head_init(&np->rx_batch);
    np->rx_target     = RX_DFL_MIN_TARGET;
    np->rx_min_target = RX_DFL_MIN_TARGET;
    np->rx_max_target = RX_MAX_TARGET;

    init_timer(&np->rx_refill_timer);
    np->rx_refill_timer.data = (unsigned long)netdev;
    np->rx_refill_timer.function = rx_refill_timeout;

    /* Initialise tx_skbs as a free chain containing every entry. */
    np->tx_skb_freelist = 0;
    for (i = 0; i < NET_TX_RING_SIZE; i++) {
        skb_entry_set_link(&np->tx_skbs[i], i + 1);
        np->grant_tx_ref[i] = GRANT_INVALID_REF;
    }

    /* Clear out rx_skbs */
    for (i = 0; i < NET_RX_RING_SIZE; i++) {
        np->rx_skbs[i] = NULL;
        np->grant_rx_ref[i] = GRANT_INVALID_REF;
    }

    /* A grant for every TX ring slot */
    if (gnttab_alloc_grant_references(TX_MAX_TARGET,
                                      &np->gref_tx_head) < 0) {
        printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
        err = -ENOMEM;
        goto exit;
    }
    /* A grant for every RX ring slot */
    if (gnttab_alloc_grant_references(RX_MAX_TARGET,
                                      &np->gref_rx_head) < 0) {
        printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
        err = -ENOMEM;
        goto exit_free_tx;
    }

    netdev->netdev_ops = &xennet_netdev_ops;

    netif_napi_add(netdev, &np->napi, xennet_poll, 64);
    netdev->features = NETIF_F_IP_CSUM;

    SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
    SET_NETDEV_DEV(netdev, &dev->dev);

    np->netdev = netdev;

    netif_carrier_off(netdev);

The above are general net_device initialization steps.

    return netdev;

 exit_free_tx:
    gnttab_free_grant_references(np->gref_tx_head);
 exit:
    free_netdev(netdev);
    return ERR_PTR(err);
}
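The xennet_netdev_ops assigned above wires the standard net_device callbacks to the driver's handlers. A sketch of its likely shape; callback names other than xennet_open and the functions mentioned in this article are assumptions based on the mainline driver:

static const struct net_device_ops xennet_netdev_ops = {
    .ndo_open            = xennet_open,
    .ndo_stop            = xennet_close,
    .ndo_start_xmit      = xennet_start_xmit,
    .ndo_change_mtu      = xennet_change_mtu,
    .ndo_set_mac_address = eth_mac_addr,      /* generic helper */
    .ndo_validate_addr   = eth_validate_addr, /* generic helper */
};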

The core of netfront_probe is the call to register_netdev, which registers the device with the network stack:

static int __devinit netfront_probe(struct xenbus_device *dev,
                                    const struct xenbus_device_id *id)
{
    int err;
    struct net_device *netdev;
    struct netfront_info *info;

    netdev = xennet_create_dev(dev);
    if (IS_ERR(netdev)) {
        err = PTR_ERR(netdev);
        xenbus_dev_fatal(dev, err, "creating netdev");
        return err;
    }

    info = netdev_priv(netdev);
    dev_set_drvdata(&dev->dev, info);

    err = register_netdev(info->netdev);
    if (err) {
        printk(KERN_WARNING "%s: register_netdev err=%d\n",
               __func__, err);
        goto fail;
    }

    err = xennet_sysfs_addif(info->netdev);
    if (err) {
        unregister_netdev(info->netdev);
        printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
               __func__, err);
        goto fail;
    }

    return 0;

 fail:
    free_netdev(netdev);
    dev_set_drvdata(&dev->dev, NULL);
    return err;
}
