Linux protocol stack study notes, part 11: default queueing disciplines for transmit queues


If no queueing discipline (qdisc) has been configured on a network device's transmit queue, the kernel falls back to a default one when sending packets.

Two default qdiscs are defined in the kernel: one for transmit queues whose length is zero, and one for queues whose length is non-zero but have no qdisc configured. In addition, noop_qdisc serves as a placeholder before one of these defaults is attached.
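As a quick cross-check of which default a given interface would receive, the transmit queue length that drives this choice is visible from user space through sysfs. The sketch below is a standalone illustration, not kernel code; it assumes the standard /sys/class/net/<interface>/tx_queue_len attribute and takes the interface name from the command line:

#include <stdio.h>

/* Read /sys/class/net/<ifname>/tx_queue_len and report which default
 * qdisc the kernel discussed here would attach to the device. */
int main(int argc, char *argv[])
{
    char path[128];
    unsigned long qlen;
    FILE *f;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <interface>\n", argv[0]);
        return 1;
    }

    snprintf(path, sizeof(path), "/sys/class/net/%s/tx_queue_len", argv[1]);
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return 1;
    }
    if (fscanf(f, "%lu", &qlen) != 1) {
        fprintf(stderr, "could not parse %s\n", path);
        fclose(f);
        return 1;
    }
    fclose(f);

    printf("%s: tx_queue_len = %lu -> default would be %s\n",
           argv[1], qlen, qlen ? "pfifo_fast" : "noqueue");
    return 0;
}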


noop_qdisc, the default qdisc installed while a device is being initialized:

/* Enqueue: drop the packet immediately */
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
    kfree_skb(skb);
    return NET_XMIT_CN;
}

/* Dequeue: there is never anything to dequeue, always return NULL */
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
    return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
    .id         = "noop",
    .priv_size  = 0,
    .enqueue    = noop_enqueue,
    .dequeue    = noop_dequeue,
    .peek       = noop_dequeue,
    .owner      = THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
    .qdisc          = &noop_qdisc,
    .qdisc_sleeping = &noop_qdisc,
};

struct Qdisc noop_qdisc = {
    .enqueue    = noop_enqueue,
    .dequeue    = noop_dequeue,
    .flags      = TCQ_F_BUILTIN,
    .ops        = &noop_qdisc_ops,
    .list       = LIST_HEAD_INIT(noop_qdisc.list),
    .dev_queue  = &noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);



noqueue_qdisc, the default qdisc for a zero-length transmit queue:

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
    .id         = "noqueue",
    .priv_size  = 0,
    .enqueue    = noop_enqueue,
    .dequeue    = noop_dequeue,
    .peek       = noop_dequeue,
    .owner      = THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;  /* forward declaration */
static struct netdev_queue noqueue_netdev_queue = {
    .qdisc          = &noqueue_qdisc,
    .qdisc_sleeping = &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
    /* The queue length is zero and enqueue is left NULL; the transmit
     * path checks this field to decide whether to queue the packet or
     * hand it straight to the driver. */
    .enqueue    = NULL,
    .dequeue    = noop_dequeue,
    .flags      = TCQ_F_BUILTIN,
    .ops        = &noqueue_qdisc_ops,
    .list       = LIST_HEAD_INIT(noqueue_qdisc.list),
    .q.lock     = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
    .dev_queue  = &noqueue_netdev_queue,
};
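The NULL enqueue hook is what makes noqueue_qdisc special: the transmit path tests this field and, when it is NULL, bypasses queueing entirely. The following is a rough sketch abridged from the dev_queue_xmit() path of the same kernel generation (locking, error paths and many details omitted), so treat it as an illustration rather than the exact source:

/* Sketch only: roughly how dev_queue_xmit() chooses between queueing
 * a packet and handing it straight to the driver. */
static int dev_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_queue *txq = dev_pick_tx(dev, skb);  /* pick a tx queue */
    struct Qdisc *q = rcu_dereference(txq->qdisc);

    if (q->enqueue) {
        /* A real qdisc (noop_qdisc or pfifo_fast) is attached: enqueue
         * the packet and let qdisc_run() drain the queue later */
        return __dev_xmit_skb(skb, q, dev, txq);
    }

    /* noqueue_qdisc: enqueue is NULL, so hand the packet straight to the
     * driver (the common case for loopback and other virtual devices) */
    if (dev->flags & IFF_UP)
        return dev_hard_start_xmit(skb, dev, txq);

    kfree_skb(skb);
    return -ENETDOWN;
}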


pfifo_fast, the default qdisc for a non-zero-length transmit queue:

pfifo_fast manages outgoing packets with three priority bands; the band a packet goes into is derived from its skb->priority field.

/* The default qdisc uses three priority bands */
#define PFIFO_FAST_BANDS 3

/* Private data of the qdisc:
 * q:      three queues of different priority; the smaller the array index,
 *         the higher the priority
 * bitmap: records which of the three bands currently hold packets */
struct pfifo_fast_priv {
    u32 bitmap;
    struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/* Map priv->bitmap to the band to dequeue from next; if several bands hold
 * packets, the highest-priority (lowest-numbered) band is returned */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, int band)
{
    return priv->q + band;
}

/* Map the skb->priority field to one of the qdisc's priority bands */
static const u8 prio2band[TC_PRIO_MAX + 1] = {
    1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
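To make the two lookup tables concrete, the following standalone user-space sketch copies them and walks through one enqueue/dequeue round, mirroring what the enqueue and dequeue functions below do. The priority values 6 and 0 are arbitrary examples, not anything mandated by the kernel:

#include <stdio.h>

#define TC_PRIO_MAX      15
#define PFIFO_FAST_BANDS 3

/* Copies of the kernel lookup tables shown above, for illustration only */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
    1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
    /* Hypothetical skb->priority values of two queued packets */
    unsigned int priorities[] = { 6, 0 };
    unsigned int bitmap = 0;

    /* Enqueue side: map each priority to a band and mark that band as
     * non-empty, as pfifo_fast_enqueue() does */
    for (int i = 0; i < 2; i++) {
        int band = prio2band[priorities[i] & TC_PRIO_MAX];
        bitmap |= (1u << band);
        printf("priority %u -> band %d\n", priorities[i], band);
    }

    /* Dequeue side: bitmap is now 0b011 = 3 and bitmap2band[3] = 0,
     * so the higher-priority band 0 is drained before band 1 */
    printf("bitmap = %u, dequeue from band %d first\n",
           bitmap, bitmap2band[bitmap]);
    return 0;
}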

The enqueue and dequeue operations:

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
    /* If the queue is not yet full, cache the packet */
    if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
        /* Find the priority band corresponding to the skb's priority */
        int band = prio2band[skb->priority & TC_PRIO_MAX];
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff_head *list = band2list(priv, band);

        /* Mark this band as non-empty in the bitmap */
        priv->bitmap |= (1 << band);
        qdisc->q.qlen++;
        /* Append the packet to the band's queue */
        return __qdisc_enqueue_tail(skb, qdisc, list);
    }

    /* The queue is full: drop the packet */
    return qdisc_drop(skb, qdisc);
}


static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
    struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
    /* Find the highest-priority band that has packets to send */
    int band = bitmap2band[priv->bitmap];

    if (likely(band >= 0)) {
        struct sk_buff_head *list = band2list(priv, band);
        /* Take a packet from the head of that band */
        struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

        qdisc->q.qlen--;
        /* If the band is now empty, clear its bitmap bit */
        if (skb_queue_empty(list))
            priv->bitmap &= ~(1 << band);

        return skb;
    }

    /* No packets queued in any band: return NULL */
    return NULL;
}


struct Qdisc_ops pfifo_fast_ops __read_mostly = {
    .id         = "pfifo_fast",
    .priv_size  = sizeof(struct pfifo_fast_priv),
    .enqueue    = pfifo_fast_enqueue,
    .dequeue    = pfifo_fast_dequeue,
    .owner      = THIS_MODULE,
};


Qdisc initialization:

When a device is registered, each transmit queue's qdisc is initialized to noop_qdisc:

int register_netdevice(struct net_device *dev)
{
    ......
    dev_init_scheduler(dev);
    ......
}

void dev_init_scheduler(struct net_device *dev)
{
    dev->qdisc = &noop_qdisc;
    netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
}
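The per-queue callback dev_init_scheduler_queue() is not shown above; in the kernel source of this era it simply points both qdisc pointers of each transmit queue at the qdisc passed in, here noop_qdisc. Quoted from memory, so treat it as a sketch:

static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
    struct Qdisc *qdisc = _qdisc;

    /* Both the active and the sleeping qdisc pointers of the tx queue
     * start out pointing at noop_qdisc */
    dev_queue->qdisc = qdisc;
    dev_queue->qdisc_sleeping = qdisc;
}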

When a device is brought up, a default qdisc is created if none has been configured:

dev_open()
{
    ......
    dev_activate(dev);
    ......
}

void dev_activate(struct net_device *dev)
{
    /* If dev is still using the default noop_qdisc, create a new qdisc */
    if (dev->qdisc == &noop_qdisc) {
        attach_default_qdiscs(dev);
    }
}
static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
    struct Qdisc *qdisc;

    /* If the queue length is not 0, create a pfifo_fast qdisc for the
     * transmit queue */
    if (dev->tx_queue_len) {
        qdisc = qdisc_create_dflt(dev, dev_queue,
                                  &pfifo_fast_ops, TC_H_ROOT);
        if (!qdisc) {
            printk(KERN_INFO "%s: activation failed\n", dev->name);
            return;
        }

        /* Can by-pass the queue discipline for default qdisc */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
    } else {
        /* The transmit queue length is 0: use noqueue_qdisc */
        qdisc = &noqueue_qdisc;
    }
    dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
    struct netdev_queue *txq;
    struct Qdisc *qdisc;

    /* Get the device's first transmit queue */
    txq = netdev_get_tx_queue(dev, 0);

    /* If the device has only one transmit queue, or its transmit queue
     * length is 0, call attach_one_default_qdisc on each queue to set up
     * the default qdisc */
    if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
        netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
        dev->qdisc = txq->qdisc_sleeping;
        atomic_inc(&dev->qdisc->refcnt);
    } else {
        /* Multiqueue device: attach the mq qdisc instead */
        qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
        if (qdisc) {
            qdisc->ops->attach(qdisc);
            dev->qdisc = qdisc;
        }
    }
}


This article is from the "Yao Yang" blog; please keep this source when reposting: http://yaoyang.blog.51cto.com/7657153/1305123
