From: Sebastian Andrzej Siewior
Date: Wed, 21 Feb 2018 10:39:54 +0100
Subject: [PATCH] net: use task_struct instead of CPU number as the queue owner on -RT

In commit ("net: move xmit_recursion to per-task variable on -RT") the
recursion level was changed to be per-task since we can get preempted
in BH on -RT. The lock owner should consequently be recorded as the
task that holds the lock and not the CPU. Otherwise we trigger the
"Dead loop on virtual device" warning on SMP systems.

Cc: stable-rt@vger.kernel.org
Reported-by: Kurt Kanzenbach
Tested-by: Kurt Kanzenbach
Signed-off-by: Sebastian Andrzej Siewior
---
 include/linux/netdevice.h |   54 ++++++++++++++++++++++++++++++++++++++++------
 net/core/dev.c            |    6 ++++-
 2 files changed, 53 insertions(+), 7 deletions(-)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -571,7 +571,11 @@ struct netdev_queue {
  * write-mostly part
  */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct	*xmit_lock_owner;
+#else
 	int			xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -3535,10 +3539,48 @@ static inline u32 netif_msg_init(int deb
 	return (1 << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3555,32 +3597,32 @@ static inline void __netif_tx_release(st
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+		netdev_queue_set_owner(txq, smp_processor_id());
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }
 
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3483,7 +3483,11 @@ static int __dev_queue_xmit(struct sk_bu
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (txq->xmit_lock_owner != current) {
+#else
 		if (txq->xmit_lock_owner != cpu) {
+#endif
 			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
@@ -7488,7 +7492,7 @@ static void netdev_init_one_queue(struct
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL
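
[Illustrative note, not part of the patch] To make the failure mode concrete,
below is a minimal standalone C sketch of the two ownership schemes. It is
not kernel code: struct fake_task, struct fake_queue and the
looks_recursive_*() helpers are invented stand-ins for struct task_struct,
struct netdev_queue and the owner test in __dev_queue_xmit().

/*
 * Standalone model of the owner check, assuming a single CPU on which
 * task A takes the xmit lock and is then preempted in BH (possible on
 * -RT, where BH runs in preemptible context), after which task B tries
 * to transmit on the same CPU.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_task { const char *name; };

struct fake_queue {
	int cpu_owner;                /* !RT scheme: owning CPU, -1 if free */
	struct fake_task *task_owner; /* -RT scheme: owning task, NULL if free */
};

/* !RT check: "the lock is held by my CPU" is treated as recursion */
static bool looks_recursive_cpu(const struct fake_queue *q, int cpu)
{
	return q->cpu_owner == cpu;
}

/* -RT check: only the very same task counts as recursion */
static bool looks_recursive_task(const struct fake_queue *q,
				 const struct fake_task *t)
{
	return q->task_owner == t;
}

int main(void)
{
	struct fake_task a = { "task A" }, b = { "task B" };
	struct fake_queue q = { .cpu_owner = -1, .task_owner = NULL };
	int cpu = 0;

	/* Task A acquires the xmit lock on CPU 0, then is preempted. */
	q.cpu_owner = cpu;
	q.task_owner = &a;

	/* Task B now attempts to transmit on the same CPU. */
	printf("CPU-based check flags %s as recursive: %s\n", b.name,
	       looks_recursive_cpu(&q, cpu) ? "yes (false positive)" : "no");
	printf("task-based check flags %s as recursive: %s\n", b.name,
	       looks_recursive_task(&q, &b) ? "yes" : "no (correct)");
	return 0;
}

In the CPU-based scheme task B is misidentified as a recursive transmit by
its own CPU's earlier lock acquisition, which is what steers
__dev_queue_xmit() into the "Dead loop on virtual device" path; comparing
the owner against current instead avoids the false positive while still
catching genuine same-task recursion.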