This is the 4.9.116 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAltcA9oACgkQONu9yGCS
aT4NZg/7Bbf10v5kf18jZeolQkj1GyVv7+tI1fAlM3tT5BGEqAZDUdp383YegQih
YRV5Q7fsdPxqyoXAfQosKdjViBowWglzWJE2YKZHRIOkOBSC0mlhfhNiqwp9owlQ
/JHGwfPhYaJt9Oyuc/OZ3iq/KNe8gm29OuFnQd8pKp8mFakpyiEVcLSeqHUjGQ9P
BBM0H9+F/16iOOVcOqQvbG7rza9AjPXeTLGcMf63Nah6qLSvuH3il/v42N5XXOuJ
iXozco9ifh3BxC/vP3sHrt+BCUeUsNbLUdZO1gZIpybd1byJAbQSPkN8v9jgNZbG
j7xMfMecsUNVsPpv8i8f7Zbh7PDYx+XGk6ufArmYItmp3X65gO+rrxbme+pSvKib
g8x0952+u+ddnyEPH/DcypTI/WU2qeAfXk4HEbeeYiZZxOUmF76XNn55YZW8xpqj
jJi9CaXHiXQpje2a8KGMR3b37T3f5fntOn4rIWT/isaqbqms8j/3b9AYf9yEEGZ1
b05787d6ybHQrMVi9nTXKrRAQqlnKpZZWdsOPvrrV9jO5TnYyDy2RB9/19SEpkdj
kD6lsMlL//o6TRFDIdph9Kg1sm2rFnkT78Hc/RZJ5t27+CM2YfvrLr1+k4G15QqG
N2h+0naYkA6dc052i0kbL0cQGXngeoBeINAKOcXyom99p/rFaKA=
=gXo7
-----END PGP SIGNATURE-----

Merge 4.9.116 into android-4.9

Changes in 4.9.116
    MIPS: ath79: fix register address in ath79_ddr_wb_flush()
    MIPS: Fix off-by-one in pci_resource_to_user()
    ip: hash fragments consistently
    ip: in cmsg IP(V6)_ORIGDSTADDR call pskb_may_pull
    net/mlx4_core: Save the qpn from the input modifier in RST2INIT wrapper
    net: skb_segment() should not return NULL
    net/mlx5: Adjust clock overflow work period
    net/mlx5e: Don't allow aRFS for encapsulated packets
    net/mlx5e: Fix quota counting in aRFS expire flow
    multicast: do not restore deleted record source filter mode to new one
    net: phy: consider PHY_IGNORE_INTERRUPT in phy_start_aneg_priv
    rtnetlink: add rtnl_link_state check in rtnl_configure_link
    tcp: fix dctcp delayed ACK schedule
    tcp: helpers to send special DCTCP ack
    tcp: do not cancel delay-AcK on DCTCP special ACK
    tcp: do not delay ACK in DCTCP upon CE status change
    tcp: free batches of packets in tcp_prune_ofo_queue()
    tcp: avoid collapses in tcp_prune_queue() if possible
    tcp: detect malicious patterns in tcp_collapse_ofo_queue()
    tcp: call tcp_drop() from tcp_data_queue_ofo()
    usb: cdc_acm: Add quirk for Castles VEGA3000
    usb: core: handle hub C_PORT_OVER_CURRENT condition
    usb: gadget: f_fs: Only return delayed status when len is 0
    driver core: Partially revert "driver core: correct device's shutdown order"
    can: xilinx_can: fix RX loop if RXNEMP is asserted without RXOK
    can: xilinx_can: fix power management handling
    can: xilinx_can: fix recovery from error states not being propagated
    can: xilinx_can: fix device dropping off bus on RX overrun
    can: xilinx_can: keep only 1-2 frames in TX FIFO to fix TX accounting
    can: xilinx_can: fix incorrect clear of non-processed interrupts
    can: xilinx_can: fix RX overflow interrupt not being enabled
    turn off -Wattribute-alias
    exec: avoid gcc-8 warning for get_task_comm
    Linux 4.9.116

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 115
+SUBLEVEL = 116
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -650,6 +650,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-    void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+    void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
     /* Flush the DDR write buffer. */
     __raw_writel(0x1, flush_reg);
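Note on the one-liner above: callers of ath79_ddr_wb_flush() pass a register index, but the flush registers are 32-bit words laid out back to back, so the byte offset from the mapped base is index * 4; the old code added the raw index to a byte pointer. A small standalone sketch of that addressing (illustrative names only, not the driver's):

#include <stdint.h>

/* pretend this array is the ioremap()'d flush-register block */
static volatile uint32_t regs[4];

static void flush_by_index(unsigned int idx)
{
    /* byte offset = idx * 4; with a uint32_t pointer the *4 scaling is implicit,
     * which is exactly what the void-pointer arithmetic in the driver was missing
     */
    volatile uint32_t *flush_reg = &regs[idx];

    *flush_reg = 0x1;   /* write 1 to trigger the flush */
}

int main(void)
{
    flush_by_index(3);  /* touches regs base + 0x0c, not base + 0x03 */
    return 0;
}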
@@ -55,7 +55,7 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
     phys_addr_t size = resource_size(rsrc);
 
     *start = fixup_bigphys_addr(rsrc->start, size);
-    *end = rsrc->start + size;
+    *end = rsrc->start + size - 1;
 }
 
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
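The off-by-one above matters because the user-visible resource range is inclusive: for a BAR at 0x1000 with size 0x100 the last valid byte is 0x10ff, not 0x1100. A tiny self-contained check of that arithmetic (plain userspace C, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t start = 0x1000, size = 0x100;

    /* inclusive end of the resource: last addressable byte */
    uint64_t end = start + size - 1;

    assert(end == 0x10ff);           /* fixed computation */
    assert(start + size == 0x1100);  /* old computation: one past the end */
    return 0;
}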
@@ -363,14 +363,6 @@ re_probe:
         goto probe_failed;
     }
 
-    /*
-     * Ensure devices are listed in devices_kset in correct order
-     * It's important to move Dev to the end of devices_kset before
-     * calling .probe, because it could be recursive and parent Dev
-     * should always go first
-     */
-    devices_kset_move_last(dev);
-
     if (dev->bus->probe) {
         ret = dev->bus->probe(dev);
         if (ret)
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014  Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
          XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
          XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-         XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+         XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT 7  /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
  * struct xcan_priv - This definition define CAN driver instance
  * @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
  * @tx_head: Tx CAN packets ready to send on the queue
  * @tx_tail: Tx CAN packets successfully sended on the queue
  * @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
     struct can_priv can;
+    spinlock_t tx_lock;
     unsigned int tx_head;
     unsigned int tx_tail;
     unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
     .brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK 0x0001
+struct xcan_devtype_data {
+    unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
         usleep_range(500, 10000);
     }
 
+    /* reset clears FIFOs */
+    priv->tx_head = 0;
+    priv->tx_tail = 0;
+
     return 0;
 }
 
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     struct net_device_stats *stats = &ndev->stats;
     struct can_frame *cf = (struct can_frame *)skb->data;
     u32 id, dlc, data[2] = {0, 0};
+    unsigned long flags;
 
     if (can_dropped_invalid_skb(ndev, skb))
         return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
     can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+    spin_lock_irqsave(&priv->tx_lock, flags);
+
     priv->tx_head++;
 
     /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         stats->tx_bytes += cf->can_dlc;
     }
 
+    /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+    if (priv->tx_max > 1)
+        priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
     /* Check if the TX buffer is full */
     if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
         netif_stop_queue(ndev);
 
+    spin_unlock_irqrestore(&priv->tx_lock, flags);
+
     return NETDEV_TX_OK;
 }
 
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
     return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+    struct xcan_priv *priv = netdev_priv(ndev);
+    u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+    if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+        return CAN_STATE_ERROR_PASSIVE;
+    else if (status & XCAN_SR_ERRWRN_MASK)
+        return CAN_STATE_ERROR_WARNING;
+    else
+        return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+                                 enum can_state new_state,
+                                 struct can_frame *cf)
+{
+    struct xcan_priv *priv = netdev_priv(ndev);
+    u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+    u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+    u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+    priv->can.state = new_state;
+
+    if (cf) {
+        cf->can_id |= CAN_ERR_CRTL;
+        cf->data[6] = txerr;
+        cf->data[7] = rxerr;
+    }
+
+    switch (new_state) {
+    case CAN_STATE_ERROR_PASSIVE:
+        priv->can.can_stats.error_passive++;
+        if (cf)
+            cf->data[1] = (rxerr > 127) ?
+                    CAN_ERR_CRTL_RX_PASSIVE :
+                    CAN_ERR_CRTL_TX_PASSIVE;
+        break;
+    case CAN_STATE_ERROR_WARNING:
+        priv->can.can_stats.error_warning++;
+        if (cf)
+            cf->data[1] |= (txerr > rxerr) ?
+                    CAN_ERR_CRTL_TX_WARNING :
+                    CAN_ERR_CRTL_RX_WARNING;
+        break;
+    case CAN_STATE_ERROR_ACTIVE:
+        if (cf)
+            cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+        break;
+    default:
+        /* non-ERROR states are handled elsewhere */
+        WARN_ON(1);
+        break;
+    }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+    struct xcan_priv *priv = netdev_priv(ndev);
+    enum can_state old_state = priv->can.state;
+    enum can_state new_state;
+
+    /* changing error state due to successful frame RX/TX can only
+     * occur from these states
+     */
+    if (old_state != CAN_STATE_ERROR_WARNING &&
+        old_state != CAN_STATE_ERROR_PASSIVE)
+        return;
+
+    new_state = xcan_current_error_state(ndev);
+
+    if (new_state != old_state) {
+        struct sk_buff *skb;
+        struct can_frame *cf;
+
+        skb = alloc_can_err_skb(ndev, &cf);
+
+        xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+        if (skb) {
+            struct net_device_stats *stats = &ndev->stats;
+
+            stats->rx_packets++;
+            stats->rx_bytes += cf->can_dlc;
+            netif_rx(skb);
+        }
+    }
+}
+
 /**
  * xcan_err_interrupt - error frame Isr
  * @ndev: net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
     struct net_device_stats *stats = &ndev->stats;
     struct can_frame *cf;
     struct sk_buff *skb;
-    u32 err_status, status, txerr = 0, rxerr = 0;
+    u32 err_status;
 
     skb = alloc_can_err_skb(ndev, &cf);
 
     err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
     priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-    txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-    rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-            XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-    status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
     if (isr & XCAN_IXR_BSOFF_MASK) {
         priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
         can_bus_off(ndev);
         if (skb)
             cf->can_id |= CAN_ERR_BUSOFF;
-    } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-        priv->can.state = CAN_STATE_ERROR_PASSIVE;
-        priv->can.can_stats.error_passive++;
-        if (skb) {
-            cf->can_id |= CAN_ERR_CRTL;
-            cf->data[1] = (rxerr > 127) ?
-                    CAN_ERR_CRTL_RX_PASSIVE :
-                    CAN_ERR_CRTL_TX_PASSIVE;
-            cf->data[6] = txerr;
-            cf->data[7] = rxerr;
-        }
-    } else if (status & XCAN_SR_ERRWRN_MASK) {
-        priv->can.state = CAN_STATE_ERROR_WARNING;
-        priv->can.can_stats.error_warning++;
-        if (skb) {
-            cf->can_id |= CAN_ERR_CRTL;
-            cf->data[1] |= (txerr > rxerr) ?
-                    CAN_ERR_CRTL_TX_WARNING :
-                    CAN_ERR_CRTL_RX_WARNING;
-            cf->data[6] = txerr;
-            cf->data[7] = rxerr;
-        }
+    } else {
+        enum can_state new_state = xcan_current_error_state(ndev);
+
+        xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
     }
 
     /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
     if (isr & XCAN_IXR_RXOFLW_MASK) {
         stats->rx_over_errors++;
         stats->rx_errors++;
-        priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
         if (skb) {
             cf->can_id |= CAN_ERR_CRTL;
             cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
     isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
     while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-        if (isr & XCAN_IXR_RXOK_MASK) {
-            priv->write_reg(priv, XCAN_ICR_OFFSET,
-                XCAN_IXR_RXOK_MASK);
-            work_done += xcan_rx(ndev);
-        } else {
-            priv->write_reg(priv, XCAN_ICR_OFFSET,
-                XCAN_IXR_RXNEMP_MASK);
-            break;
-        }
+        work_done += xcan_rx(ndev);
         priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
         isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
     }
 
-    if (work_done)
+    if (work_done) {
         can_led_event(ndev, CAN_LED_EVENT_RX);
+        xcan_update_error_state_after_rxtx(ndev);
+    }
 
     if (work_done < quota) {
         napi_complete(napi);
         ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-        ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+        ier |= XCAN_IXR_RXNEMP_MASK;
         priv->write_reg(priv, XCAN_IER_OFFSET, ier);
     }
     return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
     struct xcan_priv *priv = netdev_priv(ndev);
     struct net_device_stats *stats = &ndev->stats;
+    unsigned int frames_in_fifo;
+    int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+    unsigned long flags;
+    int retries = 0;
+
+    /* Synchronize with xmit as we need to know the exact number
+     * of frames in the FIFO to stay in sync due to the TXFEMP
+     * handling.
+     * This also prevents a race between netif_wake_queue() and
+     * netif_stop_queue().
+     */
+    spin_lock_irqsave(&priv->tx_lock, flags);
+
+    frames_in_fifo = priv->tx_head - priv->tx_tail;
 
-    while ((priv->tx_head - priv->tx_tail > 0) &&
-            (isr & XCAN_IXR_TXOK_MASK)) {
+    if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+        /* clear TXOK anyway to avoid getting back here */
         priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+        spin_unlock_irqrestore(&priv->tx_lock, flags);
+        return;
+    }
+
+    /* Check if 2 frames were sent (TXOK only means that at least 1
+     * frame was sent).
+     */
+    if (frames_in_fifo > 1) {
+        WARN_ON(frames_in_fifo > priv->tx_max);
+
+        /* Synchronize TXOK and isr so that after the loop:
+         * (1) isr variable is up-to-date at least up to TXOK clear
+         *     time. This avoids us clearing a TXOK of a second frame
+         *     but not noticing that the FIFO is now empty and thus
+         *     marking only a single frame as sent.
+         * (2) No TXOK is left. Having one could mean leaving a
+         *     stray TXOK as we might process the associated frame
+         *     via TXFEMP handling as we read TXFEMP *after* TXOK
+         *     clear to satisfy (1).
+         */
+        while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+            priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+            isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+        }
+
+        if (isr & XCAN_IXR_TXFEMP_MASK) {
+            /* nothing in FIFO anymore */
+            frames_sent = frames_in_fifo;
+        }
+    } else {
+        /* single frame in fifo, just clear TXOK */
+        priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+    }
+
+    while (frames_sent--) {
         can_get_echo_skb(ndev, priv->tx_tail %
                     priv->tx_max);
         priv->tx_tail++;
         stats->tx_packets++;
-        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
     }
-    can_led_event(ndev, CAN_LED_EVENT_TX);
+
     netif_wake_queue(ndev);
+
+    spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+    can_led_event(ndev, CAN_LED_EVENT_TX);
+    xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
     struct net_device *ndev = (struct net_device *)dev_id;
     struct xcan_priv *priv = netdev_priv(ndev);
     u32 isr, ier;
+    u32 isr_errors;
 
     /* Get the interrupt status from Xilinx CAN */
     isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
         xcan_tx_interrupt(ndev, isr);
 
     /* Check for the type of error interrupt and Processing it */
-    if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-            XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-        priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-                XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-                XCAN_IXR_ARBLST_MASK));
+    isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+                XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+    if (isr_errors) {
+        priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
         xcan_err_interrupt(ndev, isr);
     }
 
     /* Check for the type of receive interrupt and Processing it */
-    if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+    if (isr & XCAN_IXR_RXNEMP_MASK) {
         ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-        ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+        ier &= ~XCAN_IXR_RXNEMP_MASK;
         priv->write_reg(priv, XCAN_IER_OFFSET, ier);
         napi_schedule(&priv->napi);
     }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 static void xcan_chip_stop(struct net_device *ndev)
 {
     struct xcan_priv *priv = netdev_priv(ndev);
-    u32 ier;
 
     /* Disable interrupts and leave the can in configuration mode */
-    ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-    ier &= ~XCAN_INTR_ALL;
-    priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-    priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+    set_reset_mode(ndev);
     priv->can.state = CAN_STATE_STOPPED;
 }
 
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-    if (!device_may_wakeup(dev))
-        return pm_runtime_force_suspend(dev);
+    struct net_device *ndev = dev_get_drvdata(dev);
 
-    return 0;
+    if (netif_running(ndev)) {
+        netif_stop_queue(ndev);
+        netif_device_detach(ndev);
+        xcan_chip_stop(ndev);
+    }
+
+    return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-    if (!device_may_wakeup(dev))
-        return pm_runtime_force_resume(dev);
+    struct net_device *ndev = dev_get_drvdata(dev);
+    int ret;
 
-    return 0;
+    ret = pm_runtime_force_resume(dev);
+    if (ret) {
+        dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+        return ret;
+    }
+
+    if (netif_running(ndev)) {
+        ret = xcan_chip_start(ndev);
+        if (ret) {
+            dev_err(dev, "xcan_chip_start failed on resume\n");
+            return ret;
+        }
+
+        netif_device_attach(ndev);
+        netif_start_queue(ndev);
+    }
+
+    return 0;
 
 }
 
 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
     struct net_device *ndev = dev_get_drvdata(dev);
     struct xcan_priv *priv = netdev_priv(ndev);
 
-    if (netif_running(ndev)) {
-        netif_stop_queue(ndev);
-        netif_device_detach(ndev);
-    }
-
-    priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-    priv->can.state = CAN_STATE_SLEEPING;
-
     clk_disable_unprepare(priv->bus_clk);
     clk_disable_unprepare(priv->can_clk);
 
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
     struct net_device *ndev = dev_get_drvdata(dev);
     struct xcan_priv *priv = netdev_priv(ndev);
     int ret;
-    u32 isr, status;
 
     ret = clk_prepare_enable(priv->bus_clk);
     if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
         return ret;
     }
 
-    priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-    isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-    status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-    if (netif_running(ndev)) {
-        if (isr & XCAN_IXR_BSOFF_MASK) {
-            priv->can.state = CAN_STATE_BUS_OFF;
-            priv->write_reg(priv, XCAN_SRR_OFFSET,
-                    XCAN_SRR_RESET_MASK);
-        } else if ((status & XCAN_SR_ESTAT_MASK) ==
-                    XCAN_SR_ESTAT_MASK) {
-            priv->can.state = CAN_STATE_ERROR_PASSIVE;
-        } else if (status & XCAN_SR_ERRWRN_MASK) {
-            priv->can.state = CAN_STATE_ERROR_WARNING;
-        } else {
-            priv->can.state = CAN_STATE_ERROR_ACTIVE;
-        }
-        netif_device_attach(ndev);
-        netif_start_queue(ndev);
-    }
-
     return 0;
 }
 
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
     SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+    .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+    { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+    { .compatible = "xlnx,axi-can-1.00.a", },
+    { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
     struct resource *res; /* IO mem resources */
     struct net_device *ndev;
     struct xcan_priv *priv;
+    const struct of_device_id *of_id;
+    int caps = 0;
     void __iomem *addr;
-    int ret, rx_max, tx_max;
+    int ret, rx_max, tx_max, tx_fifo_depth;
 
     /* Get the virtual base address for the device */
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
         goto err;
     }
 
-    ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+    ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                               &tx_fifo_depth);
     if (ret < 0)
         goto err;
 
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
     if (ret < 0)
         goto err;
 
+    of_id = of_match_device(xcan_of_match, &pdev->dev);
+    if (of_id) {
+        const struct xcan_devtype_data *devtype_data = of_id->data;
+
+        if (devtype_data)
+            caps = devtype_data->caps;
+    }
+
+    /* There is no way to directly figure out how many frames have been
+     * sent when the TXOK interrupt is processed. If watermark programming
+     * is supported, we can have 2 frames in the FIFO and use TXFEMP
+     * to determine if 1 or 2 frames have been sent.
+     * Theoretically we should be able to use TXFWMEMP to determine up
+     * to 3 frames, but it seems that after putting a second frame in the
+     * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+     * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+     * sent), which is not a sensible state - possibly TXFWMEMP is not
+     * completely synchronized with the rest of the bits?
+     */
+    if (caps & XCAN_CAP_WATERMARK)
+        tx_max = min(tx_fifo_depth, 2);
+    else
+        tx_max = 1;
+
     /* Create a CAN device instance */
     ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
     if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
                     CAN_CTRLMODE_BERR_REPORTING;
     priv->reg_base = addr;
     priv->tx_max = tx_max;
+    spin_lock_init(&priv->tx_lock);
 
     /* Get IRQ for the device */
     ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
 
     pm_runtime_put(&pdev->dev);
 
-    netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+    netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
             priv->reg_base, ndev->irq, priv->can.clock.freq,
-            priv->tx_max);
+            tx_fifo_depth, priv->tx_max);
 
     return 0;
 
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
     return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-    { .compatible = "xlnx,zynq-can-1.0", },
-    { .compatible = "xlnx,axi-can-1.00.a", },
-    { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
     .probe = xcan_probe,
     .remove = xcan_remove,
@@ -2916,7 +2916,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
     u32 srqn = qp_get_srqn(qpc) & 0xffffff;
     int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
     struct res_srq *srq;
-    int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+    int local_qpn = vhcr->in_modifier & 0xffffff;
 
     err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
     if (err)
@@ -383,14 +383,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
     HLIST_HEAD(del_list);
     spin_lock_bh(&priv->fs.arfs.arfs_lock);
     mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-        if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-            break;
         if (!work_pending(&arfs_rule->arfs_work) &&
             rps_may_expire_flow(priv->netdev,
                     arfs_rule->rxq, arfs_rule->flow_id,
                     arfs_rule->filter_id)) {
             hlist_del_init(&arfs_rule->hlist);
             hlist_add_head(&arfs_rule->hlist, &del_list);
+            if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+                break;
         }
     }
     spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -715,6 +715,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
         skb->protocol != htons(ETH_P_IPV6))
         return -EPROTONOSUPPORT;
 
+    if (skb->encapsulation)
+        return -EPROTONOSUPPORT;
+
     arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
     if (!arfs_t)
         return -EPROTONOSUPPORT;
@@ -233,6 +233,7 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
     struct mlx5e_tstamp *tstamp = &priv->tstamp;
+    u64 overflow_cycles;
     u64 ns;
     u64 frac = 0;
     u32 dev_freq;
@@ -257,10 +258,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 
     /* Calculate period in seconds to call the overflow watchdog - to make
      * sure counter is checked at least once every wrap around.
+     * The period is calculated as the minimum between max HW cycles count
+     * (The clock source mask) and max amount of cycles that can be
+     * multiplied by clock multiplier where the result doesn't exceed
+     * 64bits.
      */
-    ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
+    overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
+    overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
+
+    ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
                  frac, &frac);
-    do_div(ns, NSEC_PER_SEC / 2 / HZ);
+    do_div(ns, NSEC_PER_SEC / HZ);
     tstamp->overflow_period = ns;
 
     INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
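For intuition, the new period is roughly "the time it takes either the hardware counter or the 64-bit cycles*mult product to wrap, halved", converted to jiffies. A standalone sketch of that arithmetic with made-up mult/shift/mask values (not the real mlx5 parameters; the fractional-nanosecond term is ignored):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250ULL   /* assumed kernel tick rate */

int main(void)
{
    /* illustrative cyclecounter parameters */
    uint64_t mult = 4194304;                 /* 2^22 */
    uint64_t mask = (1ULL << 41) - 1;        /* 41-bit hardware counter */
    unsigned int shift = 22;

    /* largest cycle count whose cycles * mult still fits in 64 bits,
     * capped at half of the counter's own wrap-around range
     */
    uint64_t overflow_cycles = (~0ULL >> 1) / mult;
    if (overflow_cycles > mask >> 1)
        overflow_cycles = mask >> 1;

    uint64_t ns = (overflow_cycles * mult) >> shift;      /* cyc2ns, frac dropped */
    uint64_t period_jiffies = ns / (NSEC_PER_SEC / HZ);   /* work period */

    printf("schedule overflow work every %llu jiffies\n",
           (unsigned long long)period_jiffies);
    return 0;
}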
@@ -598,7 +598,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
      * negotiation may already be done and aneg interrupt may not be
      * generated.
      */
-    if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+    if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
         err = phy_aneg_done(phydev);
         if (err > 0) {
             trigger = true;
@@ -1785,6 +1785,9 @@ static const struct usb_device_id acm_ids[] = {
     { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
     .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
     },
+    { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+    .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+    },
 
     { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
     .driver_info = CLEAR_HALT_CONDITIONS,
@@ -1139,10 +1139,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
         if (!udev || udev->state == USB_STATE_NOTATTACHED) {
             /* Tell hub_wq to disconnect the device or
-             * check for a new connection
+             * check for a new connection or over current condition.
+             * Based on USB2.0 Spec Section 11.12.5,
+             * C_PORT_OVER_CURRENT could be set while
+             * PORT_OVER_CURRENT is not. So check for any of them.
              */
             if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-                (portstatus & USB_PORT_STAT_OVERCURRENT))
+                (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+                (portchange & USB_PORT_STAT_C_OVERCURRENT))
                 set_bit(port1, hub->change_bits);
 
         } else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -3243,7 +3243,7 @@ static int ffs_func_setup(struct usb_function *f,
     __ffs_event_add(ffs, FUNCTIONFS_SETUP);
     spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-    return USB_GADGET_DELAYED_STATUS;
+    return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
@@ -1228,15 +1228,14 @@ killed:
     return -EAGAIN;
 }
 
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 {
-    /* buf must be at least sizeof(tsk->comm) in size */
     task_lock(tsk);
-    strncpy(buf, tsk->comm, sizeof(tsk->comm));
+    strncpy(buf, tsk->comm, buf_size);
     task_unlock(tsk);
     return buf;
 }
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
 
 /*
  * These functions flushes out all traces of the currently running executable
@@ -3152,7 +3152,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
 {
     __set_task_comm(tsk, from, false);
 }
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({ \
+    BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
+    __get_task_comm(buf, sizeof(buf), tsk); \
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
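With the macro above, get_task_comm() only compiles when the destination is a real TASK_COMM_LEN-sized array, which is the point of the "exec: avoid gcc-8 warning for get_task_comm" change in this release. A userspace analogue of the pattern, assuming TASK_COMM_LEN is 16 as in the kernel (hypothetical names, GNU C statement expression as in the kernel macro):

#include <string.h>

#define TASK_COMM_LEN 16

/* compile-time assertion: negative array size if cond is true */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static char *__get_comm(char *buf, size_t buf_size, const char *src)
{
    strncpy(buf, src, buf_size);
    buf[buf_size - 1] = '\0';   /* analogue only; the kernel source is bounded */
    return buf;
}

#define get_comm(buf, src) ({                          \
    BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);        \
    __get_comm(buf, sizeof(buf), src);                 \
})

int main(void)
{
    char comm[TASK_COMM_LEN];

    get_comm(comm, "kworker/0:1");  /* ok: sizeof(comm) == TASK_COMM_LEN */
    /* char *p = comm; get_comm(p, "x");  -- would fail to compile:
     * sizeof(p) != TASK_COMM_LEN, so BUILD_BUG_ON() triggers
     */
    return 0;
}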
@@ -364,6 +364,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
             struct pipe_inode_info *pipe, size_t len,
             unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                      const unsigned int pkts)
 {
@@ -554,6 +555,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -2339,9 +2339,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
         return err;
     }
 
-    dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-    __dev_notify_flags(dev, old_flags, ~0U);
+    if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+        __dev_notify_flags(dev, old_flags, 0U);
+    } else {
+        dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+        __dev_notify_flags(dev, old_flags, ~0U);
+    }
     return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
@@ -3253,6 +3253,7 @@ normal:
             net_warn_ratelimited(
                 "skb_segment: too many frags: %u %u\n",
                 pos, mss);
+            err = -EINVAL;
             goto err;
         }
 
@@ -3289,11 +3290,10 @@ skip_fraglist:
 
 perform_csum_check:
     if (!csum) {
-        if (skb_has_shared_frag(nskb)) {
-            err = __skb_linearize(nskb);
-            if (err)
-                goto err;
-        }
+        if (skb_has_shared_frag(nskb) &&
+            __skb_linearize(nskb))
+            goto err;
+
         if (!nskb->remcsum_offload)
             nskb->ip_summed = CHECKSUM_NONE;
         SKB_GSO_CB(nskb)->csum =
@@ -1193,8 +1193,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
     if (pmc) {
         im->interface = pmc->interface;
         im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-        im->sfmode = pmc->sfmode;
-        if (pmc->sfmode == MCAST_INCLUDE) {
+        if (im->sfmode == MCAST_INCLUDE) {
             im->tomb = pmc->tomb;
             im->sources = pmc->sources;
             for (psf = im->sources; psf; psf = psf->sf_next)
@@ -519,6 +519,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
     to->dev = from->dev;
     to->mark = from->mark;
 
+    skb_copy_hash(to, from);
+
     /* Copy the flags to each fragment. */
     IPCB(to)->flags = IPCB(from)->flags;
 
@@ -135,15 +135,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
     struct sockaddr_in sin;
     const struct iphdr *iph = ip_hdr(skb);
-    __be16 *ports = (__be16 *)skb_transport_header(skb);
+    __be16 *ports;
+    int end;
 
-    if (skb_transport_offset(skb) + 4 > (int)skb->len)
+    end = skb_transport_offset(skb) + 4;
+    if (end > 0 && !pskb_may_pull(skb, end))
         return;
 
     /* All current transport protocols have the port numbers in the
      * first four bytes of the transport header and this function is
      * written with this assumption in mind.
      */
+    ports = (__be16 *)skb_transport_header(skb);
 
     sin.sin_family = AF_INET;
     sin.sin_addr.s_addr = iph->daddr;
@@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
     struct dctcp *ca = inet_csk_ca(sk);
     struct tcp_sock *tp = tcp_sk(sk);
 
-    /* State has changed from CE=0 to CE=1 and delayed
-     * ACK has not sent yet.
-     */
-    if (!ca->ce_state && ca->delayed_ack_reserved) {
-        u32 tmp_rcv_nxt;
-
-        /* Save current rcv_nxt. */
-        tmp_rcv_nxt = tp->rcv_nxt;
-
-        /* Generate previous ack with CE=0. */
-        tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-        tp->rcv_nxt = ca->prior_rcv_nxt;
-
-        tcp_send_ack(sk);
-
-        /* Recover current rcv_nxt. */
-        tp->rcv_nxt = tmp_rcv_nxt;
+    if (!ca->ce_state) {
+        /* State has changed from CE=0 to CE=1, force an immediate
+         * ACK to reflect the new CE state. If an ACK was delayed,
+         * send that first to reflect the prior CE state.
+         */
+        if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+            __tcp_send_ack(sk, ca->prior_rcv_nxt);
+        tcp_enter_quickack_mode(sk);
     }
 
     ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
     struct dctcp *ca = inet_csk_ca(sk);
     struct tcp_sock *tp = tcp_sk(sk);
 
-    /* State has changed from CE=1 to CE=0 and delayed
-     * ACK has not sent yet.
-     */
-    if (ca->ce_state && ca->delayed_ack_reserved) {
-        u32 tmp_rcv_nxt;
-
-        /* Save current rcv_nxt. */
-        tmp_rcv_nxt = tp->rcv_nxt;
-
-        /* Generate previous ack with CE=1. */
-        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-        tp->rcv_nxt = ca->prior_rcv_nxt;
-
-        tcp_send_ack(sk);
-
-        /* Recover current rcv_nxt. */
-        tp->rcv_nxt = tmp_rcv_nxt;
+    if (ca->ce_state) {
+        /* State has changed from CE=1 to CE=0, force an immediate
+         * ACK to reflect the new CE state. If an ACK was delayed,
+         * send that first to reflect the prior CE state.
+         */
+        if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+            __tcp_send_ack(sk, ca->prior_rcv_nxt);
+        tcp_enter_quickack_mode(sk);
     }
 
     ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -210,13 +210,14 @@ static void tcp_incr_quickack(struct sock *sk)
         icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk)
 {
     struct inet_connection_sock *icsk = inet_csk(sk);
     tcp_incr_quickack(sk);
     icsk->icsk_ack.pingpong = 0;
     icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -4517,7 +4518,7 @@ coalesce_done:
             /* All the bits are present. Drop. */
             NET_INC_STATS(sock_net(sk),
                       LINUX_MIB_TCPOFOMERGE);
-            __kfree_skb(skb);
+            tcp_drop(sk, skb);
             skb = NULL;
             tcp_dsack_set(sk, seq, end_seq);
             goto add_sack;
@@ -4536,7 +4537,7 @@ coalesce_done:
                      TCP_SKB_CB(skb1)->end_seq);
             NET_INC_STATS(sock_net(sk),
                       LINUX_MIB_TCPOFOMERGE);
-            __kfree_skb(skb1);
+            tcp_drop(sk, skb1);
             goto merge_right;
         }
     } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
@@ -174,8 +174,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                      u32 rcv_nxt)
 {
+    struct tcp_sock *tp = tcp_sk(sk);
+
+    if (unlikely(rcv_nxt != tp->rcv_nxt))
+        return;  /* Special ACK sent by DCTCP to reflect ECN */
     tcp_dec_quickack_mode(sk, pkts);
     inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -905,8 +910,8 @@ out:
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                  int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
     const struct inet_connection_sock *icsk = inet_csk(sk);
     struct inet_sock *inet;
@@ -969,7 +974,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     th->source = inet->inet_sport;
     th->dest = inet->inet_dport;
     th->seq = htonl(tcb->seq);
-    th->ack_seq = htonl(tp->rcv_nxt);
+    th->ack_seq = htonl(rcv_nxt);
     *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
                     tcb->tcp_flags);
 
@@ -1010,7 +1015,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     icsk->icsk_af_ops->send_check(sk, skb);
 
     if (likely(tcb->tcp_flags & TCPHDR_ACK))
-        tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+        tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
     if (skb->len != tcp_header_size) {
         tcp_event_data_sent(tp, sk);
@@ -1046,6 +1051,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                gfp_t gfp_mask)
+{
+    return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                  tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3470,7 +3482,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
     struct sk_buff *buff;
 
@@ -3508,9 +3520,14 @@ void tcp_send_ack(struct sock *sk)
 
     /* Send it off, this clears delayed acks for us. */
     skb_mstamp_get(&buff->skb_mstamp);
-    tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+    __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+    __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
 EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
@@ -695,13 +695,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
     }
     if (np->rxopt.bits.rxorigdstaddr) {
         struct sockaddr_in6 sin6;
-        __be16 *ports = (__be16 *) skb_transport_header(skb);
+        __be16 *ports;
+        int end;
 
-        if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+        end = skb_transport_offset(skb) + 4;
+        if (end <= 0 || pskb_may_pull(skb, end)) {
             /* All current transport protocols have the port numbers in the
              * first four bytes of the transport header and this function is
              * written with this assumption in mind.
              */
+            ports = (__be16 *)skb_transport_header(skb);
 
             sin6.sin6_family = AF_INET6;
             sin6.sin6_addr = ipv6_hdr(skb)->daddr;
@@ -585,6 +585,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
     to->dev = from->dev;
     to->mark = from->mark;
 
+    skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
     to->tc_index = from->tc_index;
 #endif
@@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
     if (pmc) {
         im->idev = pmc->idev;
         im->mca_crcount = idev->mc_qrv;
-        im->mca_sfmode = pmc->mca_sfmode;
-        if (pmc->mca_sfmode == MCAST_INCLUDE) {
+        if (im->mca_sfmode == MCAST_INCLUDE) {
             im->mca_tomb = pmc->mca_tomb;
             im->mca_sources = pmc->mca_sources;
             for (psf = im->mca_sources; psf; psf = psf->sf_next)