This is the 4.9.160 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlxw/ucACgkQONu9yGCS
aT4vaRAAwEs6ppuH93NYZ1v1w2CA7PZLx7MAR3aiEr8rWQ5BnkmowPHvn0kC3VTs
643XN+iKfaq1SuWfdOs7+ACu1QGBfVsdyQhgIKwspAe7C6v284AF0NfsifnuN8/q
0eAgzFFkfngBgoh3oGLaeB0oPia3lSB2zG6hC2cyjeiEYEDcvJUA/ZHl9X1zFsJQ
J9Ikicn1b2gz6/N5VKqrBokCXcrz184Yz8yRrC0rK8VFq0N9N3VZA2NyWmb9/Iqp
Szj//Rh5LyjgrNSJHk0blNqB/5OdS7VsFl6LXuvE7NmUSLJJ0ou/BGLjw9R6TcOv
XFIvuMDw0D/dm/icKprG1LuVYfOomoNu82YMz8K96ymt7BS/SAELHFktzvK1s104
ITS2IvBhpqSPp86dx1vkmo4NEyKUSrff1sLIssjpd9xQMt1+SVP7O7kn02GgRCXz
T8PITSV2IQhHeNeBZVD8W4cLsrqn3sXFWDAVhmIw4J0VK6ghEGfaIBiwquRtNaz/
EsXSFKFs2hV++G8+f6vwQpHGyVSopGrgvvEEdqpWLcgjnYt1NhpfNxbEOBkfXXSd
U0NN1EYs9ade9fVcXrZze9Z8QVF6s4Rdf5unQs64iCp7FvowqzwshJuOoJTz1MB/
ugCFieeAZXwO7tlLoMiUG+j/k0BNdhNWPx8o7sfQf2teTWNfWtU=
=XYhR
-----END PGP SIGNATURE-----

Merge 4.9.160 into android-4.9

Changes in 4.9.160
    net: fix IPv6 prefix route residue
    vsock: cope with memory allocation failure at socket creation time
    hwmon: (lm80) Fix missing unlock on error in set_fan_div()
    net: Fix for_each_netdev_feature on Big endian
    net: phy: xgmiitorgmii: Support generic PHY status read
    net: stmmac: handle endianness in dwmac4_get_timestamp
    sky2: Increase D3 delay again
    vhost: correctly check the return value of translate_desc() in log_used()
    net: Add header for usage of fls64()
    tcp: tcp_v4_err() should be more careful
    net: Do not allocate page fragments that are not skb aligned
    tcp: clear icsk_backoff in tcp_write_queue_purge()
    vxlan: test dev->flags & IFF_UP before calling netif_rx()
    net: stmmac: Fix a race in EEE enable callback
    net: ipv4: use a dedicated counter for icmp_v4 redirect packets
    btrfs: Remove false alert when fiemap range is smaller than on-disk extent
    net/x25: do not hold the cpu too long in x25_new_lci()
    mISDN: fix a race in dev_expire_timer()
    ax25: fix possible use-after-free
    Linux 4.9.160

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit fd5657a6c7

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 159
+SUBLEVEL = 160
 EXTRAVERSION =
 NAME = Roaring Lionus
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
 	}
 
 	rv = lm80_read_value(client, LM80_REG_FANDIV);
-	if (rv < 0)
+	if (rv < 0) {
+		mutex_unlock(&data->update_lock);
 		return rv;
+	}
 	reg = (rv & ~(3 << (2 * (nr + 1))))
 	    | (data->fan_div[nr] << (2 * (nr + 1)));
 	lm80_write_value(client, LM80_REG_FANDIV, reg);
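For context, the pattern this fix restores, as a minimal userspace sketch (the pthread lock and read_reg() are illustrative stand-ins, not the driver's API): every early return taken while the lock is held must drop it first.

#include <pthread.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for lm80_read_value(): value >= 0 on success, -errno on failure */
static int read_reg(int reg) { return reg; }

static int set_fan_div_sketch(int reg)
{
	int rv;

	pthread_mutex_lock(&update_lock);
	rv = read_reg(reg);
	if (rv < 0) {
		/* the line the fix adds: unlock before bailing out */
		pthread_mutex_unlock(&update_lock);
		return rv;
	}
	/* ... read-modify-write the register under the lock ... */
	pthread_mutex_unlock(&update_lock);
	return 0;
}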
@@ -168,8 +168,8 @@ dev_expire_timer(unsigned long data)
 	spin_lock_irqsave(&timer->dev->lock, flags);
 	if (timer->id >= 0)
 		list_move_tail(&timer->list, &timer->dev->expired);
-	spin_unlock_irqrestore(&timer->dev->lock, flags);
 	wake_up_interruptible(&timer->dev->wait);
+	spin_unlock_irqrestore(&timer->dev->lock, flags);
 }
 
 static int
@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 200;
+	pdev->d3_delay = 300;
 
 	return 0;
@@ -237,15 +237,18 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
 static int dwmac4_rx_check_timestamp(void *desc)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
+	unsigned int rdes0 = le32_to_cpu(p->des0);
+	unsigned int rdes1 = le32_to_cpu(p->des1);
+	unsigned int rdes3 = le32_to_cpu(p->des3);
 	u32 own, ctxt;
 	int ret = 1;
 
-	own = p->des3 & RDES3_OWN;
-	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+	own = rdes3 & RDES3_OWN;
+	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
 		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
 
 	if (likely(!own && ctxt)) {
-		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
 			/* Corrupted value */
 			ret = -EINVAL;
 		else
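Background on the fix: these DMA descriptors are little-endian in memory, so dereferencing p->des3 directly tests byte-swapped bits on a big-endian CPU. A userspace sketch of what le32_to_cpu() amounts to (a stand-in, not the kernel macro):

#include <stdint.h>

static inline uint32_t le32_to_cpu_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);	/* big-endian host: swap bytes */
#else
	return v;			/* little-endian host: no-op */
#endif
}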
@@ -676,25 +676,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
 				     struct ethtool_eee *edata)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
 
-	priv->eee_enabled = edata->eee_enabled;
-
-	if (!priv->eee_enabled)
+	if (!edata->eee_enabled) {
 		stmmac_disable_eee_mode(priv);
-	else {
+	} else {
 		/* We are asking for enabling the EEE but it is safe
 		 * to verify all by invoking the eee_init function.
 		 * In case of failure it will return an error.
 		 */
-		priv->eee_enabled = stmmac_eee_init(priv);
-		if (!priv->eee_enabled)
+		edata->eee_enabled = stmmac_eee_init(priv);
+		if (!edata->eee_enabled)
 			return -EOPNOTSUPP;
-
-		/* Do not change tx_lpi_timer in case of failure */
-		priv->tx_lpi_timer = edata->tx_lpi_timer;
 	}
 
-	return phy_ethtool_set_eee(priv->phydev, edata);
+	ret = phy_ethtool_set_eee(dev->phydev, edata);
+	if (ret)
+		return ret;
+
+	priv->eee_enabled = edata->eee_enabled;
+	priv->tx_lpi_timer = edata->tx_lpi_timer;
+	return 0;
 }
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -42,7 +42,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
 	u16 val = 0;
 	int err;
 
-	err = priv->phy_drv->read_status(phydev);
+	if (priv->phy_drv->read_status)
+		err = priv->phy_drv->read_status(phydev);
+	else
+		err = genphy_read_status(phydev);
 	if (err < 0)
 		return err;
 
@@ -1911,7 +1911,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 	struct pcpu_sw_netstats *tx_stats, *rx_stats;
 	union vxlan_addr loopback;
 	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
-	struct net_device *dev = skb->dev;
+	struct net_device *dev;
 	int len = skb->len;
 
 	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -1931,8 +1931,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 #endif
 	}
 
+	rcu_read_lock();
+	dev = skb->dev;
+	if (unlikely(!(dev->flags & IFF_UP))) {
+		kfree_skb(skb);
+		goto drop;
+	}
+
 	if (dst_vxlan->flags & VXLAN_F_LEARN)
-		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
+		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source);
 
 	u64_stats_update_begin(&tx_stats->syncp);
 	tx_stats->tx_packets++;
@@ -1945,8 +1952,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 		rx_stats->rx_bytes += len;
 		u64_stats_update_end(&rx_stats->syncp);
 	} else {
+drop:
 		dev->stats.rx_dropped++;
 	}
+	rcu_read_unlock();
 }
 
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
@@ -1696,7 +1696,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
 
 	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
 			     len, iov, 64, VHOST_ACCESS_WO);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	for (i = 0; i < ret; i++) {
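The one-character change matters because translate_desc() returns a count of translated iov entries (>= 0) on success and a negative errno on failure, so "if (ret)" rejected every successful multi-entry translation. A sketch of that convention with hypothetical helpers:

#include <errno.h>

/* success: number of entries used (>= 0); failure: negative errno */
static int translate_sketch(int n_avail)
{
	return n_avail > 0 ? n_avail : -EINVAL;
}

static int log_used_sketch(int n_avail)
{
	int ret = translate_sketch(n_avail);

	if (ret < 0)		/* "if (ret)" would also reject success */
		return ret;
	/* ... iterate over the ret translated entries ... */
	return 0;
}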
@@ -4452,29 +4452,25 @@ try_submit_last:
 }
 
 /*
- * Sanity check for fiemap cache
+ * Emit last fiemap cache
  *
- * All fiemap cache should be submitted by emit_fiemap_extent()
- * Iteration should be terminated either by last fiemap extent or
- * fieinfo->fi_extents_max.
- * So no cached fiemap should exist.
+ * The last fiemap cache may still be cached in the following case:
+ * 0		      4k		    8k
+ * |<- Fiemap range ->|
+ * |<------------ First extent ----------->|
+ *
+ * In this case, the first extent range will be cached but not emitted.
+ * So we must emit it before ending extent_fiemap().
  */
-static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
-			      struct fiemap_extent_info *fieinfo,
-			      struct fiemap_cache *cache)
+static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
+				  struct fiemap_extent_info *fieinfo,
+				  struct fiemap_cache *cache)
 {
 	int ret;
 
 	if (!cache->cached)
 		return 0;
 
-	/* Small and recoverbale problem, only to info developer */
-#ifdef CONFIG_BTRFS_DEBUG
-	WARN_ON(1);
-#endif
-	btrfs_warn(fs_info,
-		   "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
-		   cache->offset, cache->phys, cache->len, cache->flags);
 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
 				      cache->len, cache->flags);
 	cache->cached = false;
@@ -4690,7 +4686,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 out_free:
 	if (!ret)
-		ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
+		ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
 	free_extent_map(em);
 out:
 	btrfs_free_path(path);
@@ -11,6 +11,8 @@
 #define _LINUX_NETDEV_FEATURES_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
 
 typedef u64 netdev_features_t;
 
@@ -137,8 +139,26 @@ enum {
 #define NETIF_F_BUSY_POLL	__NETIF_F(BUSY_POLL)
 #define NETIF_F_HW_TC		__NETIF_F(HW_TC)
 
-#define for_each_netdev_feature(mask_addr, bit)	\
-	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the next feature with the highest number of the range of start till 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+	/* like BITMAP_LAST_WORD_MASK() for u64
+	 * this sets the most significant 64 - start to 0.
+	 */
+	feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+	return fls64(feature) - 1;
+}
+
+/* This goes for the MSB to the LSB through the set feature bits,
+ * mask_addr should be a u64 and bit an int
+ */
+#define for_each_netdev_feature(mask_addr, bit)				\
+	for ((bit) = find_next_netdev_feature((mask_addr),		\
+					      NETDEV_FEATURE_COUNT);	\
+	     (bit) >= 0;						\
+	     (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
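Why the rewrite helps: the old walk cast the u64 mask to unsigned long * for for_each_set_bit(), which visits the two 32-bit words in the wrong order on 32-bit big-endian systems; the new walk stays in u64 arithmetic. A standalone demo of the MSB-to-LSB iteration (fls64() is emulated here with __builtin_clzll(), and FEATURE_COUNT stands in for NETDEV_FEATURE_COUNT):

#include <stdio.h>
#include <stdint.h>

#define FEATURE_COUNT 64

static int fls64_sketch(uint64_t x)	/* 1-based index of the MSB, 0 if none */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static int find_next_feature(uint64_t feature, unsigned long start)
{
	/* keep only bits below 'start', like BITMAP_LAST_WORD_MASK() */
	feature &= ~0ULL >> (-start & 63);
	return fls64_sketch(feature) - 1;	/* -1 when no bit is left */
}

int main(void)
{
	uint64_t mask = (1ULL << 3) | (1ULL << 17) | (1ULL << 40);
	int bit;

	for (bit = find_next_feature(mask, FEATURE_COUNT);
	     bit >= 0;
	     bit = find_next_feature(mask, bit - 1))
		printf("feature bit %d\n", bit);	/* 40, then 17, then 3 */
	return 0;
}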
@@ -199,6 +199,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
 
 void __ax25_put_route(ax25_route *ax25_rt);
 
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+	read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+	read_unlock(&ax25_route_lock);
+}
+
 static inline void ax25_put_route(ax25_route *ax25_rt)
 {
 	if (atomic_dec_and_test(&ax25_rt->refcount))
@@ -40,6 +40,7 @@ struct inet_peer {
 
 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
+	u32			n_redirects;
 	unsigned long		rate_last;
 	union {
 		struct list_head	gc_list;
@@ -1530,6 +1530,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
 		sk_wmem_free_skb(sk, skb);
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
+	inet_csk(sk)->icsk_backoff = 0;
 }
 
 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 	dst = (ax25_address *)(bp + 1);
 	src = (ax25_address *)(bp + 8);
 
+	ax25_route_lock_use();
 	route = ax25_get_route(dst, NULL);
 	if (route) {
 		digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 	ax25_queue_xmit(skb, dev);
 
 put:
-	if (route)
-		ax25_put_route(route);
-
+	ax25_route_lock_unuse();
 	return NETDEV_TX_OK;
 }
 
@@ -40,7 +40,7 @@
 #include <linux/export.h>
 
 static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
 
 void ax25_rt_device_down(struct net_device *dev)
 {
@@ -349,6 +349,7 @@ const struct file_operations ax25_route_fops = {
  *	Find AX.25 route
  *
  *	Only routes with a reference count of zero can be destroyed.
+ *	Must be called with ax25_route_lock read locked.
  */
 ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 {
@@ -356,7 +357,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 	ax25_route *ax25_def_rt = NULL;
 	ax25_route *ax25_rt;
 
-	read_lock(&ax25_route_lock);
 	/*
 	 *	Bind to the physical interface we heard them on, or the default
 	 *	route if none is found;
@@ -379,11 +379,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 	if (ax25_spe_rt != NULL)
 		ax25_rt = ax25_spe_rt;
 
-	if (ax25_rt != NULL)
-		ax25_hold_route(ax25_rt);
-
-	read_unlock(&ax25_route_lock);
-
 	return ax25_rt;
 }
 
@@ -414,9 +409,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 	ax25_route *ax25_rt;
 	int err = 0;
 
-	if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+	ax25_route_lock_use();
+	ax25_rt = ax25_get_route(addr, NULL);
+	if (!ax25_rt) {
+		ax25_route_lock_unuse();
 		return -EHOSTUNREACH;
-
+	}
 	if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
 		err = -EHOSTUNREACH;
 		goto put;
@@ -451,8 +449,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 	}
 
 put:
-	ax25_put_route(ax25_rt);
-
+	ax25_route_lock_unuse();
 	return err;
 }
 
@@ -6909,7 +6909,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(upper->wanted_features & feature)
 		    && (features & feature)) {
@@ -6929,7 +6929,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(features & feature) && (lower->features & feature)) {
 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -383,6 +383,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -396,6 +398,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
 void *napi_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
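SKB_DATA_ALIGN() rounds the requested size up so page fragments always start on an skb-suitable boundary. A userspace sketch of the rounding, assuming a 64-byte stand-in for SMP_CACHE_BYTES:

#include <stddef.h>

#define CACHE_BYTES 64	/* illustrative stand-in for SMP_CACHE_BYTES */

static size_t skb_data_align_sketch(size_t x)
{
	/* round up to the next multiple of CACHE_BYTES */
	return (x + CACHE_BYTES - 1) & ~(size_t)(CACHE_BYTES - 1);
}
/* skb_data_align_sketch(1) == 64, skb_data_align_sketch(65) == 128 */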
@@ -448,6 +448,7 @@ relookup:
 		atomic_set(&p->rid, 0);
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
+		p->n_redirects = 0;
 		/* 60*HZ is arbitrary, but chosen enough high so that the first
 		 * calculation of tokens is at its maximum.
 		 */
@@ -886,13 +886,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 		peer->rate_tokens = 0;
+		peer->n_redirects = 0;
+	}
 
 	/* Too many ignored redirects; do not send anything
 	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (peer->rate_tokens >= ip_rt_redirect_number) {
+	if (peer->n_redirects >= ip_rt_redirect_number) {
 		peer->rate_last = jiffies;
 		goto out_put_peer;
 	}
@@ -909,6 +911,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
 		++peer->rate_tokens;
+		++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
 		    peer->rate_tokens == ip_rt_redirect_number)
@@ -2315,7 +2315,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->write_seq += tp->max_window + 2;
 	if (tp->write_seq == 0)
 		tp->write_seq = 1;
-	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->packets_out = 0;
@@ -469,14 +469,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (sock_owned_by_user(sk))
 			break;
 
+		skb = tcp_write_queue_head(sk);
+		if (WARN_ON_ONCE(!skb))
+			break;
+
 		icsk->icsk_backoff--;
 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 					       TCP_TIMEOUT_INIT;
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-		skb = tcp_write_queue_head(sk);
-		BUG_ON(!skb);
-
 		remaining = icsk->icsk_rto -
 			    min(icsk->icsk_rto,
 				tcp_time_stamp - tcp_skb_timestamp(skb));
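For reference, inet_csk_rto_backoff() (kept as context above) scales the base RTO by the current backoff shift and clamps it to a maximum; roughly, as a sketch rather than a verbatim copy of the kernel helper:

#include <stdint.h>

static uint64_t rto_backoff_sketch(uint64_t rto, unsigned int backoff,
				   uint64_t max_when)
{
	uint64_t when = rto << backoff;		/* exponential backoff */

	return when > max_when ? max_when : when;	/* clamp (TCP_RTO_MAX) */
}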
@@ -1078,7 +1078,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
 		if (ifa == ifp)
 			continue;
-		if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+		if (ifa->prefix_len != ifp->prefix_len ||
+		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
 				       ifp->prefix_len))
 			continue;
 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
@@ -1656,6 +1656,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
 
 static void vmci_transport_destruct(struct vsock_sock *vsk)
 {
+	/* transport can be NULL if we hit a failure at init() time */
+	if (!vmci_trans(vsk))
+		return;
+
 	/* Ensure that the detach callback doesn't use the sk/vsk
 	 * we are about to destruct.
 	 */
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
 	unsigned int lci = 1;
 	struct sock *sk;
 
-	read_lock_bh(&x25_list_lock);
-
-	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+	while ((sk = x25_find_socket(lci, nb)) != NULL) {
 		sock_put(sk);
 		if (++lci == 4096) {
 			lci = 0;
 			break;
 		}
+		cond_resched();
 	}
 
-	read_unlock_bh(&x25_list_lock);
 	return lci;
 }
 