This is the 4.9.155 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlxbDFsACgkQONu9yGCS
 aT4TmQ/+PCgz7TKTu1/Sh7ScmbSjlYFfaQ25K9NzqWDuaTOXCsvrWY4IDdbk7fxZ
 WnM8WPpTUfKaRwUVL3T32uwv/uMM8TnXN2r4dB9TH+WF/UK1ovLHQGO5zGWS9erY
 dvckRjkRuPe27KYd/sPSA7PRBiWuw1EFqcBIdAuC8OJK1knm3W95C8XVeIkVfey2
 /NFIvZELYEOCMzXwPqXbeFG553VtHVGB2GRJqEqZzqZ+JGnUCyBpCgvuwbrQ706l
 /Smvz23mmElUGzdTXr4uRPmf2g8bA3a+08/gQM4YPpZYRUMbcCVE3+KaYf3jjJPR
 Inj+cKBhF8R6JwkJayzdBJ1KbG6e6DaL2yANqNvoeYCkyP+4GNmoWKebFYklrXr/
 jn/btnJtHwSBDVufdgW6uQ039UE66Wfvvi7QrkCgTjebEyf6jU3SnB8Z7HpZZMCg
 dwgO6rHKklm6cSJTBZsIDyBd4oj19bTTvOiUcaayQXfeyjlrbWXhH6UOEBze54uT
 Ho8yjCq9r2TiO6g9mJOfkkvep1hgNVgabnOyA/papUx3my9sGkUn1Rqg1Pbm9OEF
 PV4p853t4+Qe5iBq+yXgacrCg+21q9Xg3ct6QW/c/Quz4UirR9Ayw93C4ej4rJYZ
 79/mADmkpH+A1ufP2F80UoWo0IN9oizecfYHOZ+6useUEVgIRik=
 =QoXP
 -----END PGP SIGNATURE-----

Merge 4.9.155 into android-4.9

Changes in 4.9.155
	Fix "net: ipv4: do not handle duplicate fragments as overlapping"
	fs: add the fsnotify call to vfs_iter_write
	ipv6: Consider sk_bound_dev_if when binding a socket to an address
	l2tp: copy 4 more bytes to linear part if necessary
	net/mlx4_core: Add masking for a few queries on HCA caps
	netrom: switch to sock timer API
	net/rose: fix NULL ax25_cb kernel panic
	ucc_geth: Reset BQL queue when stopping device
	net/mlx5e: Allow MAC invalidation while spoofchk is ON
	l2tp: remove l2specific_len dependency in l2tp_core
	l2tp: fix reading optional fields of L2TPv3
	ipvlan, l3mdev: fix broken l3s mode wrt local routes
	CIFS: Do not count -ENODATA as failure for query directory
	fs/dcache: Fix incorrect nr_dentry_unused accounting in shrink_dcache_sb()
	ARM: cns3xxx: Fix writing to wrong PCI config registers after alignment
	arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
	arm64: hyp-stub: Forbid kprobing of the hyp-stub
	arm64: hibernate: Clean the __hyp_text to PoC after resume
	gfs2: Revert "Fix loop in gfs2_rbm_find"
	platform/x86: asus-nb-wmi: Map 0x35 to KEY_SCREENLOCK
	platform/x86: asus-nb-wmi: Drop mapping of 0x33 and 0x34 scan codes
	mmc: sdhci-iproc: handle mmc_of_parse() errors during probe
	kernel/exit.c: release ptraced tasks before zap_pid_ns_processes
	mm, oom: fix use-after-free in oom_kill_process
	mm: hwpoison: use do_send_sig_info() instead of force_sig()
	mm: migrate: don't rely on __PageMovable() of newpage after unlocking it
	cifs: Always resolve hostname before reconnecting
	drivers: core: Remove glue dirs from sysfs earlier
	fs: don't scan the inode cache before SB_BORN is set
	fanotify: fix handling of events on child sub-directory
	Linux 4.9.155

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 32e6695e35
Greg Kroah-Hartman, 2019-02-07 09:14:43 +01:00
34 changed files with 292 additions and 106 deletions

--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 154
+SUBLEVEL = 155
 EXTRAVERSION =
 NAME = Roaring Lionus

--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
     } else /* remote PCI bus */
         base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-    return base + (where & 0xffc) + (devfn << 12);
+    return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,

--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -294,8 +294,10 @@ int swsusp_arch_suspend(void)
         dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
         /* Clean kvm setup code to PoC? */
-        if (el2_reset_needed())
+        if (el2_reset_needed()) {
             dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+            dcache_clean_range(__hyp_text_start, __hyp_text_end);
+        }
 
         /*
          * Tell the hibernation core that we've just restored

--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
     .text
+    .pushsection    .hyp.text, "ax"
+
     .align 11
 
 ENTRY(__hyp_stub_vectors)

--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
      * we end up running with module randomization disabled.
      */
     module_alloc_base = (u64)_etext - MODULES_VSIZE;
+    __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
     /*
      * Try to map the FDT early. If this fails, we simply bail,

--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -862,6 +862,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
         return;
 
     mutex_lock(&gdp_mutex);
+    if (!kobject_has_children(glue_dir))
+        kobject_del(glue_dir);
     kobject_put(glue_dir);
     mutex_unlock(&gdp_mutex);
 }

--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -242,7 +242,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
     iproc_host->data = iproc_data;
 
-    mmc_of_parse(host->mmc);
+    ret = mmc_of_parse(host->mmc);
+    if (ret)
+        goto err;
+
     sdhci_get_of_property(pdev);
 
     host->mmc->caps |= iproc_host->data->mmc_caps;

--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
     u16 i, j;
     u8 __iomem *bd;
 
+    netdev_reset_queue(ugeth->ndev);
+
     ug_info = ugeth->ug_info;
     uf_info = &ug_info->uf_info;
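The netdev_reset_queue() call matters because the driver participates in byte queue limits (BQL): bytes claimed via netdev_sent_queue() at transmit time must be returned via netdev_completed_queue(), and any accounting still in flight has to be discarded when the TX ring is torn down, or the queue can stay throttled after a restart. A minimal sketch of that contract in a generic driver (my_xmit, my_tx_complete and my_stop are illustrative names, not ucc_geth functions):

/* Illustrative BQL pairing; ring management and error handling omitted. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned int len = skb->len;

    /* ... post skb to the hardware TX ring here ... */

    netdev_sent_queue(dev, len);    /* account bytes handed to hardware */
    return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int pkts,
                           unsigned int bytes)
{
    netdev_completed_queue(dev, pkts, bytes);    /* return completed bytes */
}

static void my_stop(struct net_device *dev)
{
    /* the ring contents are dropped, so drop the BQL accounting with them */
    netdev_reset_queue(dev);
}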

--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2037,9 +2037,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
     struct mlx4_cmd_mailbox *mailbox;
     __be32 *outbox;
+    u64 qword_field;
     u32 dword_field;
-    int err;
+    u16 word_field;
     u8 byte_field;
+    int err;
     static const u8 a0_dmfs_query_hw_steering[] =  {
         [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
         [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2067,19 +2069,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
     /* QPC/EEC/CQC/EQC/RDMARC attributes */
-    MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
-    MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
-    MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
-    MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
-    MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
-    MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
-    MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
-    MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
-    MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
-    MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
-    MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-    MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-    MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+    MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+    param->qpc_base = qword_field & ~((u64)0x1f);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+    param->log_num_qps = byte_field & 0x1f;
+    MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+    param->srqc_base = qword_field & ~((u64)0x1f);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+    param->log_num_srqs = byte_field & 0x1f;
+    MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+    param->cqc_base = qword_field & ~((u64)0x1f);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+    param->log_num_cqs = byte_field & 0x1f;
+    MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+    param->altc_base = qword_field;
+    MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+    param->auxc_base = qword_field;
+    MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+    param->eqc_base = qword_field & ~((u64)0x1f);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+    param->log_num_eqs = byte_field & 0x1f;
+    MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+    param->num_sys_eqs = word_field & 0xfff;
+    MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+    param->rdmarc_base = qword_field & ~((u64)0x1f);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+    param->log_rd_per_qp = byte_field & 0x7;
 
     MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
     if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2098,22 +2113,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
     /* steering attributes */
     if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
         MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-        MLX4_GET(param->log_mc_entry_sz, outbox,
-             INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-        MLX4_GET(param->log_mc_table_sz, outbox,
-             INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-        MLX4_GET(byte_field, outbox,
-             INIT_HCA_FS_A0_OFFSET);
+        MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+        param->log_mc_entry_sz = byte_field & 0x1f;
+        MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+        param->log_mc_table_sz = byte_field & 0x1f;
+        MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
         param->dmfs_high_steer_mode =
             a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
     } else {
         MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-        MLX4_GET(param->log_mc_entry_sz, outbox,
-             INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-        MLX4_GET(param->log_mc_hash_sz, outbox,
-             INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-        MLX4_GET(param->log_mc_table_sz, outbox,
-             INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+        MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+        param->log_mc_entry_sz = byte_field & 0x1f;
+        MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+        param->log_mc_hash_sz = byte_field & 0x1f;
+        MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+        param->log_mc_table_sz = byte_field & 0x1f;
     }
 
     /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2137,15 +2151,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
     /* TPT attributes */
     MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
-    MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-    MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+    MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+    param->mw_enabled = byte_field >> 7;
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+    param->log_mpt_sz = byte_field & 0x3f;
     MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
     MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
 
     /* UAR attributes */
     MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-    MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+    MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+    param->log_uar_sz = byte_field & 0xf;
 
     /* phv_check enable */
     MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);

--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1216,14 +1216,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
     int err = 0;
     u8 *smac_v;
 
-    if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-        mlx5_core_warn(esw->dev,
-                       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-                       vport->vport);
-        return -EPERM;
-    }
-
     esw_vport_cleanup_ingress_rules(esw, vport);
     if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1709,13 +1701,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
     mutex_lock(&esw->state_lock);
     evport = &esw->vports[vport];
 
-    if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+    if (evport->info.spoofchk && !is_valid_ether_addr(mac))
         mlx5_core_warn(esw->dev,
-                       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+                       "Set invalid MAC while spoofchk is on, vport(%d)\n",
                        vport);
-        err = -EPERM;
-        goto unlock;
-    }
 
     err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
     if (err) {
@@ -1859,6 +1848,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
     evport = &esw->vports[vport];
     pschk = evport->info.spoofchk;
     evport->info.spoofchk = spoofchk;
+    if (pschk && !is_valid_ether_addr(evport->info.mac))
+        mlx5_core_warn(esw->dev,
+                       "Spoofchk in set while MAC is invalid, vport(%d)\n",
+                       evport->vport);
     if (evport->enabled && esw->mode == SRIOV_LEGACY)
         err = esw_vport_ingress_config(esw, evport);
     if (err)

--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -85,12 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
             err = ipvlan_register_nf_hook();
             if (!err) {
                 mdev->l3mdev_ops = &ipvl_l3mdev_ops;
-                mdev->priv_flags |= IFF_L3MDEV_MASTER;
+                mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
             } else
                 goto fail;
         } else if (port->mode == IPVLAN_MODE_L3S) {
             /* Old mode was L3S */
-            mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+            mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
             ipvlan_unregister_nf_hook();
             mdev->l3mdev_ops = NULL;
         }
@@ -158,7 +158,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
     dev->priv_flags &= ~IFF_IPVLAN_MASTER;
     if (port->mode == IPVLAN_MODE_L3S) {
-        dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+        dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
         ipvlan_unregister_nf_hook();
         dev->l3mdev_ops = NULL;
     }

--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -477,8 +477,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
     { KE_KEY, 0x30, { KEY_VOLUMEUP } },
     { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
     { KE_KEY, 0x32, { KEY_MUTE } },
-    { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
-    { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+    { KE_KEY, 0x35, { KEY_SCREENLOCK } },
     { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
     { KE_KEY, 0x41, { KEY_NEXTSONG } },
     { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */

--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -48,6 +48,7 @@
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
+#include "dns_resolve.h"
 #include "ntlmssp.h"
 #include "nterr.h"
 #include "rfc1002pdu.h"
@@ -306,6 +307,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
 static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
                                   const char *devname);
 
+/*
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
+ * get their ip addresses changed at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+    int rc;
+    int len;
+    char *unc, *ipaddr = NULL;
+
+    if (!server->hostname)
+        return -EINVAL;
+
+    len = strlen(server->hostname) + 3;
+
+    unc = kmalloc(len, GFP_KERNEL);
+    if (!unc) {
+        cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+        return -ENOMEM;
+    }
+    snprintf(unc, len, "\\\\%s", server->hostname);
+
+    rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+    kfree(unc);
+
+    if (rc < 0) {
+        cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+                 __func__, server->hostname, rc);
+        return rc;
+    }
+
+    rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+                              strlen(ipaddr));
+    kfree(ipaddr);
+
+    return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+    return 0;
+}
+#endif
+
 /*
  * cifs tcp session reconnection
  *
@@ -403,6 +451,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
         rc = generic_ip_connect(server);
         if (rc) {
             cifs_dbg(FYI, "reconnect error %d\n", rc);
+            rc = reconn_set_ipaddr(server);
+            if (rc) {
+                cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                         __func__, rc);
+            }
             mutex_unlock(&server->srv_mutex);
             msleep(3000);
         } else {

--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2686,8 +2686,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
     if (rc) {
         if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
             srch_inf->endOfSearch = true;
             rc = 0;
-        }
-        cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+        } else
+            cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
         goto qdir_exit;
     }

--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1164,15 +1164,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-    long freed;
-
     do {
         LIST_HEAD(dispose);
 
-        freed = list_lru_walk(&sb->s_dentry_lru,
+        list_lru_walk(&sb->s_dentry_lru,
             dentry_lru_isolate_shrink, &dispose, 1024);
-
-        this_cpu_sub(nr_dentry_unused, freed);
         shrink_dentry_list(&dispose);
         cond_resched();
     } while (list_lru_count(&sb->s_dentry_lru) > 0);

--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1705,9 +1705,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
             goto next_iter;
         }
         if (ret == -E2BIG) {
-            n += rbm->bii - initial_bii;
             rbm->bii = 0;
             rbm->offset = 0;
+            n += (rbm->bii - initial_bii);
             goto res_covered_end_of_rgrp;
         }
         return ret;

--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -101,9 +101,9 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
     parent = dget_parent(dentry);
     p_inode = parent->d_inode;
 
-    if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+    if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
         __fsnotify_update_child_dentry_flags(p_inode);
-    else if (p_inode->i_fsnotify_mask & mask) {
+    } else if (p_inode->i_fsnotify_mask & mask & ~FS_EVENT_ON_CHILD) {
         struct name_snapshot name;
 
         /* we are notifying a parent so come up with the new mask which
@@ -207,6 +207,10 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
     else
         mnt = NULL;
 
+    /* An event "on child" is not intended for a mount mark */
+    if (mask & FS_EVENT_ON_CHILD)
+        mnt = NULL;
+
     /*
      * Optimization: srcu_read_lock() has a memory barrier which can
      * be expensive. It protects walking the *_fsnotify_marks lists.

--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -389,8 +389,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
     iter->type |= WRITE;
     ret = file->f_op->write_iter(&kiocb, iter);
     BUG_ON(ret == -EIOCBQUEUED);
-    if (ret > 0)
+    if (ret > 0) {
         *ppos = kiocb.ki_pos;
+        fsnotify_modify(file);
+    }
     return ret;
 }
 EXPORT_SYMBOL(vfs_iter_write);

--- a/fs/super.c
+++ b/fs/super.c
@@ -119,13 +119,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
     sb = container_of(shrink, struct super_block, s_shrink);
 
     /*
-     * Don't call trylock_super as it is a potential
-     * scalability bottleneck. The counts could get updated
-     * between super_cache_count and super_cache_scan anyway.
-     * Call to super_cache_count with shrinker_rwsem held
-     * ensures the safety of call to list_lru_shrink_count() and
-     * s_op->nr_cached_objects().
+     * We don't call trylock_super() here as it is a scalability bottleneck,
+     * so we're exposed to partial setup state. The shrinker rwsem does not
+     * protect filesystem operations backing list_lru_shrink_count() or
+     * s_op->nr_cached_objects(). Counts can change between
+     * super_cache_count and super_cache_scan, so we really don't need locks
+     * here.
+     *
+     * However, if we are currently mounting the superblock, the underlying
+     * filesystem might be in a state of partial construction and hence it
+     * is dangerous to access it. trylock_super() uses a MS_BORN check to
+     * avoid this situation, so do the same here. The memory barrier is
+     * matched with the one in mount_fs() as we don't hold locks here.
      */
+    if (!(sb->s_flags & MS_BORN))
+        return 0;
+    smp_rmb();
+
     if (sb->s_op && sb->s_op->nr_cached_objects)
         total_objects = sb->s_op->nr_cached_objects(sb, sc);
@@ -1211,6 +1221,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, struct vfsm
     sb = root->d_sb;
     BUG_ON(!sb);
     WARN_ON(!sb->s_bdi);
+
+    /*
+     * Write barrier is for super_cache_count(). We place it before setting
+     * MS_BORN as the data dependency between the two functions is the
+     * superblock structure contents that we just set up, not the MS_BORN
+     * flag.
+     */
+    smp_wmb();
     sb->s_flags |= MS_BORN;
 
     error = security_sb_kern_mount(sb, flags, secdata);
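The two comment blocks above describe a classic publish/observe pairing of barriers. A minimal sketch of the same idiom with hypothetical names (struct obj and its fields are illustrative, not kernel structures):

struct obj {
    int a, b;
    bool born;    /* "published" flag, analogous to MS_BORN */
};

/* Writer: fully initialise the contents, then publish the flag. */
static void publish(struct obj *o, int a, int b)
{
    o->a = a;
    o->b = b;
    smp_wmb();       /* order the content stores before the flag store ... */
    o->born = true;  /* ... analogous to setting MS_BORN in mount_fs() */
}

/* Lockless reader: check the flag before touching the contents. */
static int observe(struct obj *o)
{
    if (!o->born)    /* not published yet: treat as empty */
        return 0;
    smp_rmb();       /* pairs with smp_wmb() in publish() */
    return o->a + o->b;    /* content loads cannot be hoisted above the check */
}

The read barrier mirrors super_cache_count(): it keeps the content loads from being reordered before the flag check, so an observer never sees MS_BORN set while reading half-constructed state.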

--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj);
 extern const void *kobject_namespace(struct kobject *kobj);
 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+    WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0);
+
+    return kobj->sd && kobj->sd->dir.subdirs;
+}
+
 struct kobj_type {
     void (*release)(struct kobject *kobj);
     const struct sysfs_ops *sysfs_ops;

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1368,6 +1368,7 @@ struct net_device_ops {
  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
  *    entity (i.e. the master device for bridged veth)
  * @IFF_MACSEC: device is a MACsec device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
     IFF_802_1Q_VLAN          = 1<<0,
@@ -1398,6 +1399,7 @@ enum netdev_priv_flags {
     IFF_RXFH_CONFIGURED      = 1<<25,
     IFF_PHONY_HEADROOM       = 1<<26,
     IFF_MACSEC               = 1<<27,
+    IFF_L3MDEV_RX_HANDLER    = 1<<28,
 };
 
 #define IFF_802_1Q_VLAN          IFF_802_1Q_VLAN
@@ -1427,6 +1429,7 @@ enum netdev_priv_flags {
 #define IFF_TEAM                 IFF_TEAM
 #define IFF_RXFH_CONFIGURED      IFF_RXFH_CONFIGURED
 #define IFF_MACSEC               IFF_MACSEC
+#define IFF_L3MDEV_RX_HANDLER    IFF_L3MDEV_RX_HANDLER
 
 /**
  * struct net_device - The DEVICE structure.
@@ -4244,6 +4247,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
     return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+    return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
     return dev->priv_flags & IFF_L3MDEV_MASTER;

--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
     if (netif_is_l3_slave(skb->dev))
         master = netdev_master_upper_dev_get_rcu(skb->dev);
-    else if (netif_is_l3_master(skb->dev))
+    else if (netif_is_l3_master(skb->dev) ||
+             netif_has_l3_rx_handler(skb->dev))
         master = skb->dev;
 
     if (master && master->l3mdev_ops->l3mdev_l3_rcv)

--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -527,12 +527,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
     return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+                                             struct list_head *dead)
     __releases(&tasklist_lock)
     __acquires(&tasklist_lock)
 {
     struct pid_namespace *pid_ns = task_active_pid_ns(father);
     struct task_struct *reaper = pid_ns->child_reaper;
+    struct task_struct *p, *n;
 
     if (likely(reaper != father))
         return reaper;
@@ -548,6 +550,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
         panic("Attempted to kill init! exitcode=0x%08x\n",
             father->signal->group_exit_code ?: father->exit_code);
     }
+
+    list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+        list_del_init(&p->ptrace_entry);
+        release_task(p);
+    }
+
     zap_pid_ns_processes(pid_ns);
     write_lock_irq(&tasklist_lock);
@@ -634,7 +642,7 @@ static void forget_original_parent(struct task_struct *father,
     exit_ptrace(father, dead);
 
     /* Can drop and reacquire tasklist_lock */
-    reaper = find_child_reaper(father);
+    reaper = find_child_reaper(father, dead);
     if (list_empty(&father->children))
         return;

--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -336,7 +336,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
         if (fail || tk->addr_valid == 0) {
             pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                    pfn, tk->tsk->comm, tk->tsk->pid);
-            force_sig(SIGKILL, tk->tsk);
+            do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+                             tk->tsk, PIDTYPE_PID);
         }
 
         /*

--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1044,10 +1044,13 @@ out:
      * If migration is successful, decrease refcount of the newpage
      * which will not free the page because new page owner increased
      * refcounter. As well, if it is LRU page, add the page to LRU
-     * list in here.
+     * list in here. Use the old state of the isolated source page to
+     * determine if we migrated a LRU page. newpage was already unlocked
+     * and possibly modified by its owner - don't rely on the page
+     * state.
      */
     if (rc == MIGRATEPAGE_SUCCESS) {
-        if (unlikely(__PageMovable(newpage)))
+        if (unlikely(!is_lru))
             put_page(newpage);
         else
             putback_lru_page(newpage);

--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -861,6 +861,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
      * still freeing memory.
      */
     read_lock(&tasklist_lock);
+
+    /*
+     * The task 'p' might have already exited before reaching here. The
+     * put_task_struct() will free task_struct 'p' while the loop still try
+     * to access the field of 'p', so, get an extra reference.
+     */
+    get_task_struct(p);
     for_each_thread(p, t) {
         list_for_each_entry(child, &t->children, sibling) {
             unsigned int child_points;
@@ -880,6 +887,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
             }
         }
     }
+    put_task_struct(p);
     read_unlock(&tasklist_lock);
 
     p = find_lock_task_mm(victim);

--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -423,6 +423,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
      * fragment.
      */
 
+    err = -EINVAL;
     /* Find out where to put this fragment. */
     prev_tail = qp->q.fragments_tail;
     if (!prev_tail)
@@ -499,7 +500,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 discard_qp:
     inet_frag_kill(&qp->q);
-    err = -EINVAL;
     __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
     kfree_skb(skb);

--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -375,6 +375,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                 err = -EINVAL;
                 goto out_unlock;
             }
+        }
+
+        if (sk->sk_bound_dev_if) {
             dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
             if (!dev) {
                 err = -ENODEV;

--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S         0x40000000
 #define L2TP_SL_SEQ_MASK      0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ        10
-#define L2TP_HDR_SIZE_NOSEQ      6
+#define L2TP_HDR_SIZE_MAX        14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS  0
@@ -796,11 +795,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                   "%s: recv data ns=%u, session nr=%u\n",
                   session->name, ns, session->nr);
         }
+        ptr += 4;
     }
 
-    /* Advance past L2-specific header, if present */
-    ptr += session->l2specific_len;
-
     if (L2TP_SKB_CB(skb)->has_seq) {
         /* Received a packet with sequence numbers. If we're the LNS,
          * check if we sre sending sequence numbers and if not,
@@ -936,7 +933,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
     __skb_pull(skb, sizeof(struct udphdr));
 
     /* Short packet? */
-    if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+    if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
         l2tp_info(tunnel, L2TP_MSG_DATA,
                   "%s: recv short packet (len=%d)\n",
                   tunnel->name, skb->len);
@@ -1015,6 +1012,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
         goto error;
     }
 
+    if (tunnel->version == L2TP_HDR_VER_3 &&
+        l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+        goto error;
+
     l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
     l2tp_session_dec_refcount(session);
@@ -1114,21 +1115,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
         memcpy(bufp, &session->cookie[0], session->cookie_len);
         bufp += session->cookie_len;
     }
-    if (session->l2specific_len) {
-        if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
-            u32 l2h = 0;
-            if (session->send_seq) {
-                l2h = 0x40000000 | session->ns;
-                session->ns++;
-                session->ns &= 0xffffff;
-                l2tp_dbg(session, L2TP_MSG_SEQ,
-                         "%s: updated ns to %u\n",
-                         session->name, session->ns);
-            }
+    if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+        u32 l2h = 0;
 
-            *((__be32 *) bufp) = htonl(l2h);
-        }
-        bufp += session->l2specific_len;
+        if (session->send_seq) {
+            l2h = 0x40000000 | session->ns;
+            session->ns++;
+            session->ns &= 0xffffff;
+            l2tp_dbg(session, L2TP_MSG_SEQ,
+                     "%s: updated ns to %u\n",
+                     session->name, session->ns);
+        }
+
+        *((__be32 *)bufp) = htonl(l2h);
+        bufp += 4;
     }
 
     return bufp - optr;
@@ -1805,7 +1805,7 @@ int l2tp_session_delete(struct l2tp_session *session)
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
 /* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
  */
 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
@@ -1814,7 +1814,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
         if (session->send_seq)
             session->hdr_len += 4;
     } else {
-        session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
+        session->hdr_len = 4 + session->cookie_len;
+        session->hdr_len += l2tp_get_l2specific_len(session);
         if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
             session->hdr_len += 4;
     }

--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -313,6 +313,37 @@ do { \
 #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
 #endif
 
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+    switch (session->l2specific_type) {
+    case L2TP_L2SPECTYPE_DEFAULT:
+        return 4;
+    case L2TP_L2SPECTYPE_NONE:
+    default:
+        return 0;
+    }
+}
+
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+                                               unsigned char **ptr, unsigned char **optr)
+{
+    int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+    if (opt_len > 0) {
+        int off = *ptr - *optr;
+
+        if (!pskb_may_pull(skb, off + opt_len))
+            return -1;
+
+        if (skb->data != *optr) {
+            *optr = skb->data;
+            *ptr = skb->data + off;
+        }
+    }
+
+    return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...) \
 do { \
     if (((ptr)->debug) & (type)) \
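The pointer shuffling in l2tp_v3_ensure_opt_in_linear() exists because pskb_may_pull() may reallocate the skb head when it copies paged bytes into the linear area, which moves skb->data and invalidates previously computed pointers into the packet. The general pattern, as an illustrative helper (pull_and_rebase is a made-up name, not a kernel API):

/* Illustrative: make 'need' more bytes linear at *cursor, surviving a
 * possible head reallocation inside pskb_may_pull().
 */
static int pull_and_rebase(struct sk_buff *skb, unsigned char **cursor,
                           unsigned int need)
{
    int off = *cursor - skb->data;    /* remember the offset, not the pointer */

    if (!pskb_may_pull(skb, off + need))
        return -1;                    /* packet too short */

    *cursor = skb->data + off;        /* skb->data may have moved: rebase */
    return 0;
}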

--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -157,6 +157,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
     }
 
+    if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+        goto discard_sess;
+
     l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
     l2tp_session_dec_refcount(session);

--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -169,6 +169,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
     }
 
+    if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+        goto discard_sess;
+
     l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
     l2tp_session_dec_refcount(session);

--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
 {
     struct nr_sock *nr = nr_sk(sk);
 
-    mod_timer(&nr->t1timer, jiffies + nr->t1);
+    sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 
 void nr_start_t2timer(struct sock *sk)
 {
     struct nr_sock *nr = nr_sk(sk);
 
-    mod_timer(&nr->t2timer, jiffies + nr->t2);
+    sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 
 void nr_start_t4timer(struct sock *sk)
 {
     struct nr_sock *nr = nr_sk(sk);
 
-    mod_timer(&nr->t4timer, jiffies + nr->t4);
+    sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 
 void nr_start_idletimer(struct sock *sk)
@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
     struct nr_sock *nr = nr_sk(sk);
 
     if (nr->idle > 0)
-        mod_timer(&nr->idletimer, jiffies + nr->idle);
+        sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 
 void nr_start_heartbeat(struct sock *sk)
 {
-    mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+    sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 
 void nr_stop_t1timer(struct sock *sk)
 {
-    del_timer(&nr_sk(sk)->t1timer);
+    sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 
 void nr_stop_t2timer(struct sock *sk)
 {
-    del_timer(&nr_sk(sk)->t2timer);
+    sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 
 void nr_stop_t4timer(struct sock *sk)
 {
-    del_timer(&nr_sk(sk)->t4timer);
+    sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 
 void nr_stop_idletimer(struct sock *sk)
 {
-    del_timer(&nr_sk(sk)->idletimer);
+    sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 
 void nr_stop_heartbeat(struct sock *sk)
 {
-    del_timer(&sk->sk_timer);
+    sk_stop_timer(sk, &sk->sk_timer);
 }
 
 int nr_t1timer_running(struct sock *sk)
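The switch from mod_timer()/del_timer() to the sk_* wrappers is not cosmetic: the wrappers pin the socket while a timer is pending, closing the window in which a netrom timer could fire against an already-freed socket. The core helpers in net/core/sock.c are essentially:

/* Simplified from net/core/sock.c: a pending timer owns one socket
 * reference, so the callback can never run against a freed struct sock.
 */
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
    /* mod_timer() returns 0 when the timer was not already pending */
    if (!mod_timer(timer, expires))
        sock_hold(sk);    /* first arming: take the timer's reference */
}

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
    /* del_timer() returns nonzero when it deactivated a pending timer */
    if (del_timer(timer))
        __sock_put(sk);    /* drop the reference the timer held */
}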

--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)
 /*
  *    Route a frame to an appropriate AX.25 connection.
+ *    A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
     if (skb->len < ROSE_MIN_LEN)
         return res;
+
+    if (!ax25)
+        return rose_loopback_queue(skb, NULL);
+
     frametype = skb->data[2];
     lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
     if (frametype == ROSE_CALL_REQUEST &&