This is the 4.9.39 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllxlFQACgkQONu9yGCS
aT42GBAAlL+49kLHeHQszHbIhCyd3bf59TC6cMZdGCA11X/TxvS4jFWpeopXQ7SR
rngaPiAItWFuakZPAIe+cdIQhA1P8o0pwr5iIj0/bXilKYF9ujNrOXxYGTD6qgeB
1G437oYOcIXZQ1sHzy0eMYxM/s8HvmFvVxCwOaISqB6LIZDtX5lYQdzSk/Iy5zAt
k0YBr+q+c6rm9039UQrvyQ0oTwUh0qSWg4rSW8tCOJcIJZP2BaD4M0BLQbaCOYWg
43Npx4MAHqIzWk38fZ7KxOo9yfNv6vbqW1WpB2WAT6i4zJJ6JtXqr7BpYVrSLDL9
NQWvxpuPbEcACL0Mt/mp2fh3bqfh5Hxl77YijyLCo5yq3XRt4w2kOsLUtEiSomQb
51rik+amM7v+ryJqPHLbSngZWbPoVZmkBO1psm5CaMcA4WnVEYy+ga9tnnXRQOzu
8irWSr4L4AxqBoxR/OjyfBcdIOlvgT9jEgnLnER1/W3vHtooyEkXBDYoZ4MszXWm
79VTDRb8i5VIm97klPUpo61D8kGnZOuXDJS2arKqJqVBjcVHeeOwU0jF/4HVpU4N
HKHS/PQRLbV8Aa1r3uADAZLs01dUV1qbdgd7EZBUWj6Vix80w4e3K+kTDIcCe8zC
1KkvGNWjP/3JxwH6GgHsz8hJ6I4Dmsl9JTRUSAFnnGeSj68UDN4=
=QrTI
-----END PGP SIGNATURE-----

Merge 4.9.39 into android-4.9

Changes in 4.9.39
	xen-netfront: Rework the fix for Rx stall during OOM and network stress
	net_sched: fix error recovery at qdisc creation
	net: sched: Fix one possible panic when no destroy callback
	net/phy: micrel: configure intterupts after autoneg workaround
	ipv6: avoid unregistering inet6_dev for loopback
	net: dp83640: Avoid NULL pointer dereference.
	tcp: reset sk_rx_dst in tcp_disconnect()
	net: prevent sign extension in dev_get_stats()
	bridge: mdb: fix leak on complete_info ptr on fail path
	rocker: move dereference before free
	bpf: prevent leaking pointer via xadd on unpriviledged
	net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish()
	net/mlx5: Cancel delayed recovery work when unloading the driver
	liquidio: fix bug in soft reset failure detection
	net/mlx5e: Fix TX carrier errors report in get stats ndo
	ipv6: dad: don't remove dynamic addresses if link is down
	vxlan: fix hlist corruption
	net: core: Fix slab-out-of-bounds in netdev_stats_to_stats64
	net: ipv6: Compare lwstate in detecting duplicate nexthops
	vrf: fix bug_on triggered by rx when destroying a vrf
	rds: tcp: use sock_create_lite() to create the accept socket
	brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
	brcmfmac: Fix a memory leak in error handling path in 'brcmf_cfg80211_attach'
	brcmfmac: Fix glom_skb leak in brcmf_sdiod_recv_chain
	sfc: don't read beyond unicast address list
	cfg80211: Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE
	cfg80211: Validate frequencies nested in NL80211_ATTR_SCAN_FREQUENCIES
	cfg80211: Check if PMKID attribute is of expected size
	cfg80211: Check if NAN service ID is of expected size
	irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
	parisc: Report SIGSEGV instead of SIGBUS when running out of stack
	parisc: use compat_sys_keyctl()
	parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs
	parisc/mm: Ensure IRQs are off in switch_mm()
	tools/lib/lockdep: Reduce MAX_LOCK_DEPTH to avoid overflowing lock_chain/: Depth
	thp, mm: fix crash due race in MADV_FREE handling
	kernel/extable.c: mark core_kernel_text notrace
	mm/list_lru.c: fix list_lru_count_node() to be race free
	fs/dcache.c: fix spin lockup issue on nlru->lock
	checkpatch: silence perl 5.26.0 unescaped left brace warnings
	binfmt_elf: use ELF_ET_DYN_BASE only for PIE
	arm: move ELF_ET_DYN_BASE to 4MB
	arm64: move ELF_ET_DYN_BASE to 4GB / 4MB
	powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB
	s390: reduce ELF_ET_DYN_BASE
	exec: Limit arg stack to at most 75% of _STK_LIM
	ARM64: dts: marvell: armada37xx: Fix timer interrupt specifiers
	vt: fix unchecked __put_user() in tioclinux ioctls
	rcu: Add memory barriers for NOCB leader wakeup
	nvmem: core: fix leaks on registration errors
	mnt: In umount propagation reparent in a separate pass
	mnt: In propgate_umount handle visiting mounts in any order
	mnt: Make propagate_umount less slow for overlapping mount propagation trees
	selftests/capabilities: Fix the test_execve test
	mm: fix overflow check in expand_upwards()
	crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD
	crypto: atmel - only treat EBUSY as transient if backlog
	crypto: sha1-ssse3 - Disable avx2
	crypto: caam - properly set IV after {en,de}crypt
	crypto: caam - fix signals handling
	Revert "sched/core: Optimize SCHED_SMT"
	sched/fair, cpumask: Export for_each_cpu_wrap()
	sched/topology: Fix building of overlapping sched-groups
	sched/topology: Optimize build_group_mask()
	sched/topology: Fix overlapping sched_group_mask
	PM / wakeirq: Convert to SRCU
	PM / QoS: return -EINVAL for bogus strings
	tracing: Use SOFTIRQ_OFFSET for softirq dectection for more accurate results
	kvm: vmx: Do not disable intercepts for BNDCFGS
	kvm: x86: Guest BNDCFGS requires guest MPX support
	kvm: vmx: Check value written to IA32_BNDCFGS
	kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS
	4.9.39

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 14accea70e

Makefile | 2
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 38
SUBLEVEL = 39
EXTRAVERSION =
NAME = Roaring Lionus
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This is the base location for PIE (ET_DYN with INTERP) loads. */
#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -75,14 +75,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 14
(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 11
(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10
(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
};
soc {
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__

@@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
@@ -20,6 +20,8 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
#define DMA_ERROR_CODE (~(dma_addr_t)0)
#ifdef CONFIG_PA11
extern struct dma_map_ops pcxl_dma_ops;
extern struct dma_map_ops pcx_dma_ops;

@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
break;
}
}
BUG_ON(!dev->platform_data);
return dev->platform_data;
}
#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
#define GET_IOC(dev) ({ \
void *__pdata = parisc_walk_tree(dev); \
__pdata ? HBA_DATA(__pdata)->iommu : NULL; \
})
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
mtctl(__space_to_prot(context), 8);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
static inline void switch_mm_irqs_off(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
@@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
ENTRY_SAME(keyctl)
ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)

@@ -366,7 +366,7 @@ bad_area:
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
address < vma->vm_start || address > vma->vm_end) {
address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE 0x20000000
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
@@ -158,14 +158,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. 64-bit
tasks are aligned to 4GB. */
#define ELF_ET_DYN_BASE (is_compat_task() ? \
(STACK_TOP / 3 * 2) : \
(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
@@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)

@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -2987,7 +2987,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported())
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;

@@ -3069,7 +3070,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported())
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
if (is_noncanonical_address(data & PAGE_MASK) ||
(data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;

@@ -6474,7 +6479,6 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
@@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
DEFINE_STATIC_SRCU(wakeup_srcu);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),

@@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_rcu();
synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

@@ -333,12 +335,12 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**

@@ -349,12 +351,12 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**

@@ -837,10 +839,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int active = 0;
int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);

@@ -856,7 +858,7 @@ void pm_print_active_wakeup_sources(void)
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

@@ -983,8 +985,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {

@@ -998,7 +1001,7 @@ void pm_wakep_autosleep_enabled(bool set)
}
spin_unlock_irq(&ws->lock);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */

@@ -1059,15 +1062,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
int srcuidx;
seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);
@@ -1000,7 +1000,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
if (err1 == -EINPROGRESS || err1 == -EBUSY)
if (err1 == -EINPROGRESS ||
(err1 == -EBUSY && (ahash_request_flags(req) &
CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*
@@ -2014,10 +2014,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -2037,6 +2037,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif
ablkcipher_unmap(jrdev, edesc, req);
/*
* The crypto API expects us to set the IV (req->info) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
ivsize, 0);
kfree(edesc);
ablkcipher_request_complete(req, err);

@@ -2047,10 +2055,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -2069,6 +2077,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif
ablkcipher_unmap(jrdev, edesc, req);
/*
* The crypto API expects us to set the IV (req->info) to the last
* ciphertext block.
*/
scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
ivsize, 0);
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -491,7 +491,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,

@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev)
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {

@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -646,6 +646,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
int enabled;
u64 val;
if (cpu >= nr_cpu_ids)
return -EINVAL;
if (gic_irq_in_rdist(d))
return -EINVAL;
@@ -230,7 +230,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
/* Wait for 100ms as Octeon resets. */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
oct->octeon_id);
return 1;

@@ -48,7 +48,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
/* Wait for 10ms as Octeon resets. */
mdelay(100);
if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
return 1;
}
@@ -2671,8 +2671,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
stats->tx_carrier_errors =
PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
@@ -67,6 +67,7 @@ enum {
enum {
MLX5_DROP_NEW_HEALTH_WORK,
MLX5_DROP_NEW_RECOVERY_WORK,
};
static u8 get_nic_state(struct mlx5_core_dev *dev)

@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
mlx5_handle_bad_state(dev);
spin_lock(&health->wq_lock);
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,

@@ -328,6 +329,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
init_timer(&health->timer);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;

@@ -350,11 +352,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
spin_lock(&health->wq_lock);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
spin_lock(&health->wq_lock);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;

@@ -1169,7 +1169,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
int err = 0;
if (cleanup)
mlx5_drain_health_wq(dev);
mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
*index = entry->index;
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
*index = found->index;
ofdpa_neigh_del(trans, found);
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
@@ -4399,12 +4399,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *uc;
int addr_count;
unsigned int i;
addr_count = netdev_uc_count(net_dev);
table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {

@@ -4415,6 +4412,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++;
}
table->dev_uc_count = i;
}
static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)

@@ -4422,11 +4421,10 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
unsigned int i;
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
skb_complete_tx_timestamp(skb, NULL);
kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;

@@ -622,6 +622,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
}
return 0;
@@ -787,15 +787,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct net_device *port_dev;
struct list_head *iter;
vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
free_percpu(dev->dstats);
dev->dstats = NULL;
}

@@ -1232,6 +1227,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
struct net_device *port_dev;
struct list_head *iter;
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
unregister_netdevice_queue(dev, head);
}
@@ -227,15 +227,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
struct vxlan_dev *vxlan;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA)
vni = 0;
hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
if (vxlan->default_dst.remote_vni == vni)
return vxlan;
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
if (node->vxlan->default_dst.remote_vni == vni)
return node->vxlan;
}
return NULL;

@@ -2309,17 +2309,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_init_rcu(&vxlan->hlist);
hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
spin_unlock(&vn->sock_lock);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
struct vxlan_dev_node *node)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;
node->vxlan = vxlan;
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
spin_unlock(&vn->sock_lock);
}

@@ -2778,6 +2783,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);

@@ -2795,12 +2801,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
if (ipv6) {
rcu_assign_pointer(vxlan->vn6_sock, vs);
else
node = &vxlan->hlist6;
} else
#endif
{
rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
node = &vxlan->hlist4;
}
vxlan_vs_add_dev(vs, vxlan, node);
return 0;
}
@@ -705,7 +705,7 @@ done:
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq, uint totlen)
{
struct sk_buff *glom_skb;
struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
int err = 0;

@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
if (err) {
brcmu_pkt_buf_free_skb(glom_skb);
if (err)
goto done;
}
skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);

@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
pktq);
done:
brcmu_pkt_buf_free_skb(glom_skb);
return err;
}

@@ -4928,6 +4928,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
brcmf_err("invalid action frame length\n");
err = -EINVAL;
goto exit;
}
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");

@@ -6871,7 +6876,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
return NULL;
goto ops_out;
}
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
set_wiphy_dev(wiphy, busdev);

@@ -7005,6 +7010,7 @@ priv_out:
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
ops_out:
kfree(ops);
return NULL;
}
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
RING_IDX req_prod = queue->rx.req_prod_pvt;
int notify;
int err = 0;
if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;

@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
if (!skb)
if (!skb) {
err = -ENOMEM;
break;
}
id = xennet_rxidx(req_prod);

@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
/* Try again later if there are not enough requests or skb allocation
* failed.
* Enough requests is quantified as the sum of newly created slots and
* the unconsumed slots at the backend.
*/
if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
unlikely(err)) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
@@ -488,21 +488,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
rval = device_add(&nvmem->dev);
if (rval)
goto out;
goto err_put_device;
if (config->compat) {
rval = nvmem_setup_compat(nvmem, config);
if (rval)
goto out;
goto err_device_del;
}
if (config->cells)
nvmem_add_cells(nvmem, config);
return nvmem;
out:
ida_simple_remove(&nvmem_ida, nvmem->id);
kfree(nvmem);
err_device_del:
device_del(&nvmem->dev);
err_put_device:
put_device(&nvmem->dev);
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
BUG_ON(size <= 0);

@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);

@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);

@@ -154,7 +154,10 @@ struct dino_device
};
/* Looks nice and keeps the compiler happy */
#define DINO_DEV(d) ((struct dino_device *) d)
#define DINO_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct dino_device *)__pdata; })
/*

@@ -111,8 +111,10 @@ static u32 lba_t32;
/* Looks nice and keeps the compiler happy */
#define LBA_DEV(d) ((struct lba_device *) (d))
#define LBA_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA

@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/*
* check if mask is >= than the current max IO Virt Address

@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;

@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {

@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
@@ -2711,13 +2711,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();

@@ -2726,7 +2726,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
@@ -911,17 +911,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
/*
* If we are loading ET_EXEC or we have already performed
* the ET_DYN load_addr calculations, proceed normally.
*/
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
load_bias = ELF_ET_DYN_BASE - vaddr;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
load_bias = ELF_PAGESTART(load_bias);
/*
* This logic is run once for the first LOAD Program
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers, and to calculate the entire
* size of the ELF mapping (total_size). (Note that
* load_addr_set is set to true later once the
* initial mapping is performed.)
*
* There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with INTERP)
* and loaders (ET_DYN without INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
* for ET_EXEC which does not have a randomized
* position). For example to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
* they cannot share the same load range. Sufficient
* room for the brk must be allocated with the
* loader as well, since brk must be available with
* the loader.
*
* Therefore, programs are loaded offset from
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED).
*/
if (elf_interpreter) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
elf_flags |= MAP_FIXED;
} else
load_bias = 0;
/*
* Since load_bias is used for all subsequent loading
* calculations, we must lower it by the first vaddr
* so that the remaining calculations based on the
* ELF vaddrs will be correctly offset. The result
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
@@ -1133,11 +1133,12 @@ void shrink_dcache_sb(struct super_block *sb)
LIST_HEAD(dispose);
freed = list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, UINT_MAX);
dentry_lru_isolate_shrink, &dispose, 1024);
this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
} while (freed > 0);
cond_resched();
} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
fs/exec.c | 11
@@ -215,8 +215,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
unsigned long ptr_size;
struct rlimit *rlim;
unsigned long ptr_size, limit;
/*
* Since the stack will hold pointers to the strings, we

@@ -245,14 +244,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;
/*
* Limit to 1/4-th the stack size for the argv+env strings.
* Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
* (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
rlim = current->signal->rlim;
if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
limit = _STK_LIM / 4 * 3;
limit = min(limit, rlimit(RLIMIT_STACK) / 4);
if (size > limit)
goto fail;
}
@@ -58,6 +58,7 @@ struct mount {
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;

@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
fs/pnode.c | 212
@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
static inline struct mount *last_slave(struct mount *p)
{
return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);

@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
}
}
static struct mount *skip_propagation_subtree(struct mount *m,
struct mount *origin)
{
/*
* Advance m such that propagation_next will not return
* the slaves of m.
*/
if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
m = last_slave(m);
return m;
}
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {

@@ -415,68 +433,107 @@ void propagate_mount_unlock(struct mount *mnt)
}
}
/*
* Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
*/
static void mark_umount_candidates(struct mount *mnt)
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
BUG_ON(parent == mnt);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
continue;
if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
SET_MNT_MARK(child);
}
}
CLEAR_MNT_MARK(mnt);
mnt->mnt.mnt_flags |= MNT_UMOUNT;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_umounting);
list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
static void __propagate_umount(struct mount *mnt)
static bool __propagate_umount(struct mount *mnt,
struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
bool progress = false;
struct mount *child;
BUG_ON(parent == mnt);
/*
* The state of the parent won't change if this mount is
* already unmounted or marked as without children.
*/
if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
goto out;
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *topper;
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
/*
* umount the child only if the child has no children
* and the child is marked safe to unmount.
*/
if (!child || !IS_MNT_MARKED(child))
/* Verify topper is the only grandchild that has not been
* speculatively unmounted.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
CLEAR_MNT_MARK(child);
if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
continue;
/* Found a mounted child */
goto children;
}
/* If there is exactly one mount covering all of child
* replace child with that mount.
*/
topper = find_topper(child);
if (topper)
mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
topper);
/* Mark mounts that can be unmounted if not locked */
SET_MNT_MARK(mnt);
progress = true;
if (list_empty(&child->mnt_mounts)) {
list_del_init(&child->mnt_child);
child->mnt.mnt_flags |= MNT_UMOUNT;
list_move_tail(&child->mnt_list, &mnt->mnt_list);
/* If a mount is without children and not locked umount it. */
if (!IS_MNT_LOCKED(mnt)) {
umount_one(mnt, to_umount);
} else {
children:
list_move_tail(&mnt->mnt_umounting, to_restore);
}
out:
return progress;
}
static void umount_list(struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *mnt, *child, *tmp;
list_for_each_entry(mnt, to_umount, mnt_list) {
list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
/* topper? */
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
list_move_tail(&child->mnt_umounting, to_restore);
else
umount_one(child, to_umount);
}
}
}
static void restore_mounts(struct list_head *to_restore)
{
/* Restore mounts to a clean working state */
while (!list_empty(to_restore)) {
struct mount *mnt, *parent;
struct mountpoint *mp;
mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
CLEAR_MNT_MARK(mnt);
list_del_init(&mnt->mnt_umounting);
/* Should this mount be reparented? */
mp = mnt->mnt_mp;
parent = mnt->mnt_parent;
while (parent->mnt.mnt_flags & MNT_UMOUNT) {
mp = parent->mnt_mp;
parent = parent->mnt_parent;
}
if (parent != mnt->mnt_parent)
mnt_change_mountpoint(parent, mp, mnt);
}
}
static void cleanup_umount_visitations(struct list_head *visited)
{
while (!list_empty(visited)) {
struct mount *mnt =
list_first_entry(visited, struct mount, mnt_umounting);
list_del_init(&mnt->mnt_umounting);
}
}
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.

@@ -487,12 +544,69 @@ static void __propagate_umount(struct mount *mnt)
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
LIST_HEAD(to_restore);
LIST_HEAD(to_umount);
LIST_HEAD(visited);
list_for_each_entry_reverse(mnt, list, mnt_list)
mark_umount_candidates(mnt);
/* Find candidates for unmounting */
list_for_each_entry_reverse(mnt, list, mnt_list) {
struct mount *parent = mnt->mnt_parent;
struct mount *m;
/*
* If this mount has already been visited it is known that it's
* entire peer group and all of their slaves in the propagation
* tree for the mountpoint has already been visited and there is
* no need to visit them again.
*/
if (!list_empty(&mnt->mnt_umounting))
continue;
list_add_tail(&mnt->mnt_umounting, &visited);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child)
continue;
if (!list_empty(&child->mnt_umounting)) {
/*
* If the child has already been visited it is
* know that it's entire peer group and all of
* their slaves in the propgation tree for the
* mountpoint has already been visited and there
* is no need to visit this subtree again.
*/
m = skip_propagation_subtree(m, parent);
continue;
} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
/*
* We have come accross an partially unmounted
* mount in list that has not been visited yet.
* Remember it has been visited and continue
* about our merry way.
*/
list_add_tail(&child->mnt_umounting, &visited);
continue;
}
/* Check the child and parents while progress is made */
while (__propagate_umount(child,
&to_umount, &to_restore)) {
/* Is the parent a umount candidate? */
child = child->mnt_parent;
if (list_empty(&child->mnt_umounting))
break;
}
}
}
umount_list(&to_umount, &to_restore);
restore_mounts(&to_restore);
cleanup_umount_visitations(&visited);
list_splice_tail(&to_umount, list);
list_for_each_entry(mnt, list, mnt_list)
__propagate_umount(mnt);
return 0;
}
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
         (cpu) = cpumask_next_zero((cpu), (mask)),      \
         (cpu) < nr_cpu_ids;)

extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)                                    \
        for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);     \
             (cpu) < nr_cpumask_bits;                                          \
             (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
@@ -44,6 +44,7 @@ struct list_lru_node {
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg *memcg_lrus;
#endif
        long nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
@@ -788,6 +788,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
@@ -22,6 +22,7 @@ struct route_info {
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>

@@ -233,4 +234,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
        return daddr;
}

static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
{
        return a->dst.dev == b->dst.dev &&
               a->rt6i_idev == b->rt6i_idev &&
               ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
               !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
}
#endif
@@ -221,9 +221,17 @@ struct vxlan_config {
        bool no_share;
};

struct vxlan_dev_node {
        struct hlist_node hlist;
        struct vxlan_dev *vxlan;
};

/* Pseudo network device */
struct vxlan_dev {
        struct hlist_node hlist;        /* vni hash table */
        struct vxlan_dev_node hlist4;   /* vni hash table for IPv4 socket */
#if IS_ENABLED(CONFIG_IPV6)
        struct vxlan_dev_node hlist6;   /* vni hash table for IPv6 socket */
#endif
        struct list_head next;          /* vxlan's per namespace list */
        struct vxlan_sock __rcu *vn4_sock;      /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
@@ -885,6 +885,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
        if (err)
                return err;

        if (is_pointer_value(env, insn->src_reg)) {
                verbose("R%d leaks addr into mem\n", insn->src_reg);
                return -EACCES;
        }

        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
        return 0;
}

int core_kernel_text(unsigned long addr)
int notrace core_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
@@ -1767,6 +1767,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
        if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
                WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
                smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
                swake_up(&rdp_leader->nocb_wq);
        }
}

@@ -2021,6 +2022,7 @@ wait_again:
         * nocb_gp_head, where they await a grace period.
         */
        gotcbs = false;
        smp_mb(); /* wakeup before ->nocb_head reads. */
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
                rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
                if (!rdp->nocb_gp_head)
@@ -6287,6 +6287,9 @@ enum s_alloc {
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Only CPUs that can arrive at this group should be considered to continue
 * balancing.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.

@@ -6298,18 +6301,31 @@ enum s_alloc {
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
        const struct cpumask *span = sched_domain_span(sd);
        const struct cpumask *sg_span = sched_group_cpus(sg);
        struct sd_data *sdd = sd->private;
        struct sched_domain *sibling;
        int i;

        for_each_cpu(i, span) {
        for_each_cpu(i, sg_span) {
                sibling = *per_cpu_ptr(sdd->sd, i);
                if (!cpumask_test_cpu(i, sched_domain_span(sibling)))

                /*
                 * Can happen in the asymmetric case, where these siblings are
                 * unused. The mask will not be empty because those CPUs that
                 * do have the top domain _should_ span the domain.
                 */
                if (!sibling->child)
                        continue;

                /* If we would not end up here, we can't continue from here */
                if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
                        continue;

                cpumask_set_cpu(i, sched_group_mask(sg));
        }

        /* We must not have empty masks here */
        WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}

/*

@@ -6333,7 +6349,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)

        cpumask_clear(covered);

        for_each_cpu(i, span) {
        for_each_cpu_wrap(i, span, cpu) {
                struct cpumask *sg_span;

                if (cpumask_test_cpu(i, covered))

@@ -7666,22 +7682,6 @@ int sched_cpu_dying(unsigned int cpu)
}
#endif

#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);

static void sched_init_smt(void)
{
        /*
         * We've enumerated all CPUs and will assume that if any CPU
         * has SMT siblings, CPU0 will too.
         */
        if (cpumask_weight(cpu_smt_mask(0)) > 1)
                static_branch_enable(&sched_smt_present);
}
#else
static inline void sched_init_smt(void) { }
#endif

void __init sched_init_smp(void)
{
        cpumask_var_t non_isolated_cpus;

@@ -7712,9 +7712,6 @@ void __init sched_init_smp(void)

        init_sched_rt_class();
        init_sched_dl_class();

        sched_init_smt();

        sched_smp_initialized = true;
}
@@ -5997,43 +5997,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}

/*
 * Implement a for_each_cpu() variant that starts the scan at a given cpu
 * (@start), and wraps around.
 *
 * This is used to scan for idle CPUs; such that not all CPUs looking for an
 * idle CPU find the same CPU. The down-side is that tasks tend to cycle
 * through the LLC domain.
 *
 * Especially tbench is found sensitive to this.
 */

static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
{
        int next;

again:
        next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);

        if (*wrapped) {
                if (next >= start)
                        return nr_cpumask_bits;
        } else {
                if (next >= nr_cpumask_bits) {
                        *wrapped = 1;
                        n = -1;
                        goto again;
                }
        }

        return next;
}

#define for_each_cpu_wrap(cpu, mask, start, wrap)                              \
        for ((wrap) = 0, (cpu) = (start)-1;                                    \
             (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),       \
             (cpu) < nr_cpumask_bits; )

#ifdef CONFIG_SCHED_SMT

static inline void set_idle_cores(int cpu, int val)

@@ -6063,7 +6026,7 @@ static inline bool test_idle_cores(int cpu, bool def)
 * Since SMT siblings share all cache levels, inspecting this limited remote
 * state should be fairly cheap.
 */
void __update_idle_core(struct rq *rq)
void update_idle_core(struct rq *rq)
{
        int core = cpu_of(rq);
        int cpu;

@@ -6093,17 +6056,14 @@ unlock:
static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
{
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
        int core, cpu, wrap;

        if (!static_branch_likely(&sched_smt_present))
                return -1;
        int core, cpu;

        if (!test_idle_cores(target, false))
                return -1;

        cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));

        for_each_cpu_wrap(core, cpus, target, wrap) {
        for_each_cpu_wrap(core, cpus, target) {
                bool idle = true;

                for_each_cpu(cpu, cpu_smt_mask(core)) {

@@ -6131,9 +6091,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
{
        int cpu;

        if (!static_branch_likely(&sched_smt_present))
                return -1;

        for_each_cpu(cpu, cpu_smt_mask(target)) {
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        continue;

@@ -6169,7 +6126,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 avg_cost, avg_idle = this_rq()->avg_idle;
        u64 time, cost;
        s64 delta;
        int cpu, wrap;
        int cpu;

        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)

@@ -6186,7 +6143,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t

        time = local_clock();

        for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
        for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        continue;
                if (idle_cpu(cpu))
@@ -43,6 +43,12 @@ extern void cpu_load_update_active(struct rq *this_rq);
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

#ifdef CONFIG_SCHED_SMT
extern void update_idle_core(struct rq *rq);
#else
static inline void update_idle_core(struct rq *rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */

@@ -779,23 +785,6 @@ static inline int cpu_of(struct rq *rq)
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
        if (static_branch_unlikely(&sched_smt_present))
                __update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
@@ -1927,7 +1927,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
#endif
                ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
EXPORT_SYMBOL(cpumask_any_but);

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
        int next;

again:
        next = cpumask_next(n, mask);

        if (wrap && n < start && next >= start) {
                return nr_cpumask_bits;

        } else if (next >= nr_cpumask_bits) {
                wrap = true;
                n = -1;
                goto again;
        }

        return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
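To make the wrap-around semantics concrete, here is a small userspace mock-up of the same control flow over a plain bit mask. It mirrors the function above rather than calling the kernel cpumask API; NBITS, next_set_bit() and next_wrap() are invented names for the illustration.

#include <stdio.h>

#define NBITS 8                         /* stand-in for nr_cpumask_bits */

/* Return the index of the next set bit strictly after n, or NBITS if none. */
static int next_set_bit(unsigned int mask, int n)
{
        for (int i = n + 1; i < NBITS; i++)
                if (mask & (1u << i))
                        return i;
        return NBITS;
}

/* Same shape as cpumask_next_wrap() above. */
static int next_wrap(int n, unsigned int mask, int start, int wrap)
{
        int next;

again:
        next = next_set_bit(mask, n);

        if (wrap && n < start && next >= start)
                return NBITS;           /* wrapped past @start: iteration done */
        if (next >= NBITS) {
                wrap = 1;               /* fell off the end: restart at bit 0 */
                n = -1;
                goto again;
        }
        return next;
}

int main(void)
{
        unsigned int mask = 0xB4;       /* bits 2, 4, 5 and 7 set */
        int start = 5;

        /* Equivalent of for_each_cpu_wrap(cpu, mask, start). */
        for (int cpu = next_wrap(start - 1, mask, start, 0);
             cpu < NBITS;
             cpu = next_wrap(cpu, mask, start, 1))
                printf("cpu %d\n", cpu);        /* prints 5, 7, 2, 4 */

        return 0;
}

The first call passes wrap = 0 so that @start itself is still a candidate; every later call passes wrap = 1, which lets the scan terminate once it crosses @start again even when @start is not set in the mask.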

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
@@ -1373,8 +1373,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                get_page(page);
                spin_unlock(ptl);
                split_huge_page(page);
                put_page(page);
                unlock_page(page);
                put_page(page);
                goto out_unlocked;
        }
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }

@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }

@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        long count = 0;
        int memcg_idx;
        struct list_lru_node *nlru;

        count += __list_lru_count_one(lru, nid, -1);
        if (list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx)
                        count += __list_lru_count_one(lru, nid, memcg_idx);
        }
        return count;
        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
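The change above keeps a per-node nr_items counter in step with every add, delete and isolate done under nlru->lock, so list_lru_count_node() becomes a plain counter read instead of a walk over per-memcg lists that may be changing underneath it. A small userspace sketch of that counter-beside-the-list pattern, using a pthread mutex in place of the kernel spinlock and with invented type names:

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *next;
};

struct counted_list {
        pthread_mutex_t lock;
        struct node *head;
        long nr_items;                  /* updated under @lock on every change */
};

static void cl_add(struct counted_list *cl, struct node *n)
{
        pthread_mutex_lock(&cl->lock);
        n->next = cl->head;
        cl->head = n;
        cl->nr_items++;                 /* keep the summary in sync with the list */
        pthread_mutex_unlock(&cl->lock);
}

static void cl_del_head(struct counted_list *cl)
{
        pthread_mutex_lock(&cl->lock);
        if (cl->head) {
                cl->head = cl->head->next;
                cl->nr_items--;
        }
        pthread_mutex_unlock(&cl->lock);
}

/* O(1); an approximate read needs no traversal of a list that may be mutating. */
static long cl_count(const struct counted_list *cl)
{
        return cl->nr_items;
}

int main(void)
{
        struct counted_list cl = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct node a, b;

        cl_add(&cl, &a);
        cl_add(&cl, &b);
        cl_del_head(&cl);
        printf("%ld\n", cl_count(&cl)); /* 1 */
        return 0;
}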

@@ -226,6 +223,7 @@ restart:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
@@ -2237,7 +2237,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)

        /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
        if (address >= TASK_SIZE)
        if (address >= (TASK_SIZE & PAGE_MASK))
                return -ENOMEM;
        address += PAGE_SIZE;
@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
                        __mdb_entry_to_br_ip(entry, &complete_info->ip);
                        mdb.obj.complete_priv = complete_info;
                        mdb.obj.complete = br_mdb_complete;
                        switchdev_port_obj_add(port_dev, &mdb.obj);
                        if (switchdev_port_obj_add(port_dev, &mdb.obj))
                                kfree(complete_info);
                }
        } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
@@ -4641,6 +4641,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
        skb_dst_drop(skb);
        kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
        switch (ret) {

@@ -4654,12 +4660,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
                        skb_dst_drop(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
                } else {
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        __kfree_skb(skb);
                }
                break;

        case GRO_HELD:

@@ -4729,10 +4733,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
                break;

        case GRO_DROP:
        case GRO_MERGED_FREE:
                napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
                break;
        }

@@ -7521,7 +7531,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
{
#if BITS_PER_LONG == 64
        BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*stats64));
        memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + sizeof(*netdev_stats), 0,
               sizeof(*stats64) - sizeof(*netdev_stats));
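The out-of-bounds fix above comes down to copying only sizeof(*netdev_stats) bytes from the smaller source structure and then explicitly zeroing the tail of the larger destination, instead of reading sizeof(*stats64) bytes out of the source. A standalone sketch of that copy-then-zero pattern, with made-up structures standing in for the kernel types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical smaller "legacy" layout. */
struct legacy_stats {
        uint64_t rx_packets;
        uint64_t tx_packets;
};

/* Hypothetical larger layout: same leading fields plus extra counters. */
struct stats64 {
        uint64_t rx_packets;
        uint64_t tx_packets;
        uint64_t rx_nohandler;          /* exists only in the larger struct */
};

static void legacy_to_stats64(struct stats64 *dst, const struct legacy_stats *src)
{
        /* Copy only as many bytes as the source actually has... */
        memcpy(dst, src, sizeof(*src));
        /* ...then zero the counters that exist only in the destination. */
        memset((char *)dst + sizeof(*src), 0, sizeof(*dst) - sizeof(*src));
}

int main(void)
{
        struct legacy_stats old = { .rx_packets = 10, .tx_packets = 20 };
        struct stats64 full;

        legacy_to_stats64(&full, &old);
        printf("%llu %llu %llu\n",
               (unsigned long long)full.rx_packets,
               (unsigned long long)full.tx_packets,
               (unsigned long long)full.rx_nohandler);  /* 10 20 0 */
        return 0;
}

Copying sizeof(*dst) from src, as the old code did, reads past the end of the smaller allocation, which is exactly the slab-out-of-bounds the changelog mentions.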

@@ -7563,9 +7573,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
        storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
        storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
        storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
        storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
}
EXPORT_SYMBOL(dev_get_stats);
@@ -2300,6 +2300,8 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
        dst_release(sk->sk_rx_dst);
        sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);

        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
@@ -1879,15 +1879,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
        if (dad_failed)
                ifp->flags |= IFA_F_DADFAILED;

        if (ifp->flags&IFA_F_PERMANENT) {
                spin_lock_bh(&ifp->lock);
                addrconf_del_dad_work(ifp);
                ifp->flags |= IFA_F_TENTATIVE;
                spin_unlock_bh(&ifp->lock);
                if (dad_failed)
                        ipv6_ifa_notify(0, ifp);
                in6_ifa_put(ifp);
        } else if (ifp->flags&IFA_F_TEMPORARY) {
        if (ifp->flags&IFA_F_TEMPORARY) {
                struct inet6_ifaddr *ifpub;
                spin_lock_bh(&ifp->lock);
                ifpub = ifp->ifpub;

@@ -1900,6 +1892,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
                        spin_unlock_bh(&ifp->lock);
                }
                ipv6_del_addr(ifp);
        } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
                spin_lock_bh(&ifp->lock);
                addrconf_del_dad_work(ifp);
                ifp->flags |= IFA_F_TENTATIVE;
                spin_unlock_bh(&ifp->lock);
                if (dad_failed)
                        ipv6_ifa_notify(0, ifp);
                in6_ifa_put(ifp);
        } else {
                ipv6_del_addr(ifp);
        }

@@ -3345,6 +3345,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct net *net = dev_net(dev);
        int run_pending = 0;
        int err;

@@ -3360,7 +3361,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGEMTU:
                /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
                if (dev->mtu < IPV6_MIN_MTU) {
                        addrconf_ifdown(dev, 1);
                        addrconf_ifdown(dev, dev != net->loopback_dev);
                        break;
                }

@@ -3476,7 +3477,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                 * IPV6_MIN_MTU stop IPv6 on this interface.
                 */
                if (dev->mtu < IPV6_MIN_MTU)
                        addrconf_ifdown(dev, 1);
                        addrconf_ifdown(dev, dev != net->loopback_dev);
        }
        break;
@@ -771,10 +771,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        goto next_iter;
                }

                if (iter->dst.dev == rt->dst.dev &&
                    iter->rt6i_idev == rt->rt6i_idev &&
                    ipv6_addr_equal(&iter->rt6i_gateway,
                                    &rt->rt6i_gateway)) {
                if (rt6_duplicate_nexthop(iter, rt)) {
                        if (rt->rt6i_nsiblings)
                                rt->rt6i_nsiblings = 0;
                        if (!(iter->rt6i_flags & RTF_EXPIRES))
@@ -2931,17 +2931,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
                                 struct rt6_info *rt, struct fib6_config *r_cfg)
{
        struct rt6_nh *nh;
        struct rt6_info *rtnh;
        int err = -EEXIST;

        list_for_each_entry(nh, rt6_nh_list, next) {
                /* check if rt6_info already exists */
                rtnh = nh->rt6_info;

                if (rtnh->dst.dev == rt->dst.dev &&
                    rtnh->rt6i_idev == rt->rt6i_idev &&
                    ipv6_addr_equal(&rtnh->rt6i_gateway,
                                    &rt->rt6i_gateway))
                if (rt6_duplicate_nexthop(nh->rt6_info, rt))
                        return err;
        }
@@ -129,7 +129,7 @@ int rds_tcp_accept_one(struct socket *sock)
        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;

        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
        ret = sock_create_lite(sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
                               &new_sock);
        if (ret)
@@ -1008,6 +1008,9 @@ static struct Qdisc *qdisc_create(struct net_device *dev,

                return sch;
        }
        /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
        if (ops->destroy)
                ops->destroy(sch);
err_out3:
        dev_put(dev);
        kfree((char *) sch - sch->padded);
@@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
                                              sizeof(u32));
                if (!q->hhf_arrays[i]) {
                        hhf_destroy(sch);
                        /* Note: hhf_destroy() will be called
                         * by our caller.
                         */
                        return -ENOMEM;
                }
        }

@@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
                                                  BITS_PER_BYTE);
                if (!q->hhf_valid_bits[i]) {
                        hhf_destroy(sch);
                        /* Note: hhf_destroy() will be called
                         * by our caller.
                         */
                        return -ENOMEM;
                }
        }
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (priv->qdiscs == NULL)
        if (!priv->qdiscs)
                return -ENOMEM;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {

@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)));
                if (qdisc == NULL)
                        goto err;
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        sch->flags |= TCQ_F_MQROOT;
        return 0;

err:
        mq_destroy(sch);
        return -ENOMEM;
}

static void mq_attach(struct Qdisc *sch)
@@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdisc, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (priv->qdiscs == NULL) {
                err = -ENOMEM;
                goto err;
        }
        if (!priv->qdiscs)
                return -ENOMEM;

        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);

@@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)));
                if (qdisc == NULL) {
                        err = -ENOMEM;
                        goto err;
                }
                if (!qdisc)
                        return -ENOMEM;

                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

@@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                priv->hw_owned = 1;
                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
                if (err)
                        goto err;
                        return err;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)

@@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)

        sch->flags |= TCQ_F_MQROOT;
        return 0;

err:
        mqprio_destroy(sch);
        return err;
}

static void mqprio_attach(struct Qdisc *sch)
@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
        q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
        if (!q->ht || !q->slots) {
                sfq_destroy(sch);
                /* Note: sfq_destroy() will be called by our caller */
                return -ENOMEM;
        }

        for (i = 0; i < q->divisor; i++)
                q->ht[i] = SFQ_EMPTY_SLOT;
@@ -305,8 +305,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
        [NL80211_ATTR_PID] = { .type = NLA_U32 },
        [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
        [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
                                 .len = WLAN_PMKID_LEN },
        [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
        [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
        [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
        [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },

@@ -362,6 +361,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
        [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
        [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
        [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
        [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
        [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },

@@ -512,7 +512,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
static const struct nla_policy
nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
        [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
        [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
        [NL80211_NAN_FUNC_SERVICE_ID] = {
                                          .len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
        [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
        [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },

@@ -6326,6 +6326,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
        struct nlattr *attr1, *attr2;
        int n_channels = 0, tmp1, tmp2;

        nla_for_each_nested(attr1, freqs, tmp1)
                if (nla_len(attr1) != sizeof(u32))
                        return 0;

        nla_for_each_nested(attr1, freqs, tmp1) {
                n_channels++;
                /*
@@ -3500,7 +3500,7 @@ sub process {
                        $fixedline =~ s/\s*=\s*$/ = {/;
                        fix_insert_line($fixlinenr, $fixedline);
                        $fixedline = $line;
                        $fixedline =~ s/^(.\s*){\s*/$1/;
                        $fixedline =~ s/^(.\s*)\{\s*/$1/;
                        fix_insert_line($fixlinenr, $fixedline);
                }
        }

@@ -3841,7 +3841,7 @@ sub process {
                        my $fixedline = rtrim($prevrawline) . " {";
                        fix_insert_line($fixlinenr, $fixedline);
                        $fixedline = $rawline;
                        $fixedline =~ s/^(.\s*){\s*/$1\t/;
                        $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
                        if ($fixedline !~ /^\+\s*$/) {
                                fix_insert_line($fixlinenr, $fixedline);
                        }

@@ -4330,7 +4330,7 @@ sub process {
                        if (ERROR("SPACING",
                                  "space required before the open brace '{'\n" . $herecurr) &&
                            $fix) {
                                $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
                                $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
                        }
                }
@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>

#define MAX_LOCK_DEPTH 2000UL
#define MAX_LOCK_DEPTH 63UL

#define asmlinkage
#define __visible
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)

        if (chdir(cwd) != 0)
                err(1, "chdir to private tmpfs");

        if (umount2(".", MNT_DETACH) != 0)
                err(1, "detach private tmpfs");
}

static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)

@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
                        err(1, "chown");
                if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
                        err(1, "chmod");
        }
}

        capng_get_caps_process();

@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
        } else {
                printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
                exec_other_validate_cap("./validate_cap_sgidnonroot",
                                        false, false, true, false);
                                        false, false, true, false);

                if (fork_wait()) {
                        printf("[RUN]\tNon-root +ia, sgidroot => i\n");