This is the 4.9.36 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllc3lYACgkQONu9yGCS
 aT7rhBAAoYx4e8Cbp2iUSEOupe4zWD087bWaQELz9EDh5fsr8zgelyzoeK6y0Rxy
 ngwjVViu9gkouqeHY8gLCFcL4Q/2NFMCGmZ+x7rDV1+d7PWrxCtDw9xnlwIzHtlb
 K9KeWs1TTNtmN2ZQMmnnOQZhdwSseHsA+bdn60n/ol3+n5cLcil6B/xOFCLPlynq
 kr3CZg70HV91V1Cy1jj4/QOY+LE19ratIAY3lirLNphrVt/AWFYBUnBwH4Y7Pked
 VrQ5v0UsB7SHzuuWPkoTE2hM103TjVMoA+/SF6zurb0KcxP+uo9o6RPsOzxeBco/
 cfKGI6ZozOFHy/KIEzC+/EDhdlG08t4MMiAS73hYNthMkVSz6AafNrfGQNmgO+lG
 b2jCmzdp3vdAan4JQ2FvGuUYHFQaskJh5FGCSFiWvzOl1fbIBknjBA+Wubj91lKn
 YqC3srZwi5Z+Ol644uEMorBI3mYVPHlkdSvw17dS786x4zL1ATxpRrAGhcbK/jdU
 e8x/S2nr+A3thvLgK75R6nAfr+srnKz4MUaN3vHLnvXYECGNEXddj5FNPO1lSbeK
 28F/5+W8Onm+pBsldLJjzN4v3Fsq+JWtUZGod+aMZJq0l+Mu8bjZ6rtTbepv5YOv
 6RA8MgBt2heLU39thiXXcpk7BPLTHk79cvKV8drQww4cGNuk48k=
 =HbH+
 -----END PGP SIGNATURE-----

Merge 4.9.36 into android-4.9

Changes in 4.9.36
	ipv6: release dst on error in ip6_dst_lookup_tail
	net: don't call strlen on non-terminated string in dev_set_alias()
	decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb
	net: Zero ifla_vf_info in rtnl_fill_vfinfo()
	net: vrf: Make add_fib_rules per network namespace flag
	af_unix: Add sockaddr length checks before accessing sa_family in bind and connect handlers
	Fix an intermittent pr_emerg warning about lo becoming free.
	sctp: disable BH in sctp_for_each_endpoint
	net: caif: Fix a sleep-in-atomic bug in cfpkt_create_pfx
	net: tipc: Fix a sleep-in-atomic bug in tipc_msg_reverse
	net/mlx5e: Added BW check for DIM decision mechanism
	net/mlx5e: Fix wrong indications in DIM due to counter wraparound
	proc: snmp6: Use correct type in memset
	igmp: acquire pmc lock for ip_mc_clear_src()
	igmp: add a missing spin_lock_init()
	ipv6: fix calling in6_ifa_hold incorrectly for dad work
	sctp: return next obj by passing pos + 1 into sctp_transport_get_idx
	net/mlx5e: Avoid doing a cleanup call if the profile doesn't have it
	net/mlx5: Wait for FW readiness before initializing command interface
	net/mlx5e: Fix timestamping capabilities reporting
	decnet: always not take dst->__refcnt when inserting dst into hash table
	net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev
	sfc: provide dummy definitions of vswitch functions
	ipv6: Do not leak throw route references
	rtnetlink: add IFLA_GROUP to ifla_policy
	netfilter: xt_TCPMSS: add more sanity tests on tcph->doff
	netfilter: synproxy: fix conntrackd interaction
	NFSv4: fix a reference leak caused WARNING messages
	NFSv4.x/callback: Create the callback service through svc_create_pooled
	xen/blkback: don't use xen_blkif_get() in xen-blkback kthread
	drm/ast: Handle configuration without P2A bridge
	mm, swap_cgroup: reschedule when neeed in swap_cgroup_swapoff()
	MIPS: head: Reorder instructions missing a delay slot
	MIPS: Avoid accidental raw backtrace
	MIPS: pm-cps: Drop manual cache-line alignment of ready_count
	MIPS: Fix IRQ tracing & lockdep when rescheduling
	ALSA: hda - Fix endless loop of codec configure
	ALSA: hda - set input_path bitmap to zero after moving it to new place
	NFSv4.1: Fix a race in nfs4_proc_layoutget
	gpiolib: fix filtering out unwanted events
	drm/vmwgfx: Free hash table allocated by cmdbuf managed res mgr
	dm thin: do not queue freed thin mapping for next stage processing
	x86/mm: Fix boot crash caused by incorrect loop count calculation in sync_global_pgds()
	usb: gadget: f_fs: Fix possibe deadlock
	l2tp: fix race in l2tp_recv_common()
	l2tp: ensure session can't get removed during pppol2tp_session_ioctl()
	l2tp: fix duplicate session creation
	l2tp: hold session while sending creation notifications
	l2tp: take a reference on sessions used in genetlink handlers
	mm: numa: avoid waiting on freed migrated pages
	sparc64: Handle PIO & MEM non-resumable errors.
	sparc64: Zero pages on allocation for mondo and error queues.
	net: ethtool: add support for 2500BaseT and 5000BaseT link modes
	net: phy: add an option to disable EEE advertisement
	dt-bindings: net: add EEE capability constants
	net: phy: fix sign type error in genphy_config_eee_advert
	net: phy: use boolean dt properties for eee broken modes
	dt: bindings: net: use boolean dt properties for eee broken modes
	ARM64: dts: meson-gxbb-odroidc2: fix GbE tx link breakage
	xen/blkback: don't free be structure too early
	KVM: x86: fix fixing of hypercalls
	scsi: sd: Fix wrong DPOFUA disable in sd_read_cache_type
	stmmac: add missing of_node_put
	scsi: lpfc: Set elsiocb contexts to NULL after freeing it
	qla2xxx: Terminate exchange if corrupted
	qla2xxx: Fix erroneous invalid handle message
	drm/amdgpu: fix program vce instance logic error.
	drm/amdgpu: add support for new hainan variants
	net: phy: dp83848: add DP83620 PHY support
	perf/x86/intel: Handle exclusive threadid correctly on CPU hotplug
	net: korina: Fix NAPI versus resources freeing
	powerpc/eeh: Enable IO path on permanent error
	net: ethtool: Initialize buffer when querying device channel settings
	xen-netback: fix memory leaks on XenBus disconnect
	xen-netback: protect resource cleaning on XenBus disconnect
	bnxt_en: Fix "uninitialized variable" bug in TPA code path.
	bpf: don't trigger OOM killer under pressure with map alloc
	objtool: Fix IRET's opcode
	gianfar: Do not reuse pages from emergency reserve
	Btrfs: Fix deadlock between direct IO and fast fsync
	Btrfs: fix truncate down when no_holes feature is enabled
	virtio_console: fix a crash in config_work_handler
	swiotlb-xen: update dev_addr after swapping pages
	xen-netfront: Fix Rx stall during network stress and OOM
	scsi: virtio_scsi: Reject commands when virtqueue is broken
	iwlwifi: fix kernel crash when unregistering thermal zone
	platform/x86: ideapad-laptop: handle ACPI event 1
	amd-xgbe: Check xgbe_init() return code
	net: dsa: Check return value of phy_connect_direct()
	drm/amdgpu: check ring being ready before using
	vfio/spapr: fail tce_iommu_attach_group() when iommu_data is null
	mlxsw: spectrum_router: Correctly reallocate adjacency entries
	virtio_net: fix PAGE_SIZE > 64k
	ip6_tunnel: must reload ipv6h in ip6ip6_tnl_xmit()
	vxlan: do not age static remote mac entries
	ibmveth: Add a proper check for the availability of the checksum features
	kernel/panic.c: add missing \n
	Documentation: devicetree: change the mediatek ethernet compatible string
	drm/etnaviv: trick drm_mm into giving out a low IOVA
	perf/x86/intel/uncore: Fix hardcoded socket 0 assumption in the Haswell init code
	pinctrl: intel: Set pin direction properly
	net: phy: marvell: fix Marvell 88E1512 used in SGMII mode
	mac80211: recalculate min channel width on VHT opmode changes
	perf/x86/intel: Use ULL constant to prevent undefined shift behaviour
	HID: i2c-hid: Add sleep between POWER ON and RESET
	scsi: lpfc: avoid double free of resource identifiers
	spi: davinci: use dma_mapping_error()
	arm64: assembler: make adr_l work in modules under KASLR
	net: thunderx: acpi: fix LMAC initialization
	drm/radeon/si: load special ucode for certain MC configs
	drm/amd/powerplay: fix vce cg logic error on CZ/St.
	drm/amd/powerplay: refine vce dpm update code on Cz.
	pmem: return EIO on read_pmem() failure
	mac80211: initialize SMPS field in HT capabilities
	x86/tsc: Add the Intel Denverton Processor to native_calibrate_tsc()
	x86/mpx: Use compatible types in comparison to fix sparse error
	perf/core: Fix sys_perf_event_open() vs. hotplug
	perf/x86: Reject non sampling events with precise_ip
	aio: fix lock dep warning
	coredump: Ensure proper size of sparse core files
	swiotlb: ensure that page-sized mappings are page-aligned
	s390/ctl_reg: make __ctl_load a full memory barrier
	usb: dwc2: gadget: Fix GUSBCFG.USBTRDTIM value
	be2net: fix status check in be_cmd_pmac_add()
	be2net: don't delete MAC on close on unprivileged BE3 VFs
	be2net: fix MAC addr setting on privileged BE3 VFs
	perf probe: Fix to show correct locations for events on modules
	net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types
	tipc: allocate user memory with GFP_KERNEL flag
	perf probe: Fix to probe on gcc generated functions in modules
	net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV
	sctp: check af before verify address in sctp_addr_id2transport
	ip6_tunnel, ip6_gre: fix setting of DSCP on encapsulated packets
	ravb: Fix use-after-free on `ifconfig eth0 down`
	mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings
	xfrm: fix stack access out of bounds with CONFIG_XFRM_SUB_POLICY
	xfrm: NULL dereference on allocation failure
	xfrm: Oops on error in pfkey_msg2xfrm_state()
	netfilter: use skb_to_full_sk in ip_route_me_harder
	watchdog: bcm281xx: Fix use of uninitialized spinlock.
	sched/loadavg: Avoid loadavg spikes caused by delayed NO_HZ accounting
	spi: When no dma_chan map buffers with spi_master's parent
	spi: fix device-node leaks
	regulator: tps65086: Fix expected switch DT node names
	regulator: tps65086: Fix DT node referencing in of_parse_cb
	ARM: OMAP2+: omap_device: Sync omap_device and pm_runtime after probe defer
	ARM: dts: OMAP3: Fix MFG ID EEPROM
	ARM64/ACPI: Fix BAD_MADT_GICC_ENTRY() macro implementation
	ARM: 8685/1: ensure memblock-limit is pmd-aligned
	tools arch: Sync arch/x86/lib/memcpy_64.S with the kernel
	x86/boot/KASLR: Fix kexec crash due to 'virt_addr' calculation bug
	x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
	x86/mm: Fix flush_tlb_page() on Xen
	ocfs2: o2hb: revert hb threshold to keep compatible
	iommu/vt-d: Don't over-free page table directories
	iommu: Handle default domain attach failure
	iommu/dma: Don't reserve PCI I/O windows
	iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid()
	iommu/amd: Fix interrupt remapping when disable guest_mode
	cpufreq: s3c2416: double free on driver init error path
	clk: scpi: don't add cpufreq device if the scpi dvfs node is disabled
	objtool: Fix another GCC jump table detection issue
	infiniband: hns: avoid gcc-7.0.1 warning for uninitialized data
	brcmfmac: avoid writing channel out of allocated array
	i2c: brcmstb: Fix START and STOP conditions
	mtd: nand: brcmnand: Check flash #WP pin status before nand erase/program
	arm64: fix NULL dereference in have_cpu_die()
	KVM: x86: fix emulation of RSM and IRET instructions
	KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
	KVM: x86: zero base3 of unusable segments
	KVM: nVMX: Fix exception injection
	Linux 4.9.36

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 184ce810ce
Author: Greg Kroah-Hartman
Date:   2017-07-05 16:18:14 +02:00

185 changed files with 1673 additions and 803 deletions

@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
* Ethernet controller node
Required properties:
- compatible: Should be "mediatek,mt7623-eth"
- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.

@@ -35,6 +35,15 @@ Optional Properties:
- broken-turn-around: If set, indicates the PHY device does not correctly
release the turn around line low at the end of a MDIO transaction.
- eee-broken-100tx:
- eee-broken-1000t:
- eee-broken-10gt:
- eee-broken-1000kx:
- eee-broken-10gkx4:
- eee-broken-10gkr:
Mark the corresponding energy efficient ethernet mode as broken and
request the ethernet to stop advertising it.
Example:
ethernet-phy@0 {

@@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
for applicable values

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 35
SUBLEVEL = 36
EXTRAVERSION =
NAME = Roaring Lionus

@@ -121,7 +121,7 @@
&i2c3 {
clock-frequency = <400000>;
at24@50 {
compatible = "at24,24c02";
compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};

@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
dev_err(dev, "failed to idle\n");
}
break;
case BUS_NOTIFY_BIND_DRIVER:
od = to_omap_device(pdev);
if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
pm_runtime_status_suspended(dev)) {
od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
pm_runtime_set_active(dev);
}
break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);

@@ -1211,15 +1211,15 @@ void __init adjust_lowmem_bounds(void)
high_memory = __va(arm_lowmem_limit - 1) + 1;
if (!memblock_limit)
memblock_limit = arm_lowmem_limit;
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
if (memblock_limit)
memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!memblock_limit)
memblock_limit = arm_lowmem_limit;
memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
if (memblock_end_of_DRAM() > arm_lowmem_limit) {

@@ -85,6 +85,18 @@
status = "okay";
pinctrl-0 = <&eth_pins>;
pinctrl-names = "default";
phy-handle = <&eth_phy0>;
mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;
eth_phy0: ethernet-phy@0 {
reg = <0>;
eee-broken-1000t;
};
};
};
&ir {

@@ -22,9 +22,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
(entry)->header.length != ACPI_MADT_GICC_LENGTH)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
(unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI

@@ -164,22 +164,25 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
* <symbol> is within the range +/- 4 GB of the PC.
* <symbol> is within the range +/- 4 GB of the PC when running
* in core kernel context. In module context, a movz/movk sequence
* is used, since modules may be loaded far away from the kernel
* when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
* @tmp: optional scratch register to be used if <dst> == sp, which
* is not allowed in an adrp instruction
*/
.macro adr_l, dst, sym, tmp=
.ifb \tmp
.macro adr_l, dst, sym
#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
.else
adrp \tmp, \sym
add \dst, \tmp, :lo12:\sym
.endif
#else
movz \dst, #:abs_g3:\sym
movk \dst, #:abs_g2_nc:\sym
movk \dst, #:abs_g1_nc:\sym
movk \dst, #:abs_g0_nc:\sym
#endif
.endm
/*
@@ -190,6 +193,7 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
#else
.ifb \tmp
adr_l \dst, \sym
ldr \dst, [\dst]
.else
adr_l \tmp, \sym
ldr \dst, [\tmp]
.endif
#endif
.endm
/*
@@ -206,8 +219,13 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
#else
adr_l \tmp, \sym
str \src, [\tmp]
#endif
.endm
/*

@@ -934,7 +934,7 @@ static bool have_cpu_die(void)
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
if (cpu_ops[any_cpu]->cpu_die)
if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
return true;
#endif
return false;

@@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -137,6 +138,7 @@ work_pending:
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
TRACE_IRQS_OFF
jal schedule
local_irq_disable # make sure need_resched and
@@ -173,6 +175,7 @@ syscall_exit_work:
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
TRACE_IRQS_ON
move a0, sp
jal syscall_trace_leave
b resume_userspace

@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
beq t0, t1, dtb_found
#endif
li t1, -2
beq a0, t1, dtb_found
move t2, a1
beq a0, t1, dtb_found
li t2, 0
dtb_found:

@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
}
if (!per_cpu(ready_count, core)) {
core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count_alloc, core) = core_rc;
/* Ensure ready_count is aligned to a cacheline boundary */
core_rc += dlinesz - 1;
core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}

@@ -199,6 +199,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;

@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
*
* When the @severity is EEH_LOG_PERM, the PE is going to be
* removed. Prior to that, the drivers for devices included in
* the PE will be closed. The drivers rely on working IO path
* to bring the devices to quiet state. Otherwise, PCI traffic
* from those devices after they are removed is like to cause
* another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*

@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
: \
: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
: "memory"); \
}
#define __ctl_store(array, low, high) { \

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
unsigned long order = get_order(size);
unsigned long p;
p = __get_free_pages(GFP_KERNEL, order);
p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_resum_oflow_cnt);
}
/* Given a set of registers, get the virtual addressi that was being accessed
* by the faulting instructions at tpc.
*/
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
unsigned int insn;
if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
return compute_effective_address(regs, insn,
(insn >> 25) & 0x1f);
}
return 0;
}
/* Attempt to handle non-resumable errors generated from userspace.
* Returns true if the signal was handled, false otherwise.
*/
bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
struct sun4v_error_entry *ent) {
unsigned int attrs = ent->err_attrs;
if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
unsigned long addr = ent->err_raddr;
siginfo_t info;
if (addr == ~(u64)0) {
/* This seems highly unlikely to ever occur */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
} else {
unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
PAGE_SIZE);
/* Break the unfortunate news. */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
addr);
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
page_cnt);
while (page_cnt-- > 0) {
if (pfn_valid(addr >> PAGE_SHIFT))
get_page(pfn_to_page(addr >> PAGE_SHIFT));
addr += PAGE_SIZE;
}
}
info.si_signo = SIGKILL;
info.si_errno = 0;
info.si_trapno = 0;
force_sig_info(info.si_signo, &info, current);
return true;
}
if (attrs & SUN4V_ERR_ATTRS_PIO) {
siginfo_t info;
info.si_signo = SIGBUS;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)sun4v_get_vaddr(regs);
force_sig_info(info.si_signo, &info, current);
return true;
}
/* Default to doing nothing */
return false;
}
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
if (!(regs->tstate & TSTATE_PRIV) &&
sun4v_nonresum_error_user_handled(regs, &local_copy)) {
/* DON'T PANIC: This userspace error was handled. */
return;
}
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {

@@ -430,9 +430,6 @@ void choose_random_location(unsigned long input,
{
unsigned long random_addr, min_addr;
/* By default, keep output position unchanged. */
*virt_addr = *output;
if (cmdline_find_option_bool("nokaslr")) {
warn("KASLR disabled: 'nokaslr' on cmdline.");
return;

@@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
unsigned long output_len)
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = (unsigned long)output;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#ifndef CONFIG_RELOCATABLE
if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
error("Destination address does not match LOAD_PHYSICAL_ADDR");
if ((unsigned long)output != virt_addr)
if (virt_addr != LOAD_PHYSICAL_ADDR)
error("Destination virtual address changed when not relocatable");
#endif

@@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
unsigned long output_size,
unsigned long *virt_addr)
{
/* No change from existing output location. */
*virt_addr = *output;
}
#endif

@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
/* There's no sense in having PEBS for non sampling events: */
if (!is_sampling_event(event))
return -EINVAL;
}
/*
* check that PEBS LBR correction does not conflict with

@@ -3164,13 +3164,16 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
struct cpu_hw_events *sibling;
struct intel_excl_cntrs *c;
c = per_cpu(cpu_hw_events, i).excl_cntrs;
sibling = &per_cpu(cpu_hw_events, i);
c = sibling->excl_cntrs;
if (c && c->core_id == core_id) {
cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc->excl_cntrs = c;
cpuc->excl_thread_id = 1;
if (!sibling->excl_thread_id)
cpuc->excl_thread_id = 1;
break;
}
}
@@ -3975,7 +3978,7 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",

@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
void hswep_uncore_cpu_init(void)
{
int pkg = topology_phys_to_logical_pkg(0);
int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

@@ -221,6 +221,9 @@ struct x86_emulate_ops {
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
int emul_flags;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */

@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
crystal_khz = 24000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:

@@ -2543,7 +2543,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
u64 smbase;
int ret;
if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
@@ -2592,11 +2592,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
return X86EMUL_UNHANDLEABLE;
}
if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
@@ -5312,6 +5312,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5326,6 +5327,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
emul_flags = ctxt->ops->get_hflags(ctxt);
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5359,7 +5361,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5388,7 +5390,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5442,7 +5444,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
special_insn:
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)

@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
((u64)1 << edx.split.bit_width_fixed) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;

@@ -2455,7 +2455,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;

@@ -4999,6 +4999,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
if (base3)
*base3 = 0;
return false;
}
@@ -5154,6 +5156,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
{
return emul_to_vcpu(ctxt)->arch.hflags;
}
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
}
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5193,6 +5205,8 @@ static const struct x86_emulate_ops emulate_ops = {
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
.set_hflags = emulator_set_hflags,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5245,7 +5259,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5636,8 +5649,6 @@ restart:
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (vcpu->arch.hflags != ctxt->emul_flags)
kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -6111,7 +6122,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
return emulator_write_emulated(ctxt, rip, instruction, 3,
&ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)

@@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup);
*/
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
unsigned long address;
unsigned long addr;
for (address = start; address <= end; address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address);
for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
const pgd_t *pgd_ref = pgd_offset_k(addr);
struct page *page;
/*
@@ -113,7 +113,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
pgd_t *pgd;
spinlock_t *pgt_lock;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
pgd = (pgd_t *)page_address(page) + pgd_index(addr);
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);

@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
if (info->si_addr == (void *)-1) {
if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
@@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void)
if (!kernel_managing_mpx_tables(current->mm))
return -EINVAL;
if (do_mpx_bt_fault()) {
force_sig(SIGSEGV, current);
/*
* The force_sig() is essentially "handling" this
* exception, so we do not pass up the error
* from do_mpx_bt_fault().
*/
}
return 0;
return do_mpx_bt_fault();
}
/*

@@ -263,8 +263,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
{
struct flush_tlb_info info;
if (end == 0)
end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
@@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
preempt_enable();
}

@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
unsigned long timeout;
int ret;
xen_blkif_get(blkif);
set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -665,7 +663,6 @@
print_stats(ring);
ring->xenblkd = NULL;
xen_blkif_put(blkif);
return 0;
}

@@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
wake_up(&ring->shutdown_wq);
ring->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -316,8 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
xen_blkif_disconnect(blkif);
WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
kfree(blkif->be->mode);
kfree(blkif->be);
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
@@ -512,8 +513,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
kfree(be->mode);
kfree(be);
return 0;
}

@@ -1870,7 +1870,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
portdev = container_of(work, struct ports_device, control_work);
portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;

@@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev)
of_node_put(child);
return ret;
}
}
/* Add the virtual cpufreq device */
cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
-1, NULL, 0);
if (IS_ERR(cpufreq_dev))
pr_warn("unable to register cpufreq device");
if (match->data != &scpi_dvfs_ops)
continue;
/* Add the virtual cpufreq device if it's DVFS clock provider */
cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
-1, NULL, 0);
if (IS_ERR(cpufreq_dev))
pr_warn("unable to register cpufreq device");
}
return 0;
}

@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}

@@ -707,7 +707,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
ge.timestamp = ktime_get_real_ns();
if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
int level = gpiod_get_value_cansleep(le->desc);
if (level)

@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
if (!(*out_ring && (*out_ring)->adev)) {
DRM_ERROR("Ring %d is not initialized on IP %d\n",
ring, ip_type);
return -EINVAL;
}
return 0;
}

@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -7721,10 +7722,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667))) ||
((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665)))
(adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665))
chip_name = "banks_k_2";
else
chip_name = "hainan";
break;

@@ -43,9 +43,13 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
| GRBM_GFX_INDEX__VCE_ALL_PIPE)
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;

@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;

@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
/*EPR# 419220 -HW limitation to to */
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
/*Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
}
}
return 0;
}

@@ -113,7 +113,11 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
bool DisableP2A;
enum {
ast_use_p2a,
ast_use_dt,
ast_use_defaults
} config_mode;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;

@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
return ret;
}
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
uint32_t data, jregd0, jregd1;
/* Defaults */
ast->config_mode = ast_use_defaults;
*scu_rev = 0xffffffff;
/* Check if we have device-tree properties */
if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
scu_rev)) {
/* We do, disable P2A access */
ast->config_mode = ast_use_dt;
DRM_INFO("Using device-tree for configuration\n");
return;
}
/* Not all families have a P2A bridge */
if (dev->pdev->device != PCI_CHIP_AST2000)
return;
/*
* The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
* is disabled. We force using P2A if VGA only mode bit
* is set D[7]
*/
jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/* Double check it's actually working */
data = ast_read32(ast, 0xf004);
if (data != 0xFFFFFFFF) {
/* P2A works, grab silicon revision */
ast->config_mode = ast_use_p2a;
DRM_INFO("Using P2A bridge for configuration\n");
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
*scu_rev = ast_read32(ast, 0x1207c);
return;
}
}
/* We have a P2A bridge but it's disabled */
DRM_INFO("P2A bridge disabled, using default configuration\n");
}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
uint32_t jreg, scu_rev;
/*
* If VGA isn't enabled, we need to enable now or subsequent
* access to the scratch registers will fail. We also inform
* our caller that it needs to POST the chip
* (Assumption: VGA not enabled -> need to POST)
*/
if (!ast_is_vga_enabled(dev)) {
ast_enable_vga(dev);
DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
*need_post = true;
} else
*need_post = false;
/* Enable extended register access */
ast_enable_mmio(dev);
ast_open_key(ast);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
uint32_t data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
switch (data & 0x0300) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
DRM_INFO("AST 1100 detected\n");
@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
}
/*
* If VGA isn't enabled, we need to enable now or subsequent
* access to the scratch registers will fail. We also inform
* our caller that it needs to POST the chip
* (Assumption: VGA not enabled -> need to POST)
*/
if (!ast_is_vga_enabled(dev)) {
ast_enable_vga(dev);
ast_enable_mmio(dev);
DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
*need_post = true;
} else
*need_post = false;
/* Check P2A Access */
ast->DisableP2A = true;
data = ast_read32(ast, 0xf004);
if (data != 0xFFFFFFFF)
ast->DisableP2A = false;
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
if (ast->DisableP2A == false) {
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
if (ast->chip == AST2300 &&
(scu_rev & 0x300) == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 &&
(scu_rev & 0x300) == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
break;
}
@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
if (ast->DisableP2A)
{
switch (ast->config_mode) {
case ast_use_dt:
/*
* If some properties are missing, use reasonable
* defaults for AST2400
*/
if (of_property_read_u32(np, "aspeed,mcr-configuration",
&mcr_cfg))
mcr_cfg = 0x00000577;
if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
&mcr_scu_mpll))
mcr_scu_mpll = 0x000050C0;
if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
&mcr_scu_strap))
mcr_scu_strap = 0;
break;
case ast_use_p2a:
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
mcr_cfg = ast_read32(ast, 0x10004);
mcr_scu_mpll = ast_read32(ast, 0x10120);
mcr_scu_strap = ast_read32(ast, 0x10170);
break;
case ast_use_defaults:
default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
ast->mclk = 396;
return 0;
}
if (mcr_cfg & 0x40)
ast->dram_bus_width = 16;
else
{
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x10004);
ast->dram_bus_width = 32;
if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (data & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (data & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
}
data = ast_read32(ast, 0x10120);
data2 = ast_read32(ast, 0x10170);
if (data2 & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = data & 0x1f;
num = (data & 0x3fe0) >> 5;
data = (data & 0xc000) >> 14;
switch (data) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
div = 0x1;
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (mcr_cfg & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (mcr_cfg & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
if (mcr_scu_strap & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = mcr_scu_mpll & 0x1f;
num = (mcr_scu_mpll & 0x3fe0) >> 5;
dsel = (mcr_scu_mpll & 0xc000) >> 14;
switch (dsel) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
break;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}

@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
if (ast->DisableP2A == false)
{
if (ast->config_mode == ast_use_p2a) {
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
ast_init_3rdtx(dev);
}
else
{
} else {
if (ast->tx_chip_type != AST_TX_NONE)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}

@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
/*
* XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
* drm_mm into giving out a low IOVA after address space
* rollover. This needs a proper fix.
*/
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
DRM_MM_SEARCH_DEFAULT);
mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
if (ret != -ENOSPC)
break;

@@ -115,6 +115,8 @@ MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@@ -1650,6 +1652,7 @@ static int si_init_microcode(struct radeon_device *rdev)
int err;
int new_fw = 0;
bool new_smc = false;
bool si58_fw = false;
DRM_DEBUG("\n");
@@ -1742,6 +1745,10 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
/* this memory configuration requires special firmware */
if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
si58_fw = true;
DRM_INFO("Loading %s Microcode\n", new_chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1852,10 @@
}
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
if (si58_fw)
snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);

@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
drm_ht_remove(&man->resources);
kfree(man);
}

@@ -427,6 +427,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
goto out_unlock;
/*
* The HID over I2C specification states that if a DEVICE needs time
* after the PWR_ON request, it should utilise CLOCK stretching.
* However, it has been observered that the Windows driver provides a
* 1ms sleep between the PWR_ON and RESET requests and that some devices
* rely on this.
*/
usleep_range(1000, 5000);
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);

@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
u8 *tmp_buf;
int len = 0;
int xfersz = brcmstb_i2c_get_xfersz(dev);
u32 cond, cond_per_msg;
if (dev->is_suspended)
return -EBUSY;
@@ -481,10 +482,11 @@
pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
cond = ~COND_START_STOP;
else
brcmstb_set_i2c_start_stop(dev,
COND_RESTART | COND_NOSTOP);
cond = COND_RESTART | COND_NOSTOP;
brcmstb_set_i2c_start_stop(dev, cond);
/* Send slave address */
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@
}
}
cond_per_msg = cond;
/* Perform data transfer */
while (len) {
bytes_to_xfer = min(len, xfersz);
if (len <= xfersz && i == (num - 1))
brcmstb_set_i2c_start_stop(dev,
~(COND_START_STOP));
if (len <= xfersz) {
if (i == (num - 1))
cond_per_msg = cond_per_msg &
~(COND_RESTART | COND_NOSTOP);
else
cond_per_msg = cond;
} else {
cond_per_msg = (cond_per_msg & ~COND_RESTART) |
COND_NOSTOP;
}
brcmstb_set_i2c_start_stop(dev, cond_per_msg);
rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@
len -= bytes_to_xfer;
tmp_buf += bytes_to_xfer;
cond_per_msg = COND_NOSTART | COND_NOSTOP;
}
}

@@ -1267,6 +1267,7 @@ void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
u32 doorbell[2];
doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
doorbell[1] = 0;
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);

@@ -3857,11 +3857,9 @@ static void irte_ga_prepare(void *entry,
u8 vector, u32 dest_apicid, int devid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
struct iommu_dev_data *dev_data = search_dev_data(devid);
irte->lo.val = 0;
irte->hi.val = 0;
irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
irte->lo.fields_remap.int_type = delivery_mode;
irte->lo.fields_remap.dm = dest_mode;
irte->hi.fields.vector = vector;
@@ -3917,10 +3915,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
struct irte_ga *irte = (struct irte_ga *) entry;
struct iommu_dev_data *dev_data = search_dev_data(devid);
if (!dev_data || !dev_data->use_vapic) {
if (!dev_data || !dev_data->use_vapic ||
!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
irte->lo.fields_remap.guest_mode = 0;
modify_irte_ga(devid, index, irte, NULL);
}
}

@@ -695,9 +695,9 @@ out_clear_state:
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
mmput(mm);
out_free:
mmput(mm);
free_pasid_state(pasid_state);
out:

@@ -112,8 +112,7 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
unsigned long lo, hi;
resource_list_for_each_entry(window, &bridge->windows) {
if (resource_type(window->res) != IORESOURCE_MEM &&
resource_type(window->res) != IORESOURCE_IO)
if (resource_type(window->res) != IORESOURCE_MEM)
continue;
lo = iova_pfn(iovad, window->res->start - window->offset);

@@ -1144,7 +1144,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
level_pfn = pfn & level_mask(level - 1);
level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)

@@ -383,36 +383,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
if (ret) {
kfree(device);
return ret;
}
if (ret)
goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
sysfs_remove_link(&dev->kobj, "iommu_group");
kfree(device);
return -ENOMEM;
ret = -ENOMEM;
goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
sysfs_remove_link(&dev->kobj, "iommu_group");
kfree(device);
return ret;
goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -424,8 +418,10 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
__iommu_attach_device(group->domain, dev);
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -436,6 +432,21 @@ rename:
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
err_put_group:
mutex_lock(&group->mutex);
list_del(&device->list);
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
err_free_name:
kfree(device->name);
err_remove_link:
sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
kfree(device);
pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
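The restructured error handling above is the kernel's standard goto-unwind ladder: each acquired resource gets a label, a failure jumps to the label that releases everything acquired so far, and the labels run in reverse acquisition order so one exit path covers every partial-failure case. A minimal sketch of the shape (acquire_a/b/c and release_a/b are hypothetical stand-ins, not functions from this patch):

	int acquire_a(void); int acquire_b(void); int acquire_c(void);	/* hypothetical */
	void release_a(void); void release_b(void);			/* hypothetical */

	static int setup_example(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto err;
		ret = acquire_b();
		if (ret)
			goto err_release_a;
		ret = acquire_c();
		if (ret)
			goto err_release_b;
		return 0;

	err_release_b:		/* unwind in reverse acquisition order */
		release_b();
	err_release_a:
		release_a();
	err:
		return ret;
	}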


@@ -1095,6 +1095,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
/*
* Increment the unmapped blocks. This prevents a race between the
* passdown io and reallocation of freed blocks.
*/
r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
if (r) {
metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
bio_io_error(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
return;
}
discard_parent = bio_alloc(GFP_NOIO, 1);
if (!discard_parent) {
DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1115,19 +1128,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
end_discard(&op, r);
}
}
/*
* Increment the unmapped blocks. This prevents a race between the
* passdown io and reallocation of freed blocks.
*/
r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
if (r) {
metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
bio_io_error(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
return;
}
}
static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)


@@ -101,6 +101,9 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -765,6 +768,31 @@ enum {
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
u32 mask, u32 expected_val,
unsigned long timeout_ms)
{
unsigned long limit;
u32 val;
if (!timeout_ms)
timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
cpu_relax();
} while (time_after(limit, jiffies));
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);
return -ETIMEDOUT;
}
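bcmnand_ctrl_poll_status() above is the usual jiffies-based poll loop: compute the deadline once, re-read the status register until the masked bits match, and return -ETIMEDOUT once the deadline passes. The same idiom in isolation (a kernel-style sketch; read_status() is a hypothetical register accessor used only for illustration):

	u32 read_status(void);	/* hypothetical register accessor */

	static int poll_masked_status(u32 mask, u32 expected, unsigned long timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
		u32 val;

		do {
			val = read_status();		/* hypothetical register read */
			if ((val & mask) == expected)
				return 0;		/* condition met in time */
			cpu_relax();			/* ease off while busy-waiting */
		} while (time_after(deadline, jiffies));

		return -ETIMEDOUT;
	}

One caveat shared with the driver code: if the thread is preempted between the last register read and the time_after() check, the loop can report a timeout even though a fresh read would have succeeded, so stricter variants re-check the status once more after the loop exits.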
static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
u32 val = en ? CS_SELECT_NAND_WP : 0;
@@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
static int old_wp = -1;
int ret;
if (old_wp != wp) {
dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
old_wp = wp;
}
/*
* make sure ctrl/flash ready before and after
* changing state of #WP pin
*/
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
NAND_STATUS_READY,
NAND_CTRL_RDY |
NAND_STATUS_READY, 0);
if (ret)
return;
brcmnand_set_wp(ctrl, wp);
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
NAND_STATUS_READY |
NAND_STATUS_WP,
NAND_CTRL_RDY |
NAND_STATUS_READY |
(wp ? 0 : NAND_STATUS_WP), 0);
if (ret)
dev_err_ratelimited(&host->pdev->dev,
"nand #WP expected %s\n",
wp ? "on" : "off");
}
}
@@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data)
static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
u32 intfc;
int ret;
dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
WARN_ON(!(intfc & INTFC_CTLR_READY));
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
WARN_ON(ret);
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,


@@ -2825,8 +2825,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
if (ret)
if (ret) {
netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
}
/*
* Initialize DMA related features


@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
hw_if->init(pdata);
ret = hw_if->init(pdata);
if (ret)
return ret;
ret = phy_if->phy_start(pdata);
if (ret)


@@ -1097,7 +1097,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
{
#ifdef CONFIG_INET
struct tcphdr *th;
int len, nw_off, tcp_opt_len;
int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;


@@ -48,8 +48,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
u8 lmac_count;
u8 max_lmac;
u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -1159,13 +1160,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
bgx->lmac_count++;
return AE_OK;
}


@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;


@@ -319,6 +319,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
* address
*/
if (BEx_chip(adapter) && be_virtfn(adapter) &&
!check_privilege(adapter, BE_PRIV_FILTMGMT))
return -EPERM;
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -3630,7 +3637,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
be_dev_mac_del(adapter, adapter->pmac_id[0]);
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT))
be_dev_mac_del(adapter, adapter->pmac_id[0]);
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3783,8 +3794,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;


@@ -2951,7 +2951,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
}
/* try reuse page */
if (unlikely(page_count(page) != 1))
if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */


@@ -1604,8 +1604,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->hw_features = NETIF_F_SG;
if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
}
netdev->features |= netdev->hw_features;


@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
korina_free_ring(dev);
napi_disable(&lp->napi);
korina_free_ring(dev);
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
korina_free_ring(dev);
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
korina_free_ring(dev);
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);


@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
__func__);
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}


@@ -283,13 +283,15 @@ struct mlx5e_dma_info {
struct mlx5e_rx_am_stats {
int ppms; /* packets per msec */
int bpms; /* bytes per msec */
int epms; /* events per msec */
};
struct mlx5e_rx_am_sample {
ktime_t time;
unsigned int pkt_ctr;
u16 event_ctr;
ktime_t time;
u32 pkt_ctr;
u32 byte_ctr;
u16 event_ctr;
};
struct mlx5e_rx_am { /* Adaptive Moderation */


@@ -1183,11 +1183,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
(BIT(1) << HWTSTAMP_TX_ON);
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
(BIT(1) << HWTSTAMP_FILTER_ALL);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
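The timestamping fix corrects a subtle misuse of the BIT() macro: BIT(1) is 2, so (BIT(1) << x) evaluates to 2 << x and sets bit x+1 instead of bit x. A standalone check of the arithmetic (userspace C; the HWTSTAMP_* values below match the uapi enum, where TX_OFF is 0 and TX_ON is 1):

	#include <assert.h>

	#define BIT(n) (1UL << (n))

	int main(void)
	{
		enum { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1 };

		/* Buggy form: BIT(1) == 2, so this sets bits 1 and 2 (0x6). */
		unsigned long buggy = (BIT(1) << HWTSTAMP_TX_OFF) |
				      (BIT(1) << HWTSTAMP_TX_ON);

		/* Fixed form: sets bits 0 and 1 (0x3), as intended. */
		unsigned long fixed = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

		assert(buggy == 0x6);
		assert(fixed == 0x3);
		return 0;
	}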


@@ -3846,7 +3846,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return netdev;
err_cleanup_nic:
profile->cleanup(priv);
if (profile->cleanup)
profile->cleanup(priv);
free_netdev(netdev);
return NULL;


@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
mlx5e_am_step(am);
}
#define IS_SIGNIFICANT_DIFF(val, ref) \
(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
struct mlx5e_rx_am_stats *prev)
{
int diff;
if (!prev->ppms)
return curr->ppms ? MLX5E_AM_STATS_BETTER :
if (!prev->bpms)
return curr->bpms ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_SAME;
diff = curr->ppms - prev->ppms;
if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
return (diff > 0) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
if (!prev->epms)
return curr->epms ? MLX5E_AM_STATS_WORSE :
MLX5E_AM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
diff = curr->epms - prev->epms;
if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
return (diff < 0) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
return MLX5E_AM_STATS_SAME;
}
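Note that IS_SIGNIFICANT_DIFF() uses a strict greater-than: with prev->bpms = 100 and curr->bpms = 110, (100 * abs(110 - 100)) / 100 evaluates to exactly 10, which is not > 10, so a 10% change still counts as noise and the function falls through to the packet-rate and then event-rate comparisons. The reordering also makes byte rate the primary signal, so the decision no longer flips on packet-rate jitter when actual throughput is flat.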
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
{
s->time = ktime_get();
s->pkt_ctr = rq->stats.packets;
s->byte_ctr = rq->stats.bytes;
s->event_ctr = rq->cq.event_ctr;
}
#define MLX5E_AM_NEVENTS 64
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
{
/* u32 holds up to 71 minutes, should be enough */
u32 delta_us = ktime_us_delta(end->time, start->time);
unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
start->byte_ctr);
if (!delta_us)
return;
curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
delta_us);
}
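BIT_GAP() computes the forward distance between two free-running counters modulo 2^bits, which keeps the delta correct when the end sample has wrapped past zero — the plain subtraction it replaces went wrong exactly at the wrap. A worked check of the macro's arithmetic (userspace C, mirroring the definitions above):

	#include <assert.h>

	#define BIT_ULL(n)		(1ULL << (n))
	#define BITS_PER_TYPE(type)	(sizeof(type) * 8)
	#define BIT_GAP(bits, end, start) \
		((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

	int main(void)
	{
		/* No wrap: an ordinary difference. */
		assert(BIT_GAP(BITS_PER_TYPE(unsigned short), 1000, 900) == 100);

		/* Wrapped u16 counter: 0xFFF0 -> 0x0010 really is 0x20 events,
		 * where a plain (end - start) would produce a huge bogus delta. */
		assert(BIT_GAP(BITS_PER_TYPE(unsigned short), 0x0010, 0xFFF0) == 0x20);
		return 0;
	}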
void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
switch (am->state) {
case MLX5E_AM_MEASURE_IN_PROGRESS:
nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
am->start_sample.event_ctr);
if (nevents < MLX5E_AM_NEVENTS)
break;
mlx5e_am_sample(rq, &end_sample);


@@ -155,8 +155,9 @@ static struct mlx5_profile profile[] = {
},
};
#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2
#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2
#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -956,6 +957,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
*/
dev->state = MLX5_DEVICE_STATE_UP;
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
if (err) {
dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
FW_PRE_INIT_TIMEOUT_MILI);
goto out;
}
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");


@@ -1178,7 +1178,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
struct mlxsw_sp_nexthop_group *nh_grp,
bool reallocate)
{
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
@@ -1193,7 +1194,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
continue;
}
if (nh->update) {
if (nh->update || reallocate) {
err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
adj_index, nh);
if (err)
@@ -1254,7 +1255,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1282,7 +1284,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;


@@ -229,18 +229,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
int ring_size;
int i;
/* Free RX skb ringbuffer */
if (priv->rx_skb[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++)
dev_kfree_skb(priv->rx_skb[q][i]);
}
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -269,6 +257,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
/* Free RX skb ringbuffer */
if (priv->rx_skb[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++)
dev_kfree_skb(priv->rx_skb[q][i]);
}
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
/* Free TX skb ringbuffer.
* SKBs are freed by ravb_tx_free() call above.
*/


@@ -2801,6 +2801,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_port_dummy_op_int,
.vswitching_restore = efx_port_dummy_op_int,
.vswitching_remove = efx_port_dummy_op_void,
#endif
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2902,4 +2907,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_port_dummy_op_int,
.vswitching_restore = efx_port_dummy_op_int,
.vswitching_remove = efx_port_dummy_op_void,
#endif
};


@@ -346,6 +346,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *


@@ -17,6 +17,7 @@
#include <linux/phy.h>
#define TI_DP83848C_PHY_ID 0x20005ca0
#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
{ TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};


@@ -113,12 +113,16 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (ret)
if (ret &&
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (ret)
if (ret &&
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",


@@ -1200,7 +1200,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
if (phydev->supported & SUPPORTED_FIBRE) {
if (phydev->supported & SUPPORTED_FIBRE &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
if (err < 0)
goto error;


@@ -1384,6 +1384,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
/* Mask prohibited EEE modes */
val &= ~phydev->eee_broken_modes;
phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
return 0;


@@ -1145,6 +1145,43 @@ static int genphy_config_advert(struct phy_device *phydev)
return changed;
}
/**
* genphy_config_eee_advert - disable unwanted eee mode advertisement
* @phydev: target phy_device struct
*
* Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy
* efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
* changed, and 1 if it has changed.
*/
static int genphy_config_eee_advert(struct phy_device *phydev)
{
int broken = phydev->eee_broken_modes;
int old_adv, adv;
/* Nothing to disable */
if (!broken)
return 0;
/* If the following call fails, we assume that EEE is not
* supported by the phy. If we read 0, EEE is not advertised.
* In both cases, we don't need to continue.
*/
adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
if (adv <= 0)
return 0;
old_adv = adv;
adv &= ~broken;
/* Advertising remains unchanged with the broken mask */
if (old_adv == adv)
return 0;
phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
return 1;
}
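The masking itself is a plain and-not. With the MDIO_AN_EEE_ADV bit values from include/uapi/linux/mdio.h (assumed here; the patch does not show them), disabling a broken 100TX mode looks like this:

	#include <assert.h>

	#define MDIO_EEE_100TX	0x0002	/* 100TX EEE capability bit */
	#define MDIO_EEE_1000T	0x0004	/* 1000T EEE capability bit */

	int main(void)
	{
		int adv = MDIO_EEE_100TX | MDIO_EEE_1000T; /* what the PHY advertises */
		int broken = MDIO_EEE_100TX;	/* quirk, e.g. from eee-broken-100tx */

		adv &= ~broken;		/* the operation genphy_config_eee_advert() performs */
		assert(adv == MDIO_EEE_1000T);	/* only 1000T is left advertised */
		return 0;
	}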
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
* @phydev: target phy_device struct
@@ -1203,15 +1240,20 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_config_aneg(struct phy_device *phydev)
{
int result;
int err, changed;
changed = genphy_config_eee_advert(phydev);
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
result = genphy_config_advert(phydev);
if (result < 0) /* error */
return result;
if (result == 0) {
err = genphy_config_advert(phydev);
if (err < 0) /* error */
return err;
changed |= err;
if (changed == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated?
*/
@@ -1221,16 +1263,16 @@ int genphy_config_aneg(struct phy_device *phydev)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
result = 1; /* do restart aneg */
changed = 1; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before.
*/
if (result > 0)
result = genphy_restart_aneg(phydev);
if (changed > 0)
return genphy_restart_aneg(phydev);
return result;
return 0;
}
EXPORT_SYMBOL(genphy_config_aneg);
@@ -1588,6 +1630,33 @@ static void of_set_phy_supported(struct phy_device *phydev)
__set_phy_supported(phydev, max_speed);
}
static void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
u32 broken = 0;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return;
if (!node)
return;
if (of_property_read_bool(node, "eee-broken-100tx"))
broken |= MDIO_EEE_100TX;
if (of_property_read_bool(node, "eee-broken-1000t"))
broken |= MDIO_EEE_1000T;
if (of_property_read_bool(node, "eee-broken-10gt"))
broken |= MDIO_EEE_10GT;
if (of_property_read_bool(node, "eee-broken-1000kx"))
broken |= MDIO_EEE_1000KX;
if (of_property_read_bool(node, "eee-broken-10gkx4"))
broken |= MDIO_EEE_10GKX4;
if (of_property_read_bool(node, "eee-broken-10gkr"))
broken |= MDIO_EEE_10GKR;
phydev->eee_broken_modes = broken;
}
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -1625,6 +1694,11 @@ static int phy_probe(struct device *dev)
of_set_phy_supported(phydev);
phydev->advertising = phydev->supported;
/* Get the EEE modes we want to prohibit. We will ask
* the PHY to stop advertising these modes later on
*/
of_set_phy_eee_broken(phydev);
/* Set the state to READY by default */
phydev->state = PHY_READY;


@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
/* With mergeable buffers we align buffer address and use the low bits to
* encode its true size. Buffer size is up to 1 page so we need to align to
* square root of page size to ensure we reserve enough bits to encode the true
* size.
*/
#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
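The (PAGE_SHIFT + 1) / 2 shift is a rounded-up square root in log space: the truesize of a mergeable buffer (up to one page) is encoded in the low bits of its address in units of the alignment, so 2^shift encodable values times 2^shift bytes per unit must cover 2^PAGE_SHIFT bytes. A quick check of the arithmetic for the common 4 KiB page (userspace C, PAGE_SHIFT assumed to be 12):

	#include <assert.h>

	#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */
	#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)

	int main(void)
	{
		/* 64-byte alignment leaves 6 low bits free for the size encoding... */
		assert(MERGEABLE_BUFFER_MIN_ALIGN_SHIFT == 6);

		/* ...and 2^6 encodable values * 64 bytes per step = 4096 bytes,
		 * so every buffer size up to a full page can be represented. */
		assert((1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) * 64 == 4096);
		return 0;
	}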
#define VIRTNET_DRIVER_VERSION "1.0.0"


@@ -36,12 +36,14 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
static bool add_fib_rules = true;
static unsigned int vrf_net_id;
struct net_vrf {
struct rtable __rcu *rth;
@@ -1237,6 +1239,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
bool *add_fib_rules;
struct net *net;
int err;
if (!data || !data[IFLA_VRF_TABLE])
@@ -1252,13 +1256,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto out;
if (add_fib_rules) {
net = dev_net(dev);
add_fib_rules = net_generic(net, vrf_net_id);
if (*add_fib_rules) {
err = vrf_add_fib_rules(dev);
if (err) {
unregister_netdevice(dev);
goto out;
}
add_fib_rules = false;
*add_fib_rules = false;
}
out:
@@ -1341,16 +1347,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
.notifier_call = vrf_device_event,
};
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
bool *add_fib_rules = net_generic(net, vrf_net_id);
*add_fib_rules = true;
return 0;
}
static struct pernet_operations vrf_net_ops __net_initdata = {
.init = vrf_netns_init,
.id = &vrf_net_id,
.size = sizeof(bool),
};
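Because vrf_net_ops sets both .id and .size, the pernet core allocates a sizeof(bool) slot for each network namespace and net_generic(net, vrf_net_id) returns that slot. The flag that used to be a single file-scoped static therefore now exists once per namespace, so each namespace installs the FIB rules exactly once, when its first VRF device is created.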
static int __init vrf_init_module(void)
{
int rc;
register_netdevice_notifier(&vrf_notifier_block);
rc = rtnl_link_register(&vrf_link_ops);
rc = register_pernet_subsys(&vrf_net_ops);
if (rc < 0)
goto error;
rc = rtnl_link_register(&vrf_link_ops);
if (rc < 0) {
unregister_pernet_subsys(&vrf_net_ops);
goto error;
}
return 0;
error:


@@ -2285,7 +2285,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
if (f->state & NUD_PERMANENT)
if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;


@@ -5913,7 +5913,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j;
u32 total;
u32 chaninfo;
u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5961,33 +5960,36 @@
ch.bw == BRCMU_CHAN_BW_80)
continue;
channel = band->channels;
index = band->n_channels;
channel = NULL;
for (j = 0; j < band->n_channels; j++) {
if (channel[j].hw_value == ch.control_ch_num) {
index = j;
if (band->channels[j].hw_value == ch.control_ch_num) {
channel = &band->channels[j];
break;
}
}
channel[index].center_freq =
ieee80211_channel_to_frequency(ch.control_ch_num,
band->band);
channel[index].hw_value = ch.control_ch_num;
if (!channel) {
/* It seems firmware supports some channel we never
* considered. Something new in IEEE standard?
*/
brcmf_err("Ignoring unexpected firmware channel %d\n",
ch.control_ch_num);
continue;
}
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
*/
if (ch.bw == BRCMU_CHAN_BW_80) {
channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
} else if (ch.bw == BRCMU_CHAN_BW_40) {
brcmf_update_bw40_channel_flag(&channel[index], &ch);
brcmf_update_bw40_channel_flag(channel, &ch);
} else {
/* enable the channel and disable other bandwidths
* for now as mentioned order assure they are enabled
* for subsequent chanspecs.
*/
channel[index].flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
channel->flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
@@ -5995,11 +5997,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
&chaninfo);
if (!err) {
if (chaninfo & WL_CHAN_RADAR)
channel[index].flags |=
channel->flags |=
(IEEE80211_CHAN_RADAR |
IEEE80211_CHAN_NO_IR);
if (chaninfo & WL_CHAN_PASSIVE)
channel[index].flags |=
channel->flags |=
IEEE80211_CHAN_NO_IR;
}
}


@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
thermal_zone_device_unregister(mvm->tz_device.tzone);
mvm->tz_device.tzone = NULL;
if (mvm->tz_device.tzone) {
thermal_zone_device_unregister(mvm->tz_device.tzone);
mvm->tz_device.tzone = NULL;
}
}
static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
mvm->cooling_dev.cdev = NULL;
if (mvm->cooling_dev.cdev) {
thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
mvm->cooling_dev.cdev = NULL;
}
}
#endif /* CONFIG_THERMAL */


@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_packets = 0;
unsigned int index;
spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
/* Aggregate tx and rx stats from each queue */
for (index = 0; index < num_queues; ++index) {
for (index = 0; index < vif->num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
}
out:
spin_unlock(&vif->lock);
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;


@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
unsigned int queue_index;
xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
xenvif_deinit_queue(&be->vif->queues[queue_index]);
spin_lock(&be->vif->lock);
vfree(be->vif->queues);
be->vif->num_queues = 0;
be->vif->queues = NULL;
spin_unlock(&be->vif->lock);
xenvif_disconnect_ctrl(be->vif);
}
}
@@ -1040,6 +1051,8 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;


@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}


@@ -87,7 +87,9 @@ static int read_pmem(struct page *page, unsigned int off,
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
kunmap_atomic(mem);
return rc;
if (rc)
return -EIO;
return 0;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,


@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
return 0;
}
static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
{
u32 value;
value = readl(padcfg0);
if (input) {
value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
} else {
value &= ~PADCFG0_GPIOTXDIS;
value |= PADCFG0_GPIORXDIS;
}
writel(value, padcfg0);
}
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
/* Disable TX buffer and enable RX (this will be input) */
value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
/* Disable TX buffer and enable RX (this will be input) */
__intel_gpio_set_direction(padcfg0, true);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
value = readl(padcfg0);
if (input)
value |= PADCFG0_GPIOTXDIS;
else
value &= ~PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
__intel_gpio_set_direction(padcfg0, input);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);


@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 8:
case 7:
case 6:
case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:


@@ -156,19 +156,19 @@ static struct tps65086_regulator regulators[] = {
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)),
TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
};
static int tps65086_of_parse_cb(struct device_node *dev,
static int tps65086_of_parse_cb(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
int ret;
/* Check for 25mV step mode */
if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
switch (desc->id) {
case BUCK1:
case BUCK2:
@@ -192,7 +192,7 @@ static int tps65086_of_parse_cb(struct device_node *dev,
}
/* Check for decay mode */
if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) {
if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
ret = regmap_write_bits(config->regmap,
regulators[desc->id].decay_reg,
regulators[desc->id].decay_mask,

View File

@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;


@@ -5951,18 +5951,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}


@@ -1555,7 +1555,8 @@ typedef struct {
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t data[58];
__le16 attr_n_length;
uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
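The layout change carves a __le16 attr_n_length out of the former anonymous byte array without disturbing the entry size: 1 + 1 + 2 + 56 + 4 = 64 bytes, the same as the old 1 + 1 + 58 + 4. A compile-time guard for that invariant might look like the following (the fixed 64-byte ATIO entry size is inferred from the arithmetic, not stated in the patch):

	/* e.g. in an init function, to catch accidental layout changes at build time */
	BUILD_BUG_ON(sizeof(struct atio) != 64);	/* 1 + 1 + 2 + 56 + 4 */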

Some files were not shown because too many files have changed in this diff.