Merge android-4.9.165 (72b54df) into msm-4.9

* refs/heads/tmp-72b54df:
  Linux 4.9.165
  KVM: X86: Fix residual mmio emulation request to userspace
  KVM: nVMX: Ignore limit checks on VMX instructions using flat segments
  KVM: nVMX: Sign extend displacements of VMX instr's mem operands
  drm/radeon/evergreen_cs: fix missing break in switch statement
  media: uvcvideo: Avoid NULL pointer dereference at the end of streaming
  rcu: Do RCU GP kthread self-wakeup from softirq and interrupt
  md: Fix failed allocation of md_register_thread
  perf intel-pt: Fix divide by zero when TSC is not available
  perf intel-pt: Fix overlap calculation for padding
  perf auxtrace: Define auxtrace record alignment
  perf intel-pt: Fix CYC timestamp calculation after OVF
  bcache: never writeback a discard operation
  PM / wakeup: Rework wakeup source timer cancellation
  nfsd: fix wrong check in write_v4_end_grace()
  nfsd: fix memory corruption caused by readdir
  NFS: Don't recoalesce on error in nfs_pageio_complete_mirror()
  NFS: Fix an I/O request leakage in nfs_do_recoalesce
  NFS: Fix I/O request leakages
  dm: fix to_sector() for 32bit
  ARM: s3c24xx: Fix boolean expressions in osiris_dvs_notify
  powerpc/ptrace: Simplify vr_get/set() to avoid GCC warning
  powerpc: Fix 32-bit KVM-PR lockup and host crash with MacOS guest
  powerpc/83xx: Also save/restore SPRG4-7 during suspend
  powerpc/powernv: Make opal log only readable by root
  powerpc/wii: properly disable use of BATs when requested.
  powerpc/32: Clear on-stack exception marker upon exception return
  jbd2: fix compile warning when using JBUFFER_TRACE
  jbd2: clear dirty flag when revoking a buffer from an older transaction
  serial: 8250_pci: Have ACCES cards that use the four port Pericom PI7C9X7954 chip use the pci_pericom_setup()
  serial: 8250_pci: Fix number of ports for ACCES serial cards
  8250: FIX Fourth port offset of Pericom PI7C9X7954 boards
  serial: 8250_of: assume reg-shift of 2 for mrvl,mmp-uart
  serial: uartps: Fix stuck ISR if RX disabled with non-empty FIFO
  drm/i915: Relax mmap VMA check
  i2c: tegra: fix maximum transfer size
  parport_pc: fix find_superio io compare code, should use equal test.
  intel_th: Don't reference unassigned outputs
  device property: Fix the length used in PROPERTY_ENTRY_STRING()
  kernel/sysctl.c: add missing range check in do_proc_dointvec_minmax_conv
  mm/vmalloc: fix size check for remap_vmalloc_range_partial()
  mm: hwpoison: fix thp split handing in soft_offline_in_use_page()
  nfit: acpi_nfit_ctl(): Check out_obj->type in the right place
  clk: ingenic: Fix doc of ingenic_cgu_div_info
  clk: ingenic: Fix round_rate misbehaving with non-integer dividers
  clk: clk-twl6040: Fix imprecise external abort for pdmclk
  ext2: Fix underflow in ext2_max_size()
  ext4: fix crash during online resizing
  cpufreq: pxa2xx: remove incorrect __init annotation
  cpufreq: tegra124: add missing of_node_put()
  libertas_tf: don't set URB_ZERO_PACKET on IN USB transfer
  crypto: pcbc - remove bogus memcpy()s with src == dest
  Btrfs: fix corruption reading shared and compressed extents after hole punching
  btrfs: ensure that a DUP or RAID1 block group has exactly two stripes
  m68k: Add -ffreestanding to CFLAGS
  splice: don't merge into linked buffers
  fs/devpts: always delete dcache dentry-s in dput()
  scsi: target/iscsi: Avoid iscsit_release_commands_from_conn() deadlock
  scsi: sd: Optimal I/O size should be a multiple of physical block size
  scsi: virtio_scsi: don't send sc payload with tmfs
  s390/virtio: handle find on invalid queue gracefully
  clocksource/drivers/exynos_mct: Clear timer interrupt when shutdown
  clocksource/drivers/exynos_mct: Move one-shot check from tick clear to ISR
  regulator: s2mpa01: Fix step values for some LDOs
  regulator: s2mps11: Fix steps for buck7, buck8 and LDO35
  spi: pxa2xx: Setup maximum supported DMA transfer length
  spi: ti-qspi: Fix mmap read when more than one CS in use
  ACPI / device_sysfs: Avoid OF modalias creation for removed device
  tracing: Do not free iter->trace in fail path of tracing_open_pipe()
  tracing: Use strncpy instead of memcpy for string keys in hist triggers
  CIFS: Fix read after write for files with read caching
  CIFS: Do not reset lease state to NONE on lease break
  crypto: arm64/aes-ccm - fix logical bug in AAD MAC handling
  crypto: hash - set CRYPTO_TFM_NEED_KEY if ->setkey() fails
  libnvdimm: Fix altmap reservation size calculation
  libnvdimm/pmem: Honor force_raw for legacy pmem regions
  libnvdimm/label: Clear 'updating' flag after label-set update
  stm class: Prevent division by zero
  tmpfs: fix uninitialized return value in shmem_link
  net: set static variable an initial value in atl2_probe()
  nfp: bpf: fix ALU32 high bits clearance bug
  nfp: bpf: fix code-gen bug on BPF_ALU | BPF_XOR | BPF_K
  net: thunderx: make CFG_DONE message to run through generic send-ack sequence
  mac80211_hwsim: propagate genlmsg_reply return code
  phonet: fix building with clang
  ARC: uacces: remove lp_start, lp_end from clobber list
  ARCv2: lib: memcpy: fix doing prefetchw outside of buffer
  tmpfs: fix link accounting when a tmpfile is linked in
  net: marvell: mvneta: fix DMA debug warning
  arm64: Relax GIC version check during early boot
  ASoC: topology: free created components in tplg load error
  net: mv643xx_eth: disable clk on error path in mv643xx_eth_shared_probe()
  qmi_wwan: apply SET_DTR quirk to Sierra WP7607
  pinctrl: meson: meson8b: fix the sdxc_a data 1..3 pins
  net: systemport: Fix reception of BPDUs
  scsi: libiscsi: Fix race between iscsi_xmit_task and iscsi_complete_task
  assoc_array: Fix shortcut creation
  ARM: 8824/1: fix a migrating irq bug when hotplug cpu
  clk: sunxi: A31: Fix wrong AHB gate number
  Input: st-keyscan - fix potential zalloc NULL dereference
  i2c: cadence: Fix the hold bit setting
  net: hns: Fix object reference leaks in hns_dsaf_roce_reset()
  mm: page_alloc: fix ref bias in page_frag_alloc() for 1-byte allocs
  mm/gup: fix gup_pmd_range() for dax
  floppy: check_events callback should not return a negative number
  Input: matrix_keypad - use flush_delayed_work()
  Input: cap11xx - switch to using set_brightness_blocking()
  ARM: OMAP2+: Variable "reg" in function omap4_dsi_mux_pads() could be uninitialized
  s390/dasd: fix using offset into zero size array error
  gpu: ipu-v3: Fix CSI offsets for imx53
  gpu: ipu-v3: Fix i.MX51 CSI control registers offset
  crypto: ahash - fix another early termination in hash walk
  crypto: caam - fixed handling of sg list
  stm class: Fix an endless loop in channel allocation
  iio: adc: exynos-adc: Fix NULL pointer exception on unbind
  ASoC: fsl_esai: fix register setting issue in RIGHT_J mode
  9p/net: fix memory leak in p9_client_create
  9p: use inode->i_lock to protect i_size_write() under 32-bit
  media: videobuf2-v4l2: drop WARN_ON in vb2_warn_zero_bytesused()
  FROMLIST: psi: introduce psi monitor
  FROMLIST: refactor header includes to allow kthread.h inclusion in psi_types.h
  FROMLIST: psi: track changed states
  FROMLIST: psi: split update_stats into parts
  FROMLIST: psi: rename psi fields in preparation for psi trigger addition
  FROMLIST: psi: make psi_enable static
  FROMLIST: psi: introduce state_mask to represent stalled psi states
  ANDROID: cuttlefish_defconfig: Enable CONFIG_PSI
  BACKPORT: kernel: cgroup: add poll file operation
  BACKPORT: fs: kernfs: add poll file operation
  UPSTREAM: psi: avoid divide-by-zero crash inside virtual machines
  UPSTREAM: psi: clarify the Kconfig text for the default-disable option
  UPSTREAM: psi: fix aggregation idle shut-off
  UPSTREAM: cgroup add cftype->open/release() callbacks
  UPSTREAM: kernfs: Check KERNFS_HAS_RELEASE before calling kernfs_release_file()
  UPSTREAM: kernfs: fix locking around kernfs_ops->release() callback
  UPSTREAM: kernfs: add kernfs_ops->open/release() callbacks
  UPSTREAM: psi: fix reference to kernel commandline enable
  BACKPORT: psi: make disabling/enabling easier for vendor kernels
  UPSTREAM: kernel/sched/psi.c: simplify cgroup_move_task()
  BACKPORT: psi: cgroup support
  UPSTREAM: psi: pressure stall information for CPU, memory, and IO
  BACKPORT: sched: introduce this_rq_lock_irq()
  BACKPORT: sched: sched.h: make rq locking and clock functions available in stats.h
  UPSTREAM: sched: loadavg: make calc_load_n() public
  UPSTREAM: sched: loadavg: consolidate LOAD_INT, LOAD_FRAC, CALC_LOAD
  BACKPORT: delayacct: track delays from thrashing cache pages
  BACKPORT: mm: workingset: tell cache transitions from workingset thrashing
  BACKPORT: cgroup: misc changes
  UPSTREAM: sched/headers: Remove <linux/sched.h> from <linux/sched/loadavg.h>
  UPSTREAM: sched/headers: Move loadavg related definitions from <linux/sched.h> to <linux/sched/loadavg.h>
  UPSTREAM: sched/headers, delayacct: Move the 'struct task_delay_info' definition from <linux/sched.h> to <linux/delayacct.h>
  UPSTREAM: sched/headers: Prepare for new header dependencies before moving code to <linux/sched/loadavg.h>
  BACKPORT: sched/core: Add wrappers for lockdep_(un)pin_lock()
  UPSTREAM: Avoid page waitqueue race leaving possible page locker waiting
  UPSTREAM: mm: add PageWaiters indicating tasks are waiting for a page bit
  UPSTREAM: workqueue: make workqueue available early during boot
  ANDROID: ion_dummy_driver: Remove SYSTEM_CONTIG heap
  ANDROID: ion_dummy_driver: Rework ion_dummy_driver to avoid direct indexing into the heaps
  ANDROID: ion_dummy_driver: Use IS_ERR_OR_NULL() before destroying heaps
  ANDROID: sched/fair: fix energy compute when a cluster is only a cpu core in multi-cluster system

Conflicts:
	arch/arm/kernel/irq.c
	drivers/scsi/sd.c
	include/linux/cgroup.h
	include/linux/sched.h
	kernel/sched/Makefile
	kernel/sched/core.c
	mm/page_alloc.c

Change-Id: I387345d8f6d2145e2456fbead266c897475dfb4c
Signed-off-by: jianzhou <jianzhou@codeaurora.org>


@ -0,0 +1,180 @@
================================
PSI - Pressure Stall Information
================================
:Date: April, 2018
:Author: Johannes Weiner <hannes@cmpxchg.org>

When CPU, memory or IO devices are contended, workloads experience
latency spikes, throughput losses, and run the risk of OOM kills.

Without an accurate measure of such contention, users are forced to
either play it safe and under-utilize their hardware resources, or
roll the dice and frequently suffer the disruptions resulting from
excessive overcommit.

The psi feature identifies and quantifies the disruptions caused by
such resource crunches and the time impact it has on complex workloads
or even entire systems.

Having an accurate measure of productivity losses caused by resource
scarcity aids users in sizing workloads to hardware--or provisioning
hardware according to workload demand.

As psi aggregates this information in realtime, systems can be managed
dynamically using techniques such as load shedding, migrating jobs to
other systems or data centers, or strategically pausing or killing low
priority or restartable batch jobs.

This allows maximizing hardware utilization without sacrificing
workload health or risking major disruptions such as OOM kills.

Pressure interface
==================

Pressure information for each resource is exported through the
respective file in /proc/pressure/ -- cpu, memory, and io.

The format for CPU is as such:

some avg10=0.00 avg60=0.00 avg300=0.00 total=0

and for memory and IO:

some avg10=0.00 avg60=0.00 avg300=0.00 total=0
full avg10=0.00 avg60=0.00 avg300=0.00 total=0

The "some" line indicates the share of time in which at least some
tasks are stalled on a given resource.

The "full" line indicates the share of time in which all non-idle
tasks are stalled on a given resource simultaneously. In this state
actual CPU cycles are going to waste, and a workload that spends
extended time in this state is considered to be thrashing. This has
severe impact on performance, and it's useful to distinguish this
situation from a state where some tasks are stalled but the CPU is
still doing productive work. As such, time spent in this subset of the
stall state is tracked separately and exported in the "full" averages.

The ratios are tracked as recent trends over ten, sixty, and three
hundred second windows, which gives insight into short term events as
well as medium and long term trends. The total absolute stall time is
tracked and exported as well, to allow detection of latency spikes
which wouldn't necessarily make a dent in the time averages, or to
average trends over custom time frames.
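
As an illustration only, a minimal C sketch that reads
/proc/pressure/memory and prints the "some" avg10 value; the fixed
"some avg10=..." field layout is assumed from the format shown above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/pressure/memory", "r");
	char line[256];
	float avg10;

	if (!f) {
		perror("/proc/pressure/memory");
		return 1;
	}
	/* First line: some avg10=0.00 avg60=0.00 avg300=0.00 total=0 */
	if (!fgets(line, sizeof(line), f) ||
	    sscanf(line, "some avg10=%f", &avg10) != 1) {
		fprintf(stderr, "unexpected format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("memory some avg10: %.2f%%\n", avg10);
	return 0;
}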

Monitoring for pressure thresholds
==================================

Users can register triggers and use poll() to be woken up when resource
pressure exceeds certain thresholds.

A trigger describes the maximum cumulative stall time over a specific
time window, e.g. 100ms of total stall time within any 500ms window to
generate a wakeup event.

To register a trigger user has to open psi interface file under
/proc/pressure/ representing the resource to be monitored and write the
desired threshold and time window. The open file descriptor should be
used to wait for trigger events using select(), poll() or epoll().
The following format is used:

<some|full> <stall amount in us> <time window in us>

For example writing "some 150000 1000000" into /proc/pressure/memory
would add 150ms threshold for partial memory stall measured within
1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
would add 50ms threshold for full io stall measured within 1sec time window.

Triggers can be set on more than one psi metric and more than one trigger
for the same psi metric can be specified. However for each trigger a separate
file descriptor is required to be able to poll it separately from others,
therefore for each trigger a separate open() syscall should be made even
when opening the same psi interface file.

Monitors activate only when system enters stall state for the monitored
psi metric and deactivates upon exit from the stall state. While system is
in the stall state psi signal growth is monitored at a rate of 10 times per
tracking window.

The kernel accepts window sizes ranging from 500ms to 10s, therefore min
monitoring update interval is 50ms and max is 1s. Min limit is set to
prevent overly frequent polling. Max limit is chosen as a high enough number
after which monitors are most likely not needed and psi averages can be used
instead.

When activated, psi monitor stays active for at least the duration of one
tracking window to avoid repeated activations/deactivations when system is
bouncing in and out of the stall state.

Notifications to the userspace are rate-limited to one per tracking window.

The trigger will de-register when the file descriptor used to define the
trigger is closed.

Userspace monitor usage example
===============================

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

/*
 * Monitor memory partial stall with 1s tracking window size
 * and 150ms threshold.
 */
int main() {
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;
	int n;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0) {
		printf("/proc/pressure/memory open error: %s\n",
			strerror(errno));
		return 1;
	}
	fds.events = POLLPRI;

	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
		printf("/proc/pressure/memory write error: %s\n",
			strerror(errno));
		return 1;
	}

	printf("waiting for events...\n");
	while (1) {
		n = poll(&fds, 1, -1);
		if (n < 0) {
			printf("poll error: %s\n", strerror(errno));
			return 1;
		}
		if (fds.revents & POLLERR) {
			printf("got POLLERR, event source is gone\n");
			return 0;
		}
		if (fds.revents & POLLPRI) {
			printf("event triggered!\n");
		} else {
			printf("unknown event received: 0x%x\n", fds.revents);
			return 1;
		}
	}

	return 0;
}

Cgroup2 interface
=================

In a system with a CONFIG_CGROUP=y kernel and the cgroup2 filesystem
mounted, pressure stall information is also tracked for tasks grouped
into cgroups. Each subdirectory in the cgroupfs mountpoint contains
cpu.pressure, memory.pressure, and io.pressure files; the format is
the same as the /proc/pressure/ files.

Per-cgroup psi monitors can be specified and used the same way as
system-wide ones.
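
A short sketch of registering such a per-cgroup trigger; the
/sys/fs/cgroup mount point and the "workload" cgroup name are
illustrative assumptions:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";
	/* Assumed path: cgroup2 mounted at /sys/fs/cgroup, cgroup "workload" */
	int fd = open("/sys/fs/cgroup/workload/memory.pressure",
		      O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		printf("memory.pressure open error: %s\n", strerror(errno));
		return 1;
	}
	if (write(fd, trig, strlen(trig) + 1) < 0) {
		printf("memory.pressure write error: %s\n", strerror(errno));
		return 1;
	}
	/* fd can now be polled for POLLPRI just like the /proc/pressure files */
	close(fd);
	return 0;
}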


@ -717,6 +717,12 @@ All time durations are in microseconds.
$PERIOD duration. If only one number is written, $MAX is
updated.
cpu.pressure
A read-only nested-key file which exists on non-root cgroups.
Shows pressure stall information for CPU. See
Documentation/accounting/psi.txt for details.
5-2. Memory
@ -925,6 +931,12 @@ PAGE_SIZE multiple when read back.
Swap usage hard limit. If a cgroup's swap usage reaches this
limit, anonymous meomry of the cgroup will not be swapped out.
memory.pressure
A read-only nested-key file which exists on non-root cgroups.
Shows pressure stall information for memory. See
Documentation/accounting/psi.txt for details.
5-2-2. Usage Guidelines
@ -1055,6 +1067,12 @@ blk-mq devices.
8:16 rbps=2097152 wbps=max riops=max wiops=max
io.pressure
A read-only nested-key file which exists on non-root cgroups.
Shows pressure stall information for IO. See
Documentation/accounting/psi.txt for details.
5-3-2. Writeback


@ -3409,6 +3409,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
before loading.
See Documentation/blockdev/ramdisk.txt.
psi= [KNL] Enable or disable pressure stall information
tracking.
Format: <bool>
psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
probe for; one of (bare|imps|exps|lifebook|any).
psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 164
SUBLEVEL = 165
EXTRAVERSION =
NAME = Roaring Lionus


@ -209,7 +209,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return n;
}
@ -438,7 +438,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return n;
}
@ -658,7 +658,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return res;
}
@ -691,7 +691,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return res;
}


@ -25,15 +25,11 @@
#endif
#ifdef CONFIG_ARC_HAS_LL64
# define PREFETCH_READ(RX) prefetch [RX, 56]
# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
# define PREFETCH_READ(RX) prefetch [RX, 28]
# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
@ -41,8 +37,6 @@
#endif
ENTRY_CFI(memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5
@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5


@ -1502,6 +1502,7 @@ config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
select GENERIC_IRQ_MIGRATION
depends on SMP
select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.


@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);

arch/arm/kernel/irq.c Normal file → Executable file

@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>


@ -254,7 +254,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
migrate_irqs();
irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU


@ -115,6 +115,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
u32 reg;
int ret;
if (dsi_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@ -130,7 +131,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
ret = regmap_read(omap4_dsi_mux_syscon,
OMAP4_DSIPHY_SYSCON_OFFSET,
&reg);
if (ret)
return ret;
reg &= ~enable_mask;
reg &= ~pipd_mask;


@ -70,16 +70,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
switch (val) {
case CPUFREQ_PRECHANGE:
if (old_dvs & !new_dvs ||
cur_dvs & !new_dvs) {
if ((old_dvs && !new_dvs) ||
(cur_dvs && !new_dvs)) {
pr_debug("%s: exiting dvs\n", __func__);
cur_dvs = false;
gpio_set_value(OSIRIS_GPIO_DVS, 1);
}
break;
case CPUFREQ_POSTCHANGE:
if (!old_dvs & new_dvs ||
!cur_dvs & new_dvs) {
if ((!old_dvs && new_dvs) ||
(!cur_dvs && new_dvs)) {
pr_debug("entering dvs\n");
cur_dvs = true;
gpio_set_value(OSIRIS_GPIO_DVS, 0);


@ -6,6 +6,7 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUP_FREEZER=y


@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
8: mov w7, w8
8: cbz w8, 91f
mov w7, w8
add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
91: eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret


@ -535,8 +535,7 @@ set_hcr:
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
cmp x0, #1
b.ne 3f
cbz x0, 3f
mrs_s x0, ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1


@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
KBUILD_AFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += $(cpuflags-y) -pipe
KBUILD_CFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += -pipe -ffreestanding
ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2


@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>


@ -9,6 +9,7 @@
*/
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/io.h>
#include <asm/setup.h>


@ -698,6 +698,9 @@ fast_exception_return:
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r11)
REST_GPR(10, r11)
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
@ -932,6 +935,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtcrf 0xFF,r10
mtlr r11
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
@ -969,6 +975,9 @@ exc_exit_restart_end:
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:


@ -153,7 +153,7 @@ void __giveup_fpu(struct task_struct *tsk)
save_fpu(tsk);
msr = tsk->thread.regs->msr;
msr &= ~MSR_FP;
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;


@ -547,6 +547,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
/*
* Copy out only the low-order word of vrsave.
*/
int start, end;
union {
elf_vrreg_t reg;
u32 word;
@ -555,8 +556,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
vrsave.word = target->thread.vrsave;
start = 33 * sizeof(vector128);
end = start + sizeof(vrsave);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
start, end);
}
return ret;
@ -594,6 +597,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
/*
* We use only the first word of vrsave.
*/
int start, end;
union {
elf_vrreg_t reg;
u32 word;
@ -602,8 +606,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
vrsave.word = target->thread.vrsave;
start = 33 * sizeof(vector128);
end = start + sizeof(vrsave);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
start, end);
if (!ret)
target->thread.vrsave = vrsave.word;
}


@ -26,13 +26,13 @@
#define SS_MSR 0x74
#define SS_SDR1 0x78
#define SS_LR 0x7c
#define SS_SPRG 0x80 /* 4 SPRGs */
#define SS_DBAT 0x90 /* 8 DBATs */
#define SS_IBAT 0xd0 /* 8 IBATs */
#define SS_TB 0x110
#define SS_CR 0x118
#define SS_GPREG 0x11c /* r12-r31 */
#define STATE_SAVE_SIZE 0x16c
#define SS_SPRG 0x80 /* 8 SPRGs */
#define SS_DBAT 0xa0 /* 8 DBATs */
#define SS_IBAT 0xe0 /* 8 IBATs */
#define SS_TB 0x120
#define SS_CR 0x128
#define SS_GPREG 0x12c /* r12-r31 */
#define STATE_SAVE_SIZE 0x17c
.section .data
.align 5
@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
stw r7, SS_SPRG+12(r3)
stw r8, SS_SDR1(r3)
mfspr r4, SPRN_SPRG4
mfspr r5, SPRN_SPRG5
mfspr r6, SPRN_SPRG6
mfspr r7, SPRN_SPRG7
stw r4, SS_SPRG+16(r3)
stw r5, SS_SPRG+20(r3)
stw r6, SS_SPRG+24(r3)
stw r7, SS_SPRG+28(r3)
mfspr r4, SPRN_DBAT0U
mfspr r5, SPRN_DBAT0L
mfspr r6, SPRN_DBAT1U
@ -493,6 +503,16 @@ mpc83xx_deep_resume:
mtspr SPRN_IBAT7U, r6
mtspr SPRN_IBAT7L, r7
lwz r4, SS_SPRG+16(r3)
lwz r5, SS_SPRG+20(r3)
lwz r6, SS_SPRG+24(r3)
lwz r7, SS_SPRG+28(r3)
mtspr SPRN_SPRG4, r4
mtspr SPRN_SPRG5, r5
mtspr SPRN_SPRG6, r6
mtspr SPRN_SPRG7, r7
lwz r4, SS_SPRG+0(r3)
lwz r5, SS_SPRG+4(r3)
lwz r6, SS_SPRG+8(r3)


@ -22,6 +22,7 @@
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
@ -48,7 +49,7 @@ static int calc_freq(struct spu_gov_info_struct *info)
cpu = info->policy->cpu;
busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
cpu, busy_spus, info->busy_spus);


@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@ -986,9 +987,9 @@ static void spu_calc_load(void)
unsigned long active_tasks; /* fixed-point */
active_tasks = count_active_contexts() * FIXED_1;
CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}
static void spusched_wake(unsigned long data)
@ -1070,9 +1071,6 @@ void spuctx_switch_state(struct spu_context *ctx,
}
}
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
static int show_spu_loadavg(struct seq_file *s, void *private)
{
int a, b, c;


@ -104,6 +104,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
/* MEM2 64MB@0x10000000 */
delta = wii_hole_start + wii_hole_size;
size = top - delta;
if (__map_without_bats)
return delta;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > size)
break;


@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
}
static struct bin_attribute opal_msglog_attr = {
.attr = {.name = "msglog", .mode = 0444},
.attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};


@ -17,15 +17,12 @@
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <asm/appldata.h>
#include <asm/smp.h>
#include "appldata.h"
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
/*
* OS data
*


@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/slab.h>


@ -8,6 +8,7 @@
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/sched/loadavg.h>
#include <asm/auxio.h>


@ -9,6 +9,7 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y


@ -5965,6 +5965,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu->mmio_needed = 0;
return 0;
}
@ -7046,6 +7047,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
if (addr_size == 1)
off = (gva_t)sign_extend64(off, 31);
else if (addr_size == 0)
off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
@ -7088,10 +7093,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
/* Protected mode: #GP(0)/#SS(0) if the memory
* operand is outside the segment limit.
/*
* Protected mode: #GP(0)/#SS(0) if the memory operand is
* outside the segment limit. All CPUs that support VMX ignore
* limit checks for flat segments, i.e. segments with base==0,
* limit==0xffffffff and of type expand-up data or code.
*/
exn = exn || (off + sizeof(u64) > s.limit);
if (!(s.base == 0 && s.limit == 0xffffffff &&
((s.type & 8) || !(s.type & 4))))
exn = exn || (off + sizeof(u64) > s.limit);
}
if (exn) {
kvm_queue_exception_e(vcpu,


@ -6769,6 +6769,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu->mmio_needed = 0;
r = 0;
goto out;
}


@ -85,17 +85,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
if (walk->entrylen && (walk->offset & alignmask) && !err) {
unsigned int nbytes;
walk->offset = ALIGN(walk->offset, alignmask + 1);
nbytes = min(walk->entrylen,
(unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
@ -115,7 +115,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (err)
return err;
if (nbytes) {
if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
@ -189,6 +189,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
return ret;
}
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (tfm->setkey != ahash_nosetkey &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@ -200,20 +215,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
else
err = tfm->setkey(tfm, key, keylen);
if (err)
if (unlikely(err)) {
ahash_set_needkey(tfm);
return err;
}
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
static inline unsigned int ahash_align_buffer_size(unsigned len,
unsigned long mask)
{
@ -482,8 +493,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
if (alg->setkey) {
hash->setkey = alg->setkey;
if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
ahash_set_needkey(hash);
}
if (alg->export)
hash->export = alg->export;


@ -52,7 +52,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
@ -76,7 +76,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@ -89,8 +89,6 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
@ -130,7 +128,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
@ -142,8 +140,6 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
@ -156,7 +152,7 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@ -169,8 +165,6 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}


@ -52,6 +52,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
return err;
}
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
if (crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@ -64,8 +71,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
else
err = shash->setkey(tfm, key, keylen);
if (err)
if (unlikely(err)) {
shash_set_needkey(tfm, shash);
return err;
}
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@ -367,7 +376,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
if (crypto_shash_alg_has_setkey(alg))
crt->setkey = shash_async_setkey;
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
@ -389,9 +399,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
hash->descsize = alg->descsize;
if (crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
shash_set_needkey(hash, alg);
return 0;
}


@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
acpi_status status;
int len, count;
int i, nval;
char *c;
acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
if (ACPI_FAILURE(status))
return -ENODEV;
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);


@ -307,6 +307,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return -EINVAL;
}
if (out_obj->type != ACPI_TYPE_BUFFER) {
dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
dimm_name, cmd_name, out_obj->type);
rc = -EINVAL;
goto out;
}
if (call_pkg) {
call_pkg->nd_fw_size = out_obj->buffer.length;
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@ -325,13 +332,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
if (out_obj->package.type != ACPI_TYPE_BUFFER) {
dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
__func__, dimm_name, cmd_name, out_obj->type);
rc = -EINVAL;
goto out;
}
if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
dimm_name, cmd_name, out_obj->buffer.length);


@ -116,7 +116,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
if (!ws)
return;
del_timer_sync(&ws->timer);
__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);
@ -204,6 +203,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
del_timer_sync(&ws->timer);
/*
* Clear timer.function to make wakeup_source_not_registered() treat
* this wakeup source as not registered.
*/
ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);


@ -3752,7 +3752,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
if (lock_fdc(drive))
return -EINTR;
return 0;
poll_drive(false, 0);
process_fd_request();
}


@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
return pdmclk->enabled;
}
static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
unsigned int reg)
{
const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
int ret;
ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
if (ret < 0)
return ret;
ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
if (ret < 0)
return ret;
return 0;
}
/*
* TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
* Cold Temperature". This affects cold boot and deeper idle states it
* seems. The workaround consists of resetting HPPLL and LPPLL.
*/
static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
{
int ret;
ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
if (ret)
return ret;
ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
if (ret)
return ret;
return 0;
}
static int twl6040_pdmclk_prepare(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
int ret;
ret = twl6040_power(pdmclk->twl6040, 1);
if (!ret)
pdmclk->enabled = 1;
if (ret)
return ret;
ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
if (ret)
goto out_err;
pdmclk->enabled = 1;
return 0;
out_err:
dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
twl6040_power(pdmclk->twl6040, 0);
return ret;
}


@ -364,16 +364,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
long rate = *parent_rate;
unsigned int div = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
if (clk_info->type & CGU_CLK_DIV)
rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
rate /= clk_info->fixdiv.div;
div = clk_info->fixdiv.div;
return rate;
return DIV_ROUND_UP(*parent_rate, div);
}
static int
@ -393,7 +393,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
rate = parent_rate / div;
rate = DIV_ROUND_UP(parent_rate, div);
if (rate != req_rate)
return -EINVAL;


@ -78,7 +78,7 @@ struct ingenic_cgu_mux_info {
* @reg: offset of the divider control register within the CGU
* @shift: number of bits to left shift the divide value by (ie. the index of
* the lowest bit of the divide value within its control register)
* @div: number of bits to divide the divider value by (i.e. if the
* @div: number to divide the divider value by (i.e. if the
* effective divider value is the value written to the register
* multiplied by some constant)
* @bits: the size of the divide value in bits


@ -252,9 +252,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
0x060, BIT(10), 0);
static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
0x060, BIT(12), 0);
0x060, BIT(11), 0);
static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
0x060, BIT(13), 0);
0x060, BIT(12), 0);
static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
0x060, BIT(13), 0);
static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",


@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
/* Clear the MCT tick interrupt */
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
exynos4_mct_tick_clear(mevt);
return 0;
}
@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt)
return 0;
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
/*
* This is for supporting oneshot mode.
* Mct would generate interrupt periodically
@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
/* Clear the MCT tick interrupt */
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);


@ -192,7 +192,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return ret;
}
static void __init pxa_cpufreq_init_voltages(void)
static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@ -208,7 +208,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return 0;
}
static void __init pxa_cpufreq_init_voltages(void) { }
static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,


@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
of_node_put(np);
return 0;
out_switch_to_pllx:


@ -18,6 +18,7 @@
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/math64.h>
#include <linux/module.h>
@ -130,10 +131,6 @@ struct menu_device {
int interval_ptr;
};
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
static inline int get_loadavg(unsigned long load)
{
return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;


@ -2131,6 +2131,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->dst_nents) {
dst_dma = sg_dma_address(req->dst);
out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *


@ -1600,7 +1600,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
if (vma->vm_file != filp)
return false;
return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
return vma->vm_start == addr &&
(vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**


@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;


@ -884,8 +884,8 @@ static struct ipu_devtype ipu_type_imx51 = {
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
.csi0_ofs = 0x1f030000,
.csi1_ofs = 0x1f038000,
.csi0_ofs = 0x1e030000,
.csi1_ofs = 0x1e038000,
.ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
@ -900,8 +900,8 @@ static struct ipu_devtype ipu_type_imx53 = {
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
.csi0_ofs = 0x07030000,
.csi1_ofs = 0x07038000,
.csi0_ofs = 0x06030000,
.csi1_ofs = 0x06038000,
.ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,


@ -599,11 +599,15 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
int master;
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}


@ -253,6 +253,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
;
if (i == width)
return pos;
/* step over [pos..pos+i) to continue search */
pos += i;
}
return -1;
@ -565,7 +568,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
int ret = -EINVAL;
int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
@ -593,8 +596,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
if (id->width < 1 ||
id->width > PAGE_SIZE / stm->data->sw_mmiosz)
if (stm->data->sw_mmiosz)
wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
if (id->width < 1 || id->width > wlimit)
goto err_free;
ret = stm_file_assign(stmf, id->id, id->width);


@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if (id->send_count > CDNS_I2C_FIFO_DEPTH)
if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */


@ -794,7 +794,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
/* payload size is only 12 bit */
static struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_read_len = 4096,
.max_write_len = 4096,
.max_write_len = 4096 - 12,
};
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {


@ -916,7 +916,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
if (IS_REACHABLE(CONFIG_INPUT)) {
if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
free_irq(info->tsirq, info);
input_unregister_device(info->input);
}


@ -75,9 +75,7 @@
struct cap11xx_led {
struct cap11xx_priv *priv;
struct led_classdev cdev;
struct work_struct work;
u32 reg;
enum led_brightness new_brightness;
};
#endif
@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
}
#ifdef CONFIG_LEDS_CLASS
static void cap11xx_led_work(struct work_struct *work)
{
struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
struct cap11xx_priv *priv = led->priv;
int value = led->new_brightness;
/*
* All LEDs share the same duty cycle as this is a HW limitation.
* Brightness levels per LED are either 0 (OFF) and 1 (ON).
*/
regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
BIT(led->reg), value ? BIT(led->reg) : 0);
}
static void cap11xx_led_set(struct led_classdev *cdev,
enum led_brightness value)
static int cap11xx_led_set(struct led_classdev *cdev,
enum led_brightness value)
{
struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
struct cap11xx_priv *priv = led->priv;
if (led->new_brightness == value)
return;
led->new_brightness = value;
schedule_work(&led->work);
/*
* All LEDs share the same duty cycle as this is a HW
* limitation. Brightness levels per LED are either
* 0 (OFF) and 1 (ON).
*/
return regmap_update_bits(priv->regmap,
CAP11XX_REG_LED_OUTPUT_CONTROL,
BIT(led->reg),
value ? BIT(led->reg) : 0);
}
static int cap11xx_init_leds(struct device *dev,
@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
led->cdev.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
led->cdev.flags = 0;
led->cdev.brightness_set = cap11xx_led_set;
led->cdev.brightness_set_blocking = cap11xx_led_set;
led->cdev.max_brightness = 1;
led->cdev.brightness = LED_OFF;
@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
led->reg = reg;
led->priv = priv;
INIT_WORK(&led->work, cap11xx_led_work);
error = devm_led_classdev_register(dev, &led->cdev);
if (error) {
of_node_put(child);


@ -220,7 +220,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
keypad->stopped = true;
spin_unlock_irq(&keypad->lock);
flush_work(&keypad->work.work);
flush_delayed_work(&keypad->work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
* we should disable them now.


@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
keypad_data->input_dev = input_dev;
error = keypad_matrix_key_parse_dt(keypad_data);
if (error)
return error;
@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad_data);
keypad_data->input_dev = input_dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(keypad_data->base))


@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/leds.h>
#include <linux/reboot.h>
#include "../leds.h"


@ -68,6 +68,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
in_use > CUTOFF_WRITEBACK_SYNC)
return false;
if (bio_op(bio) == REQ_OP_DISCARD)
return false;
if (dc->partial_stripes_expensive &&
bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))


@ -3798,6 +3798,8 @@ static int raid10_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
if (!mddev->sync_thread)
goto out_free_conf;
}
return 0;


@ -6977,6 +6977,8 @@ static int raid5_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
if (!mddev->sync_thread)
goto abort;
}
/* Ok, everything is just fine now */


@ -638,6 +638,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
if (!uvc_hw_timestamps_param)
return;
/*
* We will get called from __vb2_queue_cancel() if there are buffers
* done but not dequeued by the user, but the sample array has already
* been released at that time. Just bail out in that case.
*/
if (!clock->samples)
return;
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < clock->size)


@ -146,7 +146,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
return;
check_once = true;
WARN_ON(1);
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)


@ -1338,13 +1338,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
static int cards_found;
static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
cards_found = 0;
err = pci_enable_device(pdev);
if (err)
return err;


@ -104,6 +104,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
reg = rxchk_readl(priv, RXCHK_CONTROL);
/* Clear L2 header checks, which would prevent BPDUs
* from being received.
*/
reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
reg |= RXCHK_EN;
else


@ -999,7 +999,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic_enable_vf(nic, vf, true);
goto unlock;
break;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
if (vf >= nic->num_vf_en)


@ -162,6 +162,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
return 1;
}
static void nicvf_send_cfg_done(struct nicvf *nic)
{
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
if (nicvf_send_msg_to_pf(nic, &mbx)) {
netdev_err(nic->netdev,
"PF didn't respond to CFG DONE msg\n");
}
}
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
if (bgx->rx)
@ -1178,7 +1189,6 @@ int nicvf_open(struct net_device *netdev)
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
netif_carrier_off(netdev);
@ -1267,8 +1277,7 @@ int nicvf_open(struct net_device *netdev)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
/* Send VF config done msg to PF */
mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
nicvf_write_to_mbx(nic, &mbx);
nicvf_send_cfg_done(nic);
return 0;
cleanup:


@ -2820,6 +2820,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_dev = dev_get_drvdata(&pdev->dev);
if (!dsaf_dev) {
dev_err(&pdev->dev, "dsaf_dev is NULL\n");
put_device(&pdev->dev);
return -ENODEV;
}
@ -2827,6 +2828,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
dsaf_dev->ae_dev.name);
put_device(&pdev->dev);
return -ENODEV;
}


@ -2886,7 +2886,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
return ret;
goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@ -2894,6 +2894,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
err_put_clk:
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)

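The mv643xx_eth_shared_probe() change above turns an early `return ret` into `goto err_put_clk`, so the clock prepared earlier in probe is released when mv643xx_eth_shared_of_probe() fails. A generic, self-contained sketch of that goto-unwind idiom (the stub functions are invented for illustration, not taken from the driver):

#include <stdio.h>

static int  acquire_clock(void)    { puts("clock prepared");  return 0; }
static void release_clock(void)    { puts("clock released");  }
static int  parse_devicetree(void) { puts("of probe failed"); return -1; }

static int example_probe(void)
{
        int ret;

        ret = acquire_clock();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = parse_devicetree();
        if (ret)
                goto err_put_clk;       /* a bare "return ret" here would leak the clock */

        return 0;

err_put_clk:
        release_clock();
        return ret;
}

int main(void)
{
        return example_probe() ? 1 : 0;
}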

@ -2050,7 +2050,7 @@ err_drop_frame:
if (unlikely(!skb))
goto err_drop_frame_ret_pool;
dma_sync_single_range_for_cpu(dev->dev.parent,
dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
rx_desc->buf_phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,


@ -756,15 +756,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum alu_op alu_op, bool skip)
enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
if (skip) {
meta->skip = true;
return 0;
}
wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
@ -1017,7 +1012,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
}
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@ -1027,7 +1022,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
}
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@ -1037,7 +1032,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
}
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@ -1047,7 +1042,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@ -1057,7 +1052,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
}
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)

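A note on the wrp_alu32_imm() change above (an illustration, not part of the patch): the removed "skip" shortcut suppressed code generation for immediates the old !imm / !~imm tests treated as no-ops, but a BPF 32-bit ALU instruction must still zero-extend the 64-bit destination, which the helper does via wrp_immed(..., 0) on the high half, so nothing can safely be skipped. A minimal userspace sketch of the required semantics:

#include <stdint.h>
#include <stdio.h>

/* BPF ALU32 semantics: even an operation that leaves the low 32 bits
 * unchanged (here AND with 0xffffffff) must clear the upper 32 bits of
 * the destination register. */
static uint64_t alu32_and_imm(uint64_t dst, uint32_t imm)
{
        return (uint32_t)dst & imm;     /* result zero-extends to 64 bits */
}

int main(void)
{
        uint64_t r0 = 0xdeadbeef00000001ULL;

        /* Skipping the AND would leave r0 unchanged; executing it prints 0x1. */
        printf("0x%llx\n", (unsigned long long)alu32_and_imm(r0, 0xffffffffu));
        return 0;
}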

@ -895,8 +895,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */


@ -3167,7 +3167,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
genlmsg_reply(skb, info);
res = genlmsg_reply(skb, info);
break;
}


@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
skb_tail_pointer(skb),
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
cardp->rx_urb);
ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);


@ -492,7 +492,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos)
int pos, unsigned long flags)
{
u64 cookie = nd_region_interleave_set_cookie(nd_region);
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@ -530,7 +530,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
if (nspm->alt_name)
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
nd_label->flags = __cpu_to_le32(flags);
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
@ -922,13 +922,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
int i;
int i, rc;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
int rc, count = 0;
int count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
@ -946,7 +946,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
if (rc < 0)
return rc;
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
NSLABEL_FLAG_UPDATING);
if (rc)
return rc;
}
if (size == 0)
return 0;
/* Clear the UPDATING flag per UEFI 2.7 expectations */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
if (rc)
return rc;
}


@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
if (is_nd_pfn(dev) || is_nd_btt(dev))
return false;
if (ndns->force_raw)
return false;
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
IORESOURCE_SYSTEM_RAM,


@ -515,7 +515,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
unsigned long reserve = PHYS_PFN(SZ_8K);
unsigned long reserve = PFN_UP(SZ_8K);
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);

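Background for the init_altmap_reserve() change above (illustrative arithmetic, not from the patch): PHYS_PFN() rounds a byte count down to a page frame number while PFN_UP() rounds up, so on configurations whose page size exceeds 8K the old expression reserved zero pages for the 8K area. A small sketch with simplified stand-ins for the two macros:

#include <stdio.h>

#define SZ_8K 0x2000UL

/* Simplified stand-ins for the kernel's PHYS_PFN()/PFN_UP() macros */
#define PFN_DOWN_(x, shift)   ((x) >> (shift))
#define PFN_UP_(x, shift)     (((x) + (1UL << (shift)) - 1) >> (shift))

int main(void)
{
        /* 4K pages: both forms round to 2 page frames for an 8K reservation */
        printf("4K pages:  down=%lu up=%lu\n",
               PFN_DOWN_(SZ_8K, 12), PFN_UP_(SZ_8K, 12));
        /* 64K pages: rounding down loses the reservation entirely */
        printf("64K pages: down=%lu up=%lu\n",
               PFN_DOWN_(SZ_8K, 16), PFN_UP_(SZ_8K, 16));
        return 0;
}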

@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
if (superios[i].io != p->base)
if (superios[i].io == p->base)
return &superios[i];
return NULL;
}


@ -662,7 +662,7 @@ static const char * const sd_a_groups[] = {
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
"sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
"sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
};
static const char * const pcm_a_groups[] = {


@ -68,6 +68,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/tick.h>


@ -303,13 +303,13 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(2, STEP_50_MV),
regulator_desc_ldo(3, STEP_50_MV),
regulator_desc_ldo(4, STEP_50_MV),
regulator_desc_ldo(5, STEP_50_MV),
regulator_desc_ldo(5, STEP_25_MV),
regulator_desc_ldo(6, STEP_25_MV),
regulator_desc_ldo(7, STEP_50_MV),
regulator_desc_ldo(8, STEP_50_MV),
regulator_desc_ldo(9, STEP_50_MV),
regulator_desc_ldo(10, STEP_50_MV),
regulator_desc_ldo(11, STEP_25_MV),
regulator_desc_ldo(11, STEP_50_MV),
regulator_desc_ldo(12, STEP_50_MV),
regulator_desc_ldo(13, STEP_50_MV),
regulator_desc_ldo(14, STEP_50_MV),
@ -320,11 +320,11 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(19, STEP_50_MV),
regulator_desc_ldo(20, STEP_50_MV),
regulator_desc_ldo(21, STEP_50_MV),
regulator_desc_ldo(22, STEP_25_MV),
regulator_desc_ldo(23, STEP_25_MV),
regulator_desc_ldo(22, STEP_50_MV),
regulator_desc_ldo(23, STEP_50_MV),
regulator_desc_ldo(24, STEP_50_MV),
regulator_desc_ldo(25, STEP_50_MV),
regulator_desc_ldo(26, STEP_50_MV),
regulator_desc_ldo(26, STEP_25_MV),
regulator_desc_buck1_4(1),
regulator_desc_buck1_4(2),
regulator_desc_buck1_4(3),


@ -376,7 +376,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(32, STEP_50_MV),
regulator_desc_s2mps11_ldo(33, STEP_50_MV),
regulator_desc_s2mps11_ldo(34, STEP_50_MV),
regulator_desc_s2mps11_ldo(35, STEP_50_MV),
regulator_desc_s2mps11_ldo(35, STEP_25_MV),
regulator_desc_s2mps11_ldo(36, STEP_50_MV),
regulator_desc_s2mps11_ldo(37, STEP_50_MV),
regulator_desc_s2mps11_ldo(38, STEP_50_MV),
@ -386,8 +386,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};


@ -4508,6 +4508,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
/* at least 2 bytes are accessed and should be allocated */
if (usrparm.psf_data_len < 2) {
DBF_DEV_EVENT(DBF_WARNING, device,
"Symmetrix ioctl invalid data length %d",
usrparm.psf_data_len);
rc = -EINVAL;
goto out;
}
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);


@ -283,6 +283,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
if (!vcdev->airq_info)
return;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
@ -424,7 +426,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
return vcdev->config_block->num;
return vcdev->config_block->num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)

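The `?:` form introduced in virtio_ccw_read_vq_conf() above is the GCC/Clang conditional-with-omitted-operand extension: `a ?: b` evaluates to `a` when it is non-zero and to `b` otherwise, so a queue size of 0 read back from the config block is reported as -ENOENT instead of being treated as a valid queue. A standalone sketch of the same pattern (the helper name is made up for illustration):

#include <errno.h>
#include <stdio.h>

/* Mirrors the pattern above: a queue length of 0 means the queue does
 * not exist, so return -ENOENT rather than 0. */
static int vq_len_or_enoent(int num)
{
        return num ?: -ENOENT;          /* GNU extension: num ? num : -ENOENT */
}

int main(void)
{
        printf("%d %d\n", vq_len_or_enoent(128), vq_len_or_enoent(0));
        return 0;
}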

@ -1448,7 +1448,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
spin_lock_bh(&conn->session->back_lock);
if (conn->task == NULL) {
spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
__iscsi_get_task(task);
spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);

drivers/scsi/sd.c (mode changed: normal file → executable file)

@ -2737,6 +2737,55 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->ws10 = 1;
}
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
* multiple of the physical block size, or simply garbage.
*/
static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
unsigned int dev_max)
{
struct scsi_device *sdp = sdkp->device;
unsigned int opt_xfer_bytes =
sdkp->opt_xfer_blocks * sdp->sector_size;
if (sdkp->opt_xfer_blocks > dev_max) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u logical blocks " \
"> dev_max (%u logical blocks)\n",
sdkp->opt_xfer_blocks, dev_max);
return false;
}
if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u logical blocks " \
"> sd driver limit (%u logical blocks)\n",
sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
return false;
}
if (opt_xfer_bytes < PAGE_SIZE) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes < " \
"PAGE_SIZE (%u bytes)\n",
opt_xfer_bytes, (unsigned int)PAGE_SIZE);
return false;
}
if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes not a " \
"multiple of physical block size (%u bytes)\n",
opt_xfer_bytes, sdkp->physical_block_size);
return false;
}
sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
opt_xfer_bytes);
return true;
}
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@ -2801,18 +2850,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
rw_max = q->limits.io_opt =
sdkp->opt_xfer_blocks * sdp->sector_size;
else
sdkp->opt_xfer_blocks * sdp->sector_size;
} else
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);

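Worked example for the new sd_validate_opt_xfer_size() check above (figures invented for illustration, not taken from the patch): a drive with 512-byte logical and 4096-byte physical blocks that reports an OPTIMAL TRANSFER LENGTH of 0xffff logical blocks yields 65535 * 512 = 33553920 bytes, which is not a multiple of 4096, so the value is rejected and rw_max falls back to the minimum of the device limit and BLK_DEF_MAX_SECTORS. The alignment test relies on the physical block size being a power of two:

#include <stdio.h>

int main(void)
{
        unsigned int logical = 512, physical = 4096;
        unsigned int opt_blocks = 0xffff;       /* hypothetical OPTIMAL TRANSFER LENGTH */
        unsigned int opt_bytes = opt_blocks * logical;

        /* Same power-of-two alignment test applied by sd_validate_opt_xfer_size() */
        if (opt_bytes & (physical - 1))
                printf("%u bytes rejected: not a multiple of %u\n",
                       opt_bytes, physical);
        return 0;
}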

@ -693,7 +693,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = cpu_to_virtio32(vscsi->vdev,
@ -752,7 +751,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,


@ -1668,6 +1668,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info->enable_dma = false;
} else {
master->can_dma = pxa2xx_spi_can_dma;
master->max_dma_len = MAX_DMA_LEN;
}
}


@ -457,8 +457,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
MEM_CS_EN(spi->chip_select),
MEM_CS_MASK);
MEM_CS_MASK,
MEM_CS_EN(spi->chip_select));
}
qspi->mmap_enabled = true;
}
@ -470,7 +470,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
0, MEM_CS_MASK);
MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
}

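The ti_qspi change above reorders two arguments: regmap_update_bits() takes the bit mask before the new value, so MEM_CS_MASK belongs in the mask position and MEM_CS_EN(...) or 0 in the value position. A userspace sketch of the read-modify-write semantics (a simplified model of what the regmap helper does to the register contents, not the kernel implementation):

#include <stdio.h>

/* Simplified model of regmap_update_bits(map, reg, mask, val):
 * only the bits selected by mask are replaced with val. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
                                unsigned int val)
{
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        unsigned int reg  = 0xffff0000;         /* hypothetical register contents */
        unsigned int mask = 0x000000f0;         /* field to update */
        unsigned int val  = 0x00000030;         /* new field value */

        printf("0x%08x\n", update_bits(reg, mask, val));       /* prints 0xffff0030 */
        return 0;
}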

@ -37,11 +37,6 @@ static struct ion_platform_heap dummy_heaps[] = {
.type = ION_HEAP_TYPE_SYSTEM,
.name = "system",
},
{
.id = ION_HEAP_TYPE_SYSTEM_CONTIG,
.type = ION_HEAP_TYPE_SYSTEM_CONTIG,
.name = "system contig",
},
{
.id = ION_HEAP_TYPE_CARVEOUT,
.type = ION_HEAP_TYPE_CARVEOUT,
@ -76,34 +71,30 @@ static int __init ion_dummy_init(void)
return -ENOMEM;
/* Allocate a dummy carveout heap */
carveout_ptr = alloc_pages_exact(
dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
GFP_KERNEL);
if (carveout_ptr)
dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
virt_to_phys(carveout_ptr);
else
pr_err("ion_dummy: Could not allocate carveout\n");
/* Allocate a dummy chunk heap */
chunk_ptr = alloc_pages_exact(
dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
GFP_KERNEL);
if (chunk_ptr)
dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
else
pr_err("ion_dummy: Could not allocate chunk\n");
for (i = 0; i < dummy_ion_pdata.nr; i++) {
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
!heap_data->base)
continue;
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
/* Allocate a dummy carveout heap */
carveout_ptr = alloc_pages_exact(heap_data->size,
GFP_KERNEL);
if (!carveout_ptr) {
pr_err("ion_dummy: Could not allocate carveout\n");
continue;
}
heap_data->base = virt_to_phys(carveout_ptr);
}
if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
continue;
if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
/* Allocate a dummy chunk heap */
chunk_ptr = alloc_pages_exact(heap_data->size,
GFP_KERNEL);
if (!chunk_ptr) {
pr_err("ion_dummy: Could not allocate chunk\n");
continue;
}
heap_data->base = virt_to_phys(chunk_ptr);
}
heaps[i] = ion_heap_create(heap_data);
if (IS_ERR_OR_NULL(heaps[i])) {
@ -114,20 +105,28 @@ static int __init ion_dummy_init(void)
}
return 0;
err:
for (i = 0; i < dummy_ion_pdata.nr; ++i)
ion_heap_destroy(heaps[i]);
for (i = 0; i < dummy_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (!IS_ERR_OR_NULL(heaps[i]))
ion_heap_destroy(heaps[i]);
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
heap_data->size);
}
carveout_ptr = NULL;
}
if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
if (chunk_ptr) {
free_pages_exact(chunk_ptr, heap_data->size);
}
chunk_ptr = NULL;
}
}
kfree(heaps);
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
return err;
}
device_initcall(ion_dummy_init);
@ -138,19 +137,26 @@ static void __exit ion_dummy_exit(void)
ion_device_destroy(idev);
for (i = 0; i < dummy_ion_pdata.nr; i++)
ion_heap_destroy(heaps[i]);
kfree(heaps);
for (i = 0; i < dummy_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
if (!IS_ERR_OR_NULL(heaps[i]))
ion_heap_destroy(heaps[i]);
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
heap_data->size);
}
carveout_ptr = NULL;
}
if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
if (chunk_ptr) {
free_pages_exact(chunk_ptr, heap_data->size);
}
chunk_ptr = NULL;
}
}
kfree(heaps);
}
__exitcall(ion_dummy_exit);


@ -4084,9 +4084,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
spin_lock(&se_cmd->t_state_lock);
spin_lock_irq(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
spin_unlock_irq(&se_cmd->t_state_lock);
}
}
spin_unlock_bh(&conn->cmd_lock);


@ -97,6 +97,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (of_property_read_u32(np, "reg-offset", &prop) == 0)
port->mapbase += prop;
/* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
if (of_device_is_compatible(np, "mrvl,mmp-uart"))
port->regshift = 2;
/* Check for registers offset within the devices address range */
if (of_property_read_u32(np, "reg-shift", &prop) == 0)
port->regshift = prop;


@ -1330,6 +1330,30 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
static int pci_pericom_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
unsigned int bar, offset = board->first_offset, maxnr;
bar = FL_GET_BASE(board->flags);
if (board->flags & FL_BASE_BARS)
bar += idx;
else
offset += idx * board->uart_offset;
if (idx==3)
offset = 0x38;
maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
(board->reg_shift + 3);
if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
return 1;
return setup_port(priv, port, bar, offset, board->reg_shift);
}
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@ -2096,6 +2120,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
/*
* Pericom (Only 7954 - It have a offset jump for port 4)
*/
{
.vendor = PCI_VENDOR_ID_PERICOM,
.device = PCI_DEVICE_ID_PERICOM_PI7C9X7954,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
/*
* PLX
*/
@ -2126,6 +2160,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_pericom_setup,
},
/*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
@ -4976,10 +5115,10 @@ static struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@ -4988,10 +5127,10 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@ -5000,10 +5139,10 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@ -5012,13 +5151,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7951 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@ -5027,16 +5166,16 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@ -5045,13 +5184,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
@ -5060,19 +5199,19 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
pbn_pericom_PI7C9X7954 },
/*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/

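A note on pci_pericom_setup() above (illustrative, not from the patch): ports are normally placed at first_offset + idx * uart_offset, but on the PI7C9X7954 the fourth UART sits at register offset 0x38 rather than at the next stride, hence the idx == 3 special case; the table changes below likewise match each ACCES card to the Pericom part it actually carries (the 7951/7952/7954/7958 providing 1/2/4/8 ports respectively). A small sketch of the resulting offset calculation, with a hypothetical 8-byte stride:

#include <stdio.h>

/* Hypothetical layout mirroring pci_pericom_setup(): a fixed stride per
 * port, except that port index 3 lives at 0x38 on the quad-port part. */
static unsigned int pericom_port_offset(unsigned int first_offset,
                                        unsigned int uart_offset, int idx)
{
        if (idx == 3)
                return 0x38;
        return first_offset + idx * uart_offset;
}

int main(void)
{
        int idx;

        for (idx = 0; idx < 4; idx++)
                printf("port %d -> 0x%02x\n", idx,
                       pericom_port_offset(0, 8, idx));
        return 0;
}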

@ -362,7 +362,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
if (isrstatus & CDNS_UART_IXR_RXMASK)
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
*/
if (isrstatus & CDNS_UART_IXR_RXMASK &&
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
spin_unlock(&port->lock);


@ -40,6 +40,9 @@
*/
#define P9_LOCK_TIMEOUT (30*HZ)
/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
#define V9FS_STAT2INODE_KEEP_ISIZE 1
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb, unsigned int flags);
void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
unsigned int flags);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
}
int v9fs_open_to_dotl_flags(int flags);
static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
{
/*
* 32-bit need the lock, concurrent updates could break the
* sequences and make i_size_read() loop forever.
* 64-bit updates are atomic and can skip the locking.
*/
if (sizeof(i_size) > sizeof(long))
spin_lock(&inode->i_lock);
i_size_write(inode, i_size);
if (sizeof(i_size) > sizeof(long))
spin_unlock(&inode->i_lock);
}
#endif

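Background for the v9fs_i_size_write() helper above (a toy model, not kernel code): on 32-bit SMP builds i_size_write() protects the 64-bit size with a sequence counter, and, as the helper's own comment notes, unserialized concurrent writers can break the sequence and make i_size_read() loop forever, hence the spin_lock on inode->i_lock; on 64-bit the store is a single word and needs no lock. The sketch below models the reader/writer protocol:

#include <stdio.h>

/* Toy seqcount-protected 64-bit value on a "32-bit" machine: the writer
 * bumps the counter around a two-word store, the reader retries until it
 * observes a stable, even sequence.  Two writers racing without an outer
 * lock can interleave the bumps and leave readers spinning -- which is
 * why v9fs_i_size_write() serializes writers with inode->i_lock. */
struct wide_val {
        unsigned int seq;
        unsigned int lo, hi;
};

static void wide_write(struct wide_val *v, unsigned long long x)
{
        v->seq++;                               /* odd: update in progress */
        v->lo = (unsigned int)x;
        v->hi = (unsigned int)(x >> 32);
        v->seq++;                               /* even: update complete */
}

static unsigned long long wide_read(const struct wide_val *v)
{
        unsigned int s;
        unsigned long long x;

        do {
                s = v->seq;
                x = ((unsigned long long)v->hi << 32) | v->lo;
        } while ((s & 1) || s != v->seq);       /* torn or concurrent write */
        return x;
}

int main(void)
{
        struct wide_val v = { 0, 0, 0 };

        wide_write(&v, 0x100000000ULL);
        printf("0x%llx\n", wide_read(&v));
        return 0;
}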

@ -442,7 +442,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
i_size = i_size_read(inode);
if (iocb->ki_pos > i_size) {
inode_add_bytes(inode, iocb->ki_pos - i_size);
i_size_write(inode, iocb->ki_pos);
/*
* Need to serialize against i_size_write() in
* v9fs_stat2inode()
*/
v9fs_i_size_write(inode, iocb->ki_pos);
}
return retval;
}


@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
if (retval)
goto error;
v9fs_stat2inode(st, inode, sb);
v9fs_stat2inode(st, inode, sb, 0);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
return inode;
@ -1078,7 +1078,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
generic_fillattr(d_inode(dentry), stat);
p9stat_free(st);
@ -1156,12 +1156,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
* @stat: Plan 9 metadata (mistat) structure
* @inode: inode to populate
* @sb: superblock of filesystem
* @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb)
struct super_block *sb, unsigned int flags)
{
umode_t mode;
char ext[32];
@ -1202,10 +1203,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
mode = p9mode2perm(v9ses, stat);
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->length);
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
inode->i_blocks = (stat->length + 512 - 1) >> 9;
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
@ -1402,9 +1404,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
{
int umode;
dev_t rdev;
loff_t i_size;
struct p9_wstat *st;
struct v9fs_session_info *v9ses;
unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_stat(fid);
@ -1417,16 +1419,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
goto out;
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode(st, inode, inode->i_sb);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode(st, inode, inode->i_sb, flags);
out:
p9stat_free(st);
kfree(st);


@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
if (retval)
goto error;
v9fs_stat2inode_dotl(st, inode);
v9fs_stat2inode_dotl(st, inode, 0);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
if (retval)
@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, d_inode(dentry));
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
generic_fillattr(d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
* v9fs_stat2inode_dotl - populate an inode structure with stat info
* @stat: stat structure
* @inode: inode to populate
* @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
unsigned int flags)
{
umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->st_size);
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
if (stat->st_result_mask & P9_STATS_SIZE)
i_size_write(inode, stat->st_size);
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
stat->st_result_mask & P9_STATS_SIZE)
v9fs_i_size_write(inode, stat->st_size);
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
goto out;
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode_dotl(st, inode);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode_dotl(st, inode, flags);
out:
kfree(st);
return 0;
