Merge android-4.9.191 (9ca8608) into msm-4.9

* refs/heads/tmp-9ca8608:
  Linux 4.9.191
  mm/zsmalloc.c: fix build when CONFIG_COMPACTION=n
  x86/ptrace: fix up botched merge of spectrev1 fix
  i2c: piix4: Fix port selection for AMD Family 16h Model 30h
  KVM: arm/arm64: vgic-v2: Handle SGI bits in GICD_I{S,C}PENDR0 as WI
  KVM: arm/arm64: vgic: Fix potential deadlock when ap_list is long
  mac80211: fix possible sta leak
  Revert "cfg80211: fix processing world regdomain when non modular"
  VMCI: Release resource if the work is already queued
  stm class: Fix a double free of stm_source_device
  mmc: core: Fix init of SD cards reporting an invalid VDD range
  mmc: sdhci-of-at91: add quirk for broken HS200
  uprobes/x86: Fix detection of 32-bit user mode
  ptrace,x86: Make user_64bit_mode() available to 32-bit builds
  USB: storage: ums-realtek: Whitelist auto-delink support
  USB: storage: ums-realtek: Update module parameter description for auto_delink_en
  usb: host: xhci: rcar: Fix typo in compatible string matching
  usb: host: ohci: fix a race condition between shutdown and irq
  usb: chipidea: udc: don't do hardware access if gadget has stopped
  USB: cdc-wdm: fix race between write and disconnect due to flag abuse
  usb-storage: Add new JMS567 revision to unusual_devs
  mm/zsmalloc.c: fix race condition in zs_destroy_pool
  x86/apic: Include the LDR when clearing out APIC registers
  x86/apic: Do not initialize LDR and DFR for bigsmp
  KVM: x86: Don't update RIP or do single-step on faulting emulation
  ALSA: seq: Fix potential concurrent access to the deleted pool
  ALSA: line6: Fix memory leak at line6_init_pcm() error path
  tcp: make sure EPOLLOUT wont be missed
  ALSA: usb-audio: Fix an OOB bug in parse_audio_mixer_unit
  ALSA: usb-audio: Fix a stack buffer overflow bug in check_input_term
  tcp: fix tcp_rtx_queue_tail in case of empty retransmit queue
  scsi: ufs: Fix RX_TERMINATION_FORCE_ENABLE define value
  watchdog: bcm2835_wdt: Fix module autoload
  tools: hv: fix KVP and VSS daemons exit code
  usb: host: fotg2: restart hcd after port reset
  i2c: emev2: avoid race when unregistering slave client
  xen/blkback: fix memory leaks
  usb: gadget: composite: Clear "suspended" on reset/disconnect
  iommu/dma: Handle SG length overflow better
  dmaengine: ste_dma40: fix unneeded variable warning
  x86/CPU/AMD: Clear RDRAND CPUID bit on AMD family 15h/16h
  Revert "perf test 6: Fix missing kvm module load for s390"
  xfs: fix missing ILOCK unlock when xfs_setattr_nonsize fails due to EDQUOT
  mm/zsmalloc.c: migration can leave pages in ZS_EMPTY indefinitely
  mm, page_owner: handle THP splits correctly
  genirq: Properly pair kobject_del() with kobject_add()
  dm table: fix invalid memory accesses with too high sector number
  dm space map metadata: fix missing store of apply_bops() return value
  dm btree: fix order of block initialization in btree_split_beneath
  x86/boot: Fix boot regression caused by bootparam sanitizing
  x86/boot: Save fields explicitly, zero out everything else
  x86/apic: Handle missing global clockevent gracefully
  x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386
  userfaultfd_release: always remove uffd flags and clear vm_userfaultfd_ctx
  gpiolib: never report open-drain/source lines as 'input' to user-space
  Revert "dm bufio: fix deadlock with loop device"
  HID: wacom: Correct distance scale for 2nd-gen Intuos devices
  HID: wacom: correct misreported EKR ring values
  selftests: kvm: Adding config fragments
  perf pmu-events: Fix missing "cpu_clk_unhalted.core" event
  drm/vmwgfx: fix memory leak when too many retries have occurred
  x86/lib/cpu: Address missing prototypes warning
  libata: add SG safety checks in SFF pio transfers
  net: hisilicon: Fix dma_map_single failed on arm64
  net: hisilicon: fix hip04-xmit never return TX_BUSY
  net: hisilicon: make hip04_tx_reclaim non-reentrant
  net: cxgb3_main: Fix a resource leak in a error path in 'init_one()'
  HID: input: fix a4tech horizontal wheel custom usage
  NFSv4: Fix a potential sleep while atomic in nfs4_do_reclaim()
  can: peak_usb: force the string buffer NULL-terminated
  can: sja1000: force the string buffer NULL-terminated
  perf bench numa: Fix cpu0 binding
  isdn: hfcsusb: Fix mISDN driver crash caused by transfer buffer on the stack
  isdn: mISDN: hfcsusb: Fix possible null-pointer dereferences in start_isoc_chain()
  net: usb: qmi_wwan: Add the BroadMobi BM818 card
  ASoC: ti: davinci-mcasp: Correct slot_width posed constraint
  st_nci_hci_connectivity_event_received: null check the allocation
  st21nfca_connectivity_event_received: null check the allocation
  can: dev: call netif_carrier_off() in register_candev()
  bonding: Force slave speed check after link state recovery for 802.3ad
  ASoC: dapm: Fix handling of custom_stop_condition on DAPM graph walks
  netfilter: ebtables: fix a memory leak bug in compat
  MIPS: kernel: only use i8253 clocksource with periodic clockevent
  HID: Add 044f:b320 ThrustMaster, Inc. 2 in 1 DT
  ANDROID: sched: Disallow WALT with CFS bandwidth control
  ANDROID: fiq_debugger: remove
  ANDROID: Add a tracepoint for mapping inode to full path

Conflicts:
	drivers/staging/android/fiq_debugger/fiq_debugger.c
	drivers/usb/gadget/composite.c
	fs/userfaultfd.c
	sound/usb/mixer.c

Change-Id: I1731d5df58f9f2679bfcfa4ecbfaa9a9c273ed77
Signed-off-by: jianzhou <jianzhou@codeaurora.org>
@@ -3855,6 +3855,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+			advertisement of RDRAND support (this affects
+			certain AMD processors because of buggy BIOS
+			support, specifically around the suspend/resume
+			path).
+
 	reboot=		[KNL]
 			Format (x86 or x86_64):
 			[w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 190
+SUBLEVEL = 191
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -17,7 +17,3 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
 	bool
-
-config FIQ_GLUE
-	bool
-	select FIQ
@@ -4,7 +4,6 @@
 
 obj-y += firmware.o
 
-obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST) += icst.o
 obj-$(CONFIG_SA1111) += sa1111.o
 obj-$(CONFIG_DMABOUNCE) += dmabounce.o
@ -1,118 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2008 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.text
|
||||
|
||||
.global fiq_glue_end
|
||||
|
||||
/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
|
||||
|
||||
ENTRY(fiq_glue)
|
||||
/* store pc, cpsr from previous mode, reserve space for spsr */
|
||||
mrs r12, spsr
|
||||
sub lr, lr, #4
|
||||
subs r10, #1
|
||||
bne nested_fiq
|
||||
|
||||
str r12, [sp, #-8]!
|
||||
str lr, [sp, #-4]!
|
||||
|
||||
/* store r8-r14 from previous mode */
|
||||
sub sp, sp, #(7 * 4)
|
||||
stmia sp, {r8-r14}^
|
||||
nop
|
||||
|
||||
/* store r0-r7 from previous mode */
|
||||
stmfd sp!, {r0-r7}
|
||||
|
||||
/* setup func(data,regs) arguments */
|
||||
mov r0, r9
|
||||
mov r1, sp
|
||||
mov r3, r8
|
||||
|
||||
mov r7, sp
|
||||
|
||||
/* Get sp and lr from non-user modes */
|
||||
and r4, r12, #MODE_MASK
|
||||
cmp r4, #USR_MODE
|
||||
beq fiq_from_usr_mode
|
||||
|
||||
mov r7, sp
|
||||
orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
|
||||
msr cpsr_c, r4
|
||||
str sp, [r7, #(4 * 13)]
|
||||
str lr, [r7, #(4 * 14)]
|
||||
mrs r5, spsr
|
||||
str r5, [r7, #(4 * 17)]
|
||||
|
||||
cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
/* use fiq stack if we reenter this mode */
|
||||
subne sp, r7, #(4 * 3)
|
||||
|
||||
fiq_from_usr_mode:
|
||||
msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
mov r2, sp
|
||||
sub sp, r7, #12
|
||||
stmfd sp!, {r2, ip, lr}
|
||||
/* call func(data,regs) */
|
||||
blx r3
|
||||
ldmfd sp, {r2, ip, lr}
|
||||
mov sp, r2
|
||||
|
||||
/* restore/discard saved state */
|
||||
cmp r4, #USR_MODE
|
||||
beq fiq_from_usr_mode_exit
|
||||
|
||||
msr cpsr_c, r4
|
||||
ldr sp, [r7, #(4 * 13)]
|
||||
ldr lr, [r7, #(4 * 14)]
|
||||
msr spsr_cxsf, r5
|
||||
|
||||
fiq_from_usr_mode_exit:
|
||||
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
|
||||
ldmfd sp!, {r0-r7}
|
||||
ldr lr, [sp, #(4 * 7)]
|
||||
ldr r12, [sp, #(4 * 8)]
|
||||
add sp, sp, #(10 * 4)
|
||||
exit_fiq:
|
||||
msr spsr_cxsf, r12
|
||||
add r10, #1
|
||||
cmp r11, #0
|
||||
moveqs pc, lr
|
||||
bx r11 /* jump to custom fiq return function */
|
||||
|
||||
nested_fiq:
|
||||
orr r12, r12, #(PSR_F_BIT)
|
||||
b exit_fiq
|
||||
|
||||
fiq_glue_end:
|
||||
|
||||
ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
|
||||
stmfd sp!, {r4}
|
||||
mrs r4, cpsr
|
||||
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
movs r8, r0
|
||||
mov r9, r1
|
||||
mov sp, r2
|
||||
mov r11, r3
|
||||
moveq r10, #0
|
||||
movne r10, #1
|
||||
msr cpsr_c, r4
|
||||
ldmfd sp!, {r4}
|
||||
bx lr
|
||||
|
|
@ -1,147 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/fiq.h>
|
||||
#include <asm/fiq_glue.h>
|
||||
|
||||
extern unsigned char fiq_glue, fiq_glue_end;
|
||||
extern void fiq_glue_setup(void *func, void *data, void *sp,
|
||||
fiq_return_handler_t fiq_return_handler);
|
||||
|
||||
static struct fiq_handler fiq_debbuger_fiq_handler = {
|
||||
.name = "fiq_glue",
|
||||
};
|
||||
DEFINE_PER_CPU(void *, fiq_stack);
|
||||
static struct fiq_glue_handler *current_handler;
|
||||
static fiq_return_handler_t fiq_return_handler;
|
||||
static DEFINE_MUTEX(fiq_glue_lock);
|
||||
|
||||
static void fiq_glue_setup_helper(void *info)
|
||||
{
|
||||
struct fiq_glue_handler *handler = info;
|
||||
fiq_glue_setup(handler->fiq, handler,
|
||||
__get_cpu_var(fiq_stack) + THREAD_START_SP,
|
||||
fiq_return_handler);
|
||||
}
|
||||
|
||||
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
|
||||
{
|
||||
int ret;
|
||||
int cpu;
|
||||
|
||||
if (!handler || !handler->fiq)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (fiq_stack) {
|
||||
ret = -EBUSY;
|
||||
goto err_busy;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
void *stack;
|
||||
stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
|
||||
if (WARN_ON(!stack)) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_fiq_stack;
|
||||
}
|
||||
per_cpu(fiq_stack, cpu) = stack;
|
||||
}
|
||||
|
||||
ret = claim_fiq(&fiq_debbuger_fiq_handler);
|
||||
if (WARN_ON(ret))
|
||||
goto err_claim_fiq;
|
||||
|
||||
current_handler = handler;
|
||||
on_each_cpu(fiq_glue_setup_helper, handler, true);
|
||||
set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
|
||||
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
return 0;
|
||||
|
||||
err_claim_fiq:
|
||||
err_alloc_fiq_stack:
|
||||
for_each_possible_cpu(cpu) {
|
||||
__free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
|
||||
per_cpu(fiq_stack, cpu) = NULL;
|
||||
}
|
||||
err_busy:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void fiq_glue_update_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
fiq_return_handler = fiq_return;
|
||||
if (current_handler)
|
||||
on_each_cpu(fiq_glue_setup_helper, current_handler, true);
|
||||
}
|
||||
|
||||
int fiq_glue_set_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (fiq_return_handler) {
|
||||
ret = -EBUSY;
|
||||
goto err_busy;
|
||||
}
|
||||
fiq_glue_update_return_handler(fiq_return);
|
||||
ret = 0;
|
||||
err_busy:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(fiq_glue_set_return_handler);
|
||||
|
||||
int fiq_glue_clear_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (WARN_ON(fiq_return_handler != fiq_return)) {
|
||||
ret = -EINVAL;
|
||||
goto err_inval;
|
||||
}
|
||||
fiq_glue_update_return_handler(NULL);
|
||||
ret = 0;
|
||||
err_inval:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(fiq_glue_clear_return_handler);
|
||||
|
||||
/**
|
||||
* fiq_glue_resume - Restore fiqs after suspend or low power idle states
|
||||
*
|
||||
* This must be called before calling local_fiq_enable after returning from a
|
||||
* power state where the fiq mode registers were lost. If a driver provided
|
||||
* a resume hook when it registered the handler it will be called.
|
||||
*/
|
||||
|
||||
void fiq_glue_resume(void)
|
||||
{
|
||||
if (!current_handler)
|
||||
return;
|
||||
fiq_glue_setup(current_handler->fiq, current_handler,
|
||||
__get_cpu_var(fiq_stack) + THREAD_START_SP,
|
||||
fiq_return_handler);
|
||||
if (current_handler->resume)
|
||||
current_handler->resume(current_handler);
|
||||
}
|
||||
|
|
@@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
 
 static int __init init_pit_clocksource(void)
 {
-	if (num_possible_cpus() > 1) /* PIT does not scale! */
+	if (num_possible_cpus() > 1 || /* PIT does not scale! */
+	    !clockevent_state_periodic(&i8253_clockevent))
 		return 0;
 
 	return clocksource_i8253_init();
|
@ -17,6 +17,20 @@
|
|||
* Note: efi_info is commonly left uninitialized, but that field has a
|
||||
* private magic, so it is better to leave it unchanged.
|
||||
*/
|
||||
|
||||
#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
|
||||
|
||||
#define BOOT_PARAM_PRESERVE(struct_member) \
|
||||
{ \
|
||||
.start = offsetof(struct boot_params, struct_member), \
|
||||
.len = sizeof_mbr(struct boot_params, struct_member), \
|
||||
}
|
||||
|
||||
struct boot_params_to_save {
|
||||
unsigned int start;
|
||||
unsigned int len;
|
||||
};
|
||||
|
||||
static void sanitize_boot_params(struct boot_params *boot_params)
|
||||
{
|
||||
/*
|
||||
|
@ -35,19 +49,39 @@ static void sanitize_boot_params(struct boot_params *boot_params)
|
|||
*/
|
||||
if (boot_params->sentinel) {
|
||||
/* fields in boot_params are left uninitialized, clear them */
|
||||
memset(&boot_params->ext_ramdisk_image, 0,
|
||||
(char *)&boot_params->efi_info -
|
||||
(char *)&boot_params->ext_ramdisk_image);
|
||||
memset(&boot_params->kbd_status, 0,
|
||||
(char *)&boot_params->hdr -
|
||||
(char *)&boot_params->kbd_status);
|
||||
memset(&boot_params->_pad7[0], 0,
|
||||
(char *)&boot_params->edd_mbr_sig_buffer[0] -
|
||||
(char *)&boot_params->_pad7[0]);
|
||||
memset(&boot_params->_pad8[0], 0,
|
||||
(char *)&boot_params->eddbuf[0] -
|
||||
(char *)&boot_params->_pad8[0]);
|
||||
memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
|
||||
static struct boot_params scratch;
|
||||
char *bp_base = (char *)boot_params;
|
||||
char *save_base = (char *)&scratch;
|
||||
int i;
|
||||
|
||||
const struct boot_params_to_save to_save[] = {
|
||||
BOOT_PARAM_PRESERVE(screen_info),
|
||||
BOOT_PARAM_PRESERVE(apm_bios_info),
|
||||
BOOT_PARAM_PRESERVE(tboot_addr),
|
||||
BOOT_PARAM_PRESERVE(ist_info),
|
||||
BOOT_PARAM_PRESERVE(hd0_info),
|
||||
BOOT_PARAM_PRESERVE(hd1_info),
|
||||
BOOT_PARAM_PRESERVE(sys_desc_table),
|
||||
BOOT_PARAM_PRESERVE(olpc_ofw_header),
|
||||
BOOT_PARAM_PRESERVE(efi_info),
|
||||
BOOT_PARAM_PRESERVE(alt_mem_k),
|
||||
BOOT_PARAM_PRESERVE(scratch),
|
||||
BOOT_PARAM_PRESERVE(e820_entries),
|
||||
BOOT_PARAM_PRESERVE(eddbuf_entries),
|
||||
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
|
||||
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
|
||||
BOOT_PARAM_PRESERVE(hdr),
|
||||
BOOT_PARAM_PRESERVE(eddbuf),
|
||||
};
|
||||
|
||||
memset(&scratch, 0, sizeof(scratch));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(to_save); i++) {
|
||||
memcpy(save_base + to_save[i].start,
|
||||
bp_base + to_save[i].start, to_save[i].len);
|
||||
}
|
||||
|
||||
memcpy(boot_params, save_base, sizeof(*boot_params));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -313,6 +313,7 @@
|
|||
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
|
||||
#define MSR_AMD64_TSC_RATIO 0xc0000104
|
||||
#define MSR_AMD64_NB_CFG 0xc001001f
|
||||
#define MSR_AMD64_CPUID_FN_1 0xc0011004
|
||||
#define MSR_AMD64_PATCH_LOADER 0xc0010020
|
||||
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
|
||||
#define MSR_AMD64_OSVW_STATUS 0xc0010141
|
||||
|
|
|
@ -196,7 +196,7 @@
|
|||
" lfence;\n" \
|
||||
" jmp 902b;\n" \
|
||||
" .align 16\n" \
|
||||
"903: addl $4, %%esp;\n" \
|
||||
"903: lea 4(%%esp), %%esp;\n" \
|
||||
" pushl %[thunk_target];\n" \
|
||||
" ret;\n" \
|
||||
" .align 16\n" \
|
||||
|
|
|
@ -115,9 +115,9 @@ static inline int v8086_mode(struct pt_regs *regs)
|
|||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
static inline bool user_64bit_mode(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
#ifndef CONFIG_PARAVIRT
|
||||
/*
|
||||
* On non-paravirt systems, this is the only long mode CPL 3
|
||||
|
@ -128,8 +128,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
|
|||
/* Headers are too twisted for this to go in paravirt.h. */
|
||||
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
|
||||
#endif
|
||||
#else /* !CONFIG_X86_64 */
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define current_user_stack_pointer() current_pt_regs()->sp
|
||||
#define compat_user_stack_pointer() current_pt_regs()->sp
|
||||
#endif
|
||||
|
|
|
@ -629,7 +629,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
|
|||
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
|
||||
|
||||
/*
|
||||
* Temporary interrupt handler.
|
||||
* Temporary interrupt handler and polled calibration function.
|
||||
*/
|
||||
static void __init lapic_cal_handler(struct clock_event_device *dev)
|
||||
{
|
||||
|
@ -713,7 +713,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
|
|||
static int __init calibrate_APIC_clock(void)
|
||||
{
|
||||
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
|
||||
void (*real_handler)(struct clock_event_device *dev);
|
||||
u64 tsc_perj = 0, tsc_start = 0;
|
||||
unsigned long jif_start;
|
||||
unsigned long deltaj;
|
||||
long delta, deltatsc;
|
||||
int pm_referenced = 0;
|
||||
|
@ -742,29 +743,65 @@ static int __init calibrate_APIC_clock(void)
|
|||
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
|
||||
"calibrating APIC timer ...\n");
|
||||
|
||||
/*
|
||||
* There are platforms w/o global clockevent devices. Instead of
|
||||
* making the calibration conditional on that, use a polling based
|
||||
* approach everywhere.
|
||||
*/
|
||||
local_irq_disable();
|
||||
|
||||
/* Replace the global interrupt handler */
|
||||
real_handler = global_clock_event->event_handler;
|
||||
global_clock_event->event_handler = lapic_cal_handler;
|
||||
|
||||
/*
|
||||
* Setup the APIC counter to maximum. There is no way the lapic
|
||||
* can underflow in the 100ms detection time frame
|
||||
*/
|
||||
__setup_APIC_LVTT(0xffffffff, 0, 0);
|
||||
|
||||
/* Let the interrupts run */
|
||||
/*
|
||||
* Methods to terminate the calibration loop:
|
||||
* 1) Global clockevent if available (jiffies)
|
||||
* 2) TSC if available and frequency is known
|
||||
*/
|
||||
jif_start = READ_ONCE(jiffies);
|
||||
|
||||
if (tsc_khz) {
|
||||
tsc_start = rdtsc();
|
||||
tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable interrupts so the tick can fire, if a global
|
||||
* clockevent device is available
|
||||
*/
|
||||
local_irq_enable();
|
||||
|
||||
while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
|
||||
cpu_relax();
|
||||
while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
|
||||
/* Wait for a tick to elapse */
|
||||
while (1) {
|
||||
if (tsc_khz) {
|
||||
u64 tsc_now = rdtsc();
|
||||
if ((tsc_now - tsc_start) >= tsc_perj) {
|
||||
tsc_start += tsc_perj;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
unsigned long jif_now = READ_ONCE(jiffies);
|
||||
|
||||
if (time_after(jif_now, jif_start)) {
|
||||
jif_start = jif_now;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
/* Invoke the calibration routine */
|
||||
local_irq_disable();
|
||||
lapic_cal_handler(NULL);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
|
||||
/* Restore the real event handler */
|
||||
global_clock_event->event_handler = real_handler;
|
||||
|
||||
/* Build delta t1-t2 as apic timer counts down */
|
||||
delta = lapic_cal_t1 - lapic_cal_t2;
|
||||
apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
|
||||
|
@ -814,10 +851,11 @@ static int __init calibrate_APIC_clock(void)
|
|||
levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
|
||||
|
||||
/*
|
||||
* PM timer calibration failed or not turned on
|
||||
* so lets try APIC timer based calibration
|
||||
* PM timer calibration failed or not turned on so lets try APIC
|
||||
* timer based calibration, if a global clockevent device is
|
||||
* available.
|
||||
*/
|
||||
if (!pm_referenced) {
|
||||
if (!pm_referenced && global_clock_event) {
|
||||
apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
|
||||
|
||||
/*
|
||||
|
@ -1029,6 +1067,10 @@ void clear_local_APIC(void)
|
|||
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
|
||||
v = apic_read(APIC_LVT1);
|
||||
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
|
||||
if (!x2apic_enabled()) {
|
||||
v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
|
||||
apic_write(APIC_LDR, v);
|
||||
}
|
||||
if (maxlvt >= 4) {
|
||||
v = apic_read(APIC_LVTPC);
|
||||
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
|
||||
|
|
|
@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
|
|||
return early_per_cpu(x86_cpu_to_apicid, cpu);
|
||||
}
|
||||
|
||||
static inline unsigned long calculate_ldr(int cpu)
|
||||
{
|
||||
unsigned long val, id;
|
||||
|
||||
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
|
||||
id = per_cpu(x86_bios_cpu_apicid, cpu);
|
||||
val |= SET_APIC_LOGICAL_ID(id);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the logical destination ID.
|
||||
*
|
||||
* Intel recommends to set DFR, LDR and TPR before enabling
|
||||
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
|
||||
* document number 292116). So here it goes...
|
||||
* bigsmp enables physical destination mode
|
||||
* and doesn't use LDR and DFR
|
||||
*/
|
||||
static void bigsmp_init_apic_ldr(void)
|
||||
{
|
||||
unsigned long val;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
apic_write(APIC_DFR, APIC_DFR_FLAT);
|
||||
val = calculate_ldr(cpu);
|
||||
apic_write(APIC_LDR, val);
|
||||
}
|
||||
|
||||
static void bigsmp_setup_apic_routing(void)
|
||||
|
|
|
@ -746,6 +746,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
|
|||
msr_set_bit(MSR_AMD64_DE_CFG, 31);
|
||||
}
|
||||
|
||||
static bool rdrand_force;
|
||||
|
||||
static int __init rdrand_cmdline(char *str)
|
||||
{
|
||||
if (!str)
|
||||
return -EINVAL;
|
||||
|
||||
if (!strcmp(str, "force"))
|
||||
rdrand_force = true;
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_param("rdrand", rdrand_cmdline);
|
||||
|
||||
static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
|
||||
{
|
||||
/*
|
||||
* Saving of the MSR used to hide the RDRAND support during
|
||||
* suspend/resume is done by arch/x86/power/cpu.c, which is
|
||||
* dependent on CONFIG_PM_SLEEP.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PM_SLEEP))
|
||||
return;
|
||||
|
||||
/*
|
||||
* The nordrand option can clear X86_FEATURE_RDRAND, so check for
|
||||
* RDRAND support using the CPUID function directly.
|
||||
*/
|
||||
if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
|
||||
return;
|
||||
|
||||
msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
|
||||
|
||||
/*
|
||||
* Verify that the CPUID change has occurred in case the kernel is
|
||||
* running virtualized and the hypervisor doesn't support the MSR.
|
||||
*/
|
||||
if (cpuid_ecx(1) & BIT(30)) {
|
||||
pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
clear_cpu_cap(c, X86_FEATURE_RDRAND);
|
||||
pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
|
||||
}
|
||||
|
||||
static void init_amd_jg(struct cpuinfo_x86 *c)
|
||||
{
|
||||
/*
|
||||
* Some BIOS implementations do not restore proper RDRAND support
|
||||
* across suspend and resume. Check on whether to hide the RDRAND
|
||||
* instruction support via CPUID.
|
||||
*/
|
||||
clear_rdrand_cpuid_bit(c);
|
||||
}
|
||||
|
||||
static void init_amd_bd(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 value;
|
||||
|
@ -760,6 +818,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
|
|||
wrmsrl_safe(MSR_F15H_IC_CFG, value);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Some BIOS implementations do not restore proper RDRAND support
|
||||
* across suspend and resume. Check on whether to hide the RDRAND
|
||||
* instruction support via CPUID.
|
||||
*/
|
||||
clear_rdrand_cpuid_bit(c);
|
||||
}
|
||||
|
||||
static void init_amd_zn(struct cpuinfo_x86 *c)
|
||||
|
@ -804,6 +869,7 @@ static void init_amd(struct cpuinfo_x86 *c)
|
|||
case 0x10: init_amd_gh(c); break;
|
||||
case 0x12: init_amd_ln(c); break;
|
||||
case 0x15: init_amd_bd(c); break;
|
||||
case 0x16: init_amd_jg(c); break;
|
||||
case 0x17: init_amd_zn(c); break;
|
||||
}
|
||||
|
||||
|
|
|
@ -651,11 +651,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
|
|||
{
|
||||
struct thread_struct *thread = &tsk->thread;
|
||||
unsigned long val = 0;
|
||||
int index = n;
|
||||
|
||||
if (n < HBP_NUM) {
|
||||
int index = array_index_nospec(n, HBP_NUM);
|
||||
struct perf_event *bp = thread->ptrace_bps[index];
|
||||
index = array_index_nospec(index, HBP_NUM);
|
||||
|
||||
if (bp)
|
||||
val = bp->hw.info.address;
|
||||
|
|
|
@ -514,9 +514,12 @@ struct uprobe_xol_ops {
|
|||
void (*abort)(struct arch_uprobe *, struct pt_regs *);
|
||||
};
|
||||
|
||||
static inline int sizeof_long(void)
|
||||
static inline int sizeof_long(struct pt_regs *regs)
|
||||
{
|
||||
return in_ia32_syscall() ? 4 : 8;
|
||||
/*
|
||||
* Check registers for mode as in_xxx_syscall() does not apply here.
|
||||
*/
|
||||
return user_64bit_mode(regs) ? 8 : 4;
|
||||
}
|
||||
|
||||
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
||||
|
@ -527,9 +530,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
|||
|
||||
static int push_ret_address(struct pt_regs *regs, unsigned long ip)
|
||||
{
|
||||
unsigned long new_sp = regs->sp - sizeof_long();
|
||||
unsigned long new_sp = regs->sp - sizeof_long(regs);
|
||||
|
||||
if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
|
||||
if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
|
||||
return -EFAULT;
|
||||
|
||||
regs->sp = new_sp;
|
||||
|
@ -562,7 +565,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
|
|||
long correction = utask->vaddr - utask->xol_vaddr;
|
||||
regs->ip += correction;
|
||||
} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
|
||||
regs->sp += sizeof_long(); /* Pop incorrect return address */
|
||||
regs->sp += sizeof_long(regs); /* Pop incorrect return address */
|
||||
if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
|
||||
return -ERESTART;
|
||||
}
|
||||
|
@ -671,7 +674,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
|||
* "call" insn was executed out-of-line. Just restore ->sp and restart.
|
||||
* We could also restore ->ip and try to call branch_emulate_op() again.
|
||||
*/
|
||||
regs->sp += sizeof_long();
|
||||
regs->sp += sizeof_long(regs);
|
||||
return -ERESTART;
|
||||
}
|
||||
|
||||
|
@ -962,7 +965,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
|||
unsigned long
|
||||
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
|
||||
{
|
||||
int rasize = sizeof_long(), nleft;
|
||||
int rasize = sizeof_long(regs), nleft;
|
||||
unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
|
||||
|
||||
if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
|
||||
|
|
|
@ -5823,12 +5823,13 @@ restart:
|
|||
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
|
||||
toggle_interruptibility(vcpu, ctxt->interruptibility);
|
||||
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
|
||||
kvm_rip_write(vcpu, ctxt->eip);
|
||||
if (r == EMULATE_DONE && ctxt->tf)
|
||||
kvm_vcpu_do_singlestep(vcpu, &r);
|
||||
if (!ctxt->have_exception ||
|
||||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
|
||||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
|
||||
kvm_rip_write(vcpu, ctxt->eip);
|
||||
if (r == EMULATE_DONE && ctxt->tf)
|
||||
kvm_vcpu_do_singlestep(vcpu, &r);
|
||||
__kvm_set_rflags(vcpu, ctxt->eflags);
|
||||
}
|
||||
|
||||
/*
|
||||
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/cpu.h>
|
||||
|
||||
unsigned int x86_family(unsigned int sig)
|
||||
{
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/tboot.h>
|
||||
#include <linux/dmi.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/proto.h>
|
||||
|
@ -24,7 +25,7 @@
|
|||
#include <asm/debugreg.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
__visible unsigned long saved_context_ebx;
|
||||
|
@ -391,15 +392,14 @@ static int __init bsp_pm_check_init(void)
|
|||
|
||||
core_initcall(bsp_pm_check_init);
|
||||
|
||||
static int msr_init_context(const u32 *msr_id, const int total_num)
|
||||
static int msr_build_context(const u32 *msr_id, const int num)
|
||||
{
|
||||
int i = 0;
|
||||
struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
|
||||
struct saved_msr *msr_array;
|
||||
int total_num;
|
||||
int i, j;
|
||||
|
||||
if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
|
||||
pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
total_num = saved_msrs->num + num;
|
||||
|
||||
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
|
||||
if (!msr_array) {
|
||||
|
@ -407,19 +407,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < total_num; i++) {
|
||||
msr_array[i].info.msr_no = msr_id[i];
|
||||
if (saved_msrs->array) {
|
||||
/*
|
||||
* Multiple callbacks can invoke this function, so copy any
|
||||
* MSR save requests from previous invocations.
|
||||
*/
|
||||
memcpy(msr_array, saved_msrs->array,
|
||||
sizeof(struct saved_msr) * saved_msrs->num);
|
||||
|
||||
kfree(saved_msrs->array);
|
||||
}
|
||||
|
||||
for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
|
||||
msr_array[i].info.msr_no = msr_id[j];
|
||||
msr_array[i].valid = false;
|
||||
msr_array[i].info.reg.q = 0;
|
||||
}
|
||||
saved_context.saved_msrs.num = total_num;
|
||||
saved_context.saved_msrs.array = msr_array;
|
||||
saved_msrs->num = total_num;
|
||||
saved_msrs->array = msr_array;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The following section is a quirk framework for problematic BIOSen:
|
||||
* The following sections are a quirk framework for problematic BIOSen:
|
||||
* Sometimes MSRs are modified by the BIOSen after suspended to
|
||||
* RAM, this might cause unexpected behavior after wakeup.
|
||||
* Thus we save/restore these specified MSRs across suspend/resume
|
||||
|
@ -434,7 +445,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
|
|||
u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
|
||||
|
||||
pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
|
||||
return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
|
||||
return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
|
||||
}
|
||||
|
||||
static struct dmi_system_id msr_save_dmi_table[] = {
|
||||
|
@ -449,9 +460,58 @@ static struct dmi_system_id msr_save_dmi_table[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
|
||||
{
|
||||
u32 cpuid_msr_id[] = {
|
||||
MSR_AMD64_CPUID_FN_1,
|
||||
};
|
||||
|
||||
pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
|
||||
c->family);
|
||||
|
||||
return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
|
||||
}
|
||||
|
||||
static const struct x86_cpu_id msr_save_cpu_table[] = {
|
||||
{
|
||||
.vendor = X86_VENDOR_AMD,
|
||||
.family = 0x15,
|
||||
.model = X86_MODEL_ANY,
|
||||
.feature = X86_FEATURE_ANY,
|
||||
.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
|
||||
},
|
||||
{
|
||||
.vendor = X86_VENDOR_AMD,
|
||||
.family = 0x16,
|
||||
.model = X86_MODEL_ANY,
|
||||
.feature = X86_FEATURE_ANY,
|
||||
.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
|
||||
static int pm_cpu_check(const struct x86_cpu_id *c)
|
||||
{
|
||||
const struct x86_cpu_id *m;
|
||||
int ret = 0;
|
||||
|
||||
m = x86_match_cpu(msr_save_cpu_table);
|
||||
if (m) {
|
||||
pm_cpu_match_t fn;
|
||||
|
||||
fn = (pm_cpu_match_t)m->driver_data;
|
||||
ret = fn(m);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pm_check_save_msr(void)
|
||||
{
|
||||
dmi_check_system(msr_save_dmi_table);
|
||||
pm_cpu_check(msr_save_cpu_table);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
|||
unsigned int offset;
|
||||
unsigned char *buf;
|
||||
|
||||
if (!qc->cursg) {
|
||||
qc->curbytes = qc->nbytes;
|
||||
return;
|
||||
}
|
||||
if (qc->curbytes == qc->nbytes - qc->sect_size)
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
|
||||
|
@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
|||
|
||||
if (qc->cursg_ofs == qc->cursg->length) {
|
||||
qc->cursg = sg_next(qc->cursg);
|
||||
if (!qc->cursg)
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
qc->cursg_ofs = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -967,6 +967,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
|
|||
}
|
||||
blkif->nr_ring_pages = nr_grefs;
|
||||
|
||||
err = -ENOMEM;
|
||||
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
|
||||
req = kzalloc(sizeof(*req), GFP_KERNEL);
|
||||
if (!req)
|
||||
|
@ -989,7 +990,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
|
|||
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
|
||||
if (err) {
|
||||
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
|
||||
return err;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1009,8 +1010,7 @@ fail:
|
|||
}
|
||||
kfree(req);
|
||||
}
|
||||
return -ENOMEM;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int connect_ring(struct backend_info *be)
|
||||
|
|
|
@ -142,7 +142,7 @@ enum d40_events {
|
|||
* when the DMA hw is powered off.
|
||||
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
|
||||
*/
|
||||
static u32 d40_backup_regs[] = {
|
||||
static __maybe_unused u32 d40_backup_regs[] = {
|
||||
D40_DREG_LCPA,
|
||||
D40_DREG_LCLA,
|
||||
D40_DREG_PRMSE,
|
||||
|
@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
|
|||
|
||||
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
|
||||
|
||||
static u32 d40_backup_regs_chan[] = {
|
||||
static __maybe_unused u32 d40_backup_regs_chan[] = {
|
||||
D40_CHAN_REG_SSCFG,
|
||||
D40_CHAN_REG_SSELT,
|
||||
D40_CHAN_REG_SSPTR,
|
||||
|
|
|
@ -953,9 +953,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|||
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
|
||||
lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
|
||||
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
|
||||
lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
|
||||
lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
|
||||
GPIOLINE_FLAG_IS_OUT);
|
||||
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
|
||||
lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
|
||||
lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
|
||||
GPIOLINE_FLAG_IS_OUT);
|
||||
|
||||
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
|
||||
return -EFAULT;
|
||||
|
|
|
@ -300,8 +300,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
|
|||
break;
|
||||
}
|
||||
|
||||
if (retries == RETRIES)
|
||||
if (retries == RETRIES) {
|
||||
kfree(reply);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*msg_len = reply_len;
|
||||
*msg = reply;
|
||||
|
|
|
@ -26,12 +26,36 @@
|
|||
#define A4_2WHEEL_MOUSE_HACK_7 0x01
|
||||
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
|
||||
|
||||
#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
|
||||
|
||||
struct a4tech_sc {
|
||||
unsigned long quirks;
|
||||
unsigned int hw_wheel;
|
||||
__s32 delayed_value;
|
||||
};
|
||||
|
||||
static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
struct hid_field *field, struct hid_usage *usage,
|
||||
unsigned long **bit, int *max)
|
||||
{
|
||||
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
|
||||
|
||||
if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
|
||||
usage->hid == A4_WHEEL_ORIENTATION) {
|
||||
/*
|
||||
* We do not want to have this usage mapped to anything as it's
|
||||
* nonstandard and doesn't really behave like an HID report.
|
||||
* It's only selecting the orientation (vertical/horizontal) of
|
||||
* the previous mouse wheel report. The input_events will be
|
||||
* generated once both reports are recorded in a4_event().
|
||||
*/
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
|
||||
struct hid_field *field, struct hid_usage *usage,
|
||||
unsigned long **bit, int *max)
|
||||
|
@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
|
|||
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
|
||||
struct input_dev *input;
|
||||
|
||||
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
|
||||
!usage->type)
|
||||
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
|
||||
return 0;
|
||||
|
||||
input = field->hidinput->input;
|
||||
|
@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
|
|||
return 1;
|
||||
}
|
||||
|
||||
if (usage->hid == 0x000100b8) {
|
||||
if (usage->hid == A4_WHEEL_ORIENTATION) {
|
||||
input_event(input, EV_REL, value ? REL_HWHEEL :
|
||||
REL_WHEEL, a4->delayed_value);
|
||||
return 1;
|
||||
|
@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
|
|||
static struct hid_driver a4_driver = {
|
||||
.name = "a4tech",
|
||||
.id_table = a4_devices,
|
||||
.input_mapping = a4_input_mapping,
|
||||
.input_mapped = a4_input_mapped,
|
||||
.event = a4_event,
|
||||
.probe = a4_probe,
|
||||
|
|
|
@ -34,6 +34,8 @@
|
|||
|
||||
#include "hid-ids.h"
|
||||
|
||||
#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
|
||||
|
||||
static const signed short ff_rumble[] = {
|
||||
FF_RUMBLE,
|
||||
-1
|
||||
|
@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
|
|||
struct hid_field *ff_field = tmff->ff_field;
|
||||
int x, y;
|
||||
int left, right; /* Rumbling */
|
||||
int motor_swap;
|
||||
|
||||
switch (effect->type) {
|
||||
case FF_CONSTANT:
|
||||
|
@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
|
|||
ff_field->logical_minimum,
|
||||
ff_field->logical_maximum);
|
||||
|
||||
/* 2-in-1 strong motor is left */
|
||||
if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
|
||||
motor_swap = left;
|
||||
left = right;
|
||||
right = motor_swap;
|
||||
}
|
||||
|
||||
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
|
||||
ff_field->value[0] = left;
|
||||
ff_field->value[1] = right;
|
||||
|
@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
|
|||
.driver_data = (unsigned long)ff_rumble },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
|
||||
.driver_data = (unsigned long)ff_rumble },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
|
||||
.driver_data = (unsigned long)ff_rumble },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
|
||||
.driver_data = (unsigned long)ff_rumble },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
|
||||
|
|
|
@ -819,7 +819,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
|
|||
input_report_key(input, BTN_BASE2, (data[11] & 0x02));
|
||||
|
||||
if (data[12] & 0x80)
|
||||
input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
|
||||
input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
|
||||
else
|
||||
input_report_abs(input, ABS_WHEEL, 0);
|
||||
|
||||
|
@ -949,6 +949,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
|
|||
y >>= 1;
|
||||
distance >>= 1;
|
||||
}
|
||||
if (features->type == INTUOSHT2)
|
||||
distance = features->distance_max - distance;
|
||||
input_report_abs(input, ABS_X, x);
|
||||
input_report_abs(input, ABS_Y, y);
|
||||
input_report_abs(input, ABS_DISTANCE, distance);
|
||||
|
|
|
@ -1114,7 +1114,6 @@ int stm_source_register_device(struct device *parent,
|
|||
|
||||
err:
|
||||
put_device(&src->dev);
|
||||
kfree(src);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -72,6 +72,7 @@ struct em_i2c_device {
|
|||
struct completion msg_done;
|
||||
struct clk *sclk;
|
||||
struct i2c_client *slave;
|
||||
int irq;
|
||||
};
|
||||
|
||||
static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
|
||||
|
@ -342,6 +343,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
|
|||
|
||||
writeb(0, priv->base + I2C_OFS_SVA0);
|
||||
|
||||
/*
|
||||
* Wait for interrupt to finish. New slave irqs cannot happen because we
|
||||
* cleared the slave address and, thus, only extension codes will be
|
||||
* detected which do not use the slave ptr.
|
||||
*/
|
||||
synchronize_irq(priv->irq);
|
||||
priv->slave = NULL;
|
||||
|
||||
return 0;
|
||||
|
@ -358,7 +365,7 @@ static int em_i2c_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct em_i2c_device *priv;
|
||||
struct resource *r;
|
||||
int irq, ret;
|
||||
int ret;
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
|
@ -391,8 +398,8 @@ static int em_i2c_probe(struct platform_device *pdev)
|
|||
|
||||
em_i2c_reset(&priv->adap);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
|
||||
priv->irq = platform_get_irq(pdev, 0);
|
||||
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
|
||||
"em_i2c", priv);
|
||||
if (ret)
|
||||
goto err_clk;
|
||||
|
@ -402,7 +409,8 @@ static int em_i2c_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto err_clk;
|
||||
|
||||
dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
|
||||
dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
|
||||
priv->irq);
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -96,7 +96,7 @@
|
|||
#define SB800_PIIX4_PORT_IDX_MASK 0x06
|
||||
#define SB800_PIIX4_PORT_IDX_SHIFT 1
|
||||
|
||||
/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
|
||||
/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
|
||||
#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
|
||||
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
|
||||
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
|
||||
|
@ -355,18 +355,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
|
|||
|
||||
/* Find which register is used for port selection */
|
||||
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
|
||||
switch (PIIX4_dev->device) {
|
||||
case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
|
||||
if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
|
||||
(PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
|
||||
PIIX4_dev->revision >= 0x1F)) {
|
||||
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
|
||||
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
|
||||
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
|
||||
break;
|
||||
case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
|
||||
default:
|
||||
} else {
|
||||
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
|
||||
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
|
||||
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
mutex_lock(&piix4_mutex_sb800);
|
||||
|
|
|
@ -589,7 +589,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
* - and wouldn't make the resulting output segment too long
|
||||
*/
|
||||
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
|
||||
(cur_len + s_length <= max_len)) {
|
||||
(max_len - cur_len >= s_length)) {
|
||||
/* ...then concatenate it with the previous one */
|
||||
cur_len += s_length;
|
||||
} else {
|
||||
|
|
|
@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
|
|||
printk(KERN_DEBUG
|
||||
"%s: %s: alloc urb for fifo %i failed",
|
||||
hw->name, __func__, fifo->fifonum);
|
||||
continue;
|
||||
}
|
||||
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
|
||||
fifo->iso[i].indx = i;
|
||||
|
@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
|
|||
static int
|
||||
setup_hfcsusb(struct hfcsusb *hw)
|
||||
{
|
||||
void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
|
||||
u_char b;
|
||||
int ret;
|
||||
|
||||
if (debug & DBG_HFC_CALL_TRACE)
|
||||
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
|
||||
|
||||
if (!dmabuf)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
|
||||
|
||||
memcpy(&b, dmabuf, sizeof(u_char));
|
||||
kfree(dmabuf);
|
||||
|
||||
/* check the chip id */
|
||||
if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
|
||||
if (ret != 1) {
|
||||
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
|
||||
hw->name, __func__);
|
||||
return 1;
|
||||
|
|
|
@ -1585,7 +1585,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|||
unsigned long freed;
|
||||
|
||||
c = container_of(shrink, struct dm_bufio_client, shrinker);
|
||||
if (!dm_bufio_trylock(c))
|
||||
if (sc->gfp_mask & __GFP_FS)
|
||||
dm_bufio_lock(c);
|
||||
else if (!dm_bufio_trylock(c))
|
||||
return SHRINK_STOP;
|
||||
|
||||
freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
|
||||
|
|
|
@ -1264,7 +1264,7 @@ void dm_table_event(struct dm_table *t)
|
|||
}
|
||||
EXPORT_SYMBOL(dm_table_event);
|
||||
|
||||
sector_t dm_table_get_size(struct dm_table *t)
|
||||
inline sector_t dm_table_get_size(struct dm_table *t)
|
||||
{
|
||||
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
|
||||
}
|
||||
|
@ -1289,6 +1289,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
|
|||
unsigned int l, n = 0, k = 0;
|
||||
sector_t *node;
|
||||
|
||||
if (unlikely(sector >= dm_table_get_size(t)))
|
||||
return &t->targets[t->num_targets];
|
||||
|
||||
for (l = 0; l < t->depth; l++) {
|
||||
n = get_child(n, k);
|
||||
node = get_node(t, l, n);
|
||||
|
|
|
@ -623,39 +623,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
|
|||
|
||||
new_parent = shadow_current(s);
|
||||
|
||||
pn = dm_block_data(new_parent);
|
||||
size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
|
||||
sizeof(__le64) : s->info->value_type.size;
|
||||
|
||||
/* create & init the left block */
|
||||
r = new_block(s->info, &left);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
ln = dm_block_data(left);
|
||||
nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
|
||||
|
||||
ln->header.flags = pn->header.flags;
|
||||
ln->header.nr_entries = cpu_to_le32(nr_left);
|
||||
ln->header.max_entries = pn->header.max_entries;
|
||||
ln->header.value_size = pn->header.value_size;
|
||||
memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
|
||||
memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
|
||||
|
||||
/* create & init the right block */
|
||||
r = new_block(s->info, &right);
|
||||
if (r < 0) {
|
||||
unlock_block(s->info, left);
|
||||
return r;
|
||||
}
|
||||
|
||||
pn = dm_block_data(new_parent);
|
||||
ln = dm_block_data(left);
|
||||
rn = dm_block_data(right);
|
||||
|
||||
nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
|
||||
nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
|
||||
|
||||
ln->header.flags = pn->header.flags;
|
||||
ln->header.nr_entries = cpu_to_le32(nr_left);
|
||||
ln->header.max_entries = pn->header.max_entries;
|
||||
ln->header.value_size = pn->header.value_size;
|
||||
|
||||
rn->header.flags = pn->header.flags;
|
||||
rn->header.nr_entries = cpu_to_le32(nr_right);
|
||||
rn->header.max_entries = pn->header.max_entries;
|
||||
rn->header.value_size = pn->header.value_size;
|
||||
|
||||
memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
|
||||
memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
|
||||
|
||||
size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
|
||||
sizeof(__le64) : s->info->value_type.size;
|
||||
memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
|
||||
memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
|
||||
nr_right * size);
|
||||
|
||||
|
|
|
@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
|
|||
}
|
||||
|
||||
if (smm->recursion_count == 1)
|
||||
apply_bops(smm);
|
||||
r = apply_bops(smm);
|
||||
|
||||
smm->recursion_count--;
|
||||
|
||||
|
|
|
@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
|
|||
|
||||
entry = container_of(resource, struct dbell_entry, resource);
|
||||
if (entry->run_delayed) {
|
||||
schedule_work(&entry->work);
|
||||
if (!schedule_work(&entry->work))
|
||||
vmci_resource_put(resource);
|
||||
} else {
|
||||
entry->notify_cb(entry->client_data);
|
||||
vmci_resource_put(resource);
|
||||
|
@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
|
|||
atomic_read(&dbell->active) == 1) {
|
||||
if (dbell->run_delayed) {
|
||||
vmci_resource_get(&dbell->resource);
|
||||
schedule_work(&dbell->work);
|
||||
if (!schedule_work(&dbell->work))
|
||||
vmci_resource_put(&dbell->resource);
|
||||
} else {
|
||||
dbell->notify_cb(dbell->client_data);
|
||||
}
|
||||
|
|
|
@ -1492,6 +1492,12 @@ int mmc_attach_sd(struct mmc_host *host)
|
|||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some SD cards claims an out of spec VDD voltage range. Let's treat
|
||||
* these bits as being in-valid and especially also bit7.
|
||||
*/
|
||||
ocr &= ~0x7FFF;
|
||||
|
||||
rocr = mmc_select_voltage(host, ocr);
|
||||
|
||||
/*
|
||||
|
|
|
@ -317,6 +317,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
|
|||
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
|
||||
pm_runtime_use_autosuspend(&pdev->dev);
|
||||
|
||||
/* HS200 is broken at this moment */
|
||||
host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
|
||||
|
||||
ret = sdhci_add_host(host);
|
||||
if (ret)
|
||||
goto pm_runtime_disable;
|
||||
|
|
|
@ -2131,6 +2131,15 @@ static void bond_miimon_commit(struct bonding *bond)
|
|||
bond_for_each_slave(bond, slave, iter) {
|
||||
switch (slave->new_link) {
|
||||
case BOND_LINK_NOCHANGE:
|
||||
/* For 802.3ad mode, check current slave speed and
|
||||
* duplex again in case its port was disabled after
|
||||
* invalid speed/duplex reporting but recovered before
|
||||
* link monitoring could make a decision on the actual
|
||||
* link status
|
||||
*/
|
||||
if (BOND_MODE(bond) == BOND_MODE_8023AD &&
|
||||
slave->link == BOND_LINK_UP)
|
||||
bond_3ad_adapter_speed_duplex_changed(slave);
|
||||
continue;
|
||||
|
||||
case BOND_LINK_UP:
|
||||
|
|
|
@ -1095,6 +1095,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
|
|||
int register_candev(struct net_device *dev)
|
||||
{
|
||||
dev->rtnl_link_ops = &can_link_ops;
|
||||
netif_carrier_off(dev);
|
||||
|
||||
return register_netdev(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(register_candev);
|
||||
|
|
|
@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
|
|||
if (!netdev)
|
||||
continue;
|
||||
|
||||
strncpy(name, netdev->name, IFNAMSIZ);
|
||||
strlcpy(name, netdev->name, IFNAMSIZ);
|
||||
|
||||
unregister_sja1000dev(netdev);
|
||||
|
||||
|
|
|
@ -879,7 +879,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
|
|||
|
||||
dev_prev_siblings = dev->prev_siblings;
|
||||
dev->state &= ~PCAN_USB_STATE_CONNECTED;
|
||||
strncpy(name, netdev->name, IFNAMSIZ);
|
||||
strlcpy(name, netdev->name, IFNAMSIZ);
|
||||
|
||||
unregister_netdev(netdev);
|
||||
|
||||
|
|
|
@ -3263,7 +3263,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
if (!adapter->regs) {
|
||||
dev_err(&pdev->dev, "cannot map device registers\n");
|
||||
err = -ENOMEM;
|
||||
goto out_free_adapter;
|
||||
goto out_free_adapter_nofail;
|
||||
}
|
||||
|
||||
adapter->pdev = pdev;
|
||||
|
@ -3381,6 +3381,9 @@ out_free_dev:
|
|||
if (adapter->port[i])
|
||||
free_netdev(adapter->port[i]);
|
||||
|
||||
out_free_adapter_nofail:
|
||||
kfree_skb(adapter->nofail_skb);
|
||||
|
||||
out_free_adapter:
|
||||
kfree(adapter);
|
||||
|
||||
|
|
|
@ -157,6 +157,7 @@ struct hip04_priv {
|
|||
unsigned int reg_inten;
|
||||
|
||||
struct napi_struct napi;
|
||||
struct device *dev;
|
||||
struct net_device *ndev;
|
||||
|
||||
struct tx_desc *tx_desc;
|
||||
|
@ -185,7 +186,7 @@ struct hip04_priv {
|
|||
|
||||
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
|
||||
{
|
||||
return (head - tail) % (TX_DESC_NUM - 1);
|
||||
return (head - tail) % TX_DESC_NUM;
|
||||
}
|
||||
|
||||
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
|
||||
|
@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
|
|||
}
|
||||
|
||||
if (priv->tx_phys[tx_tail]) {
|
||||
dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
|
||||
dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
|
||||
priv->tx_skb[tx_tail]->len,
|
||||
DMA_TO_DEVICE);
|
||||
priv->tx_phys[tx_tail] = 0;
|
||||
|
@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&ndev->dev, phys)) {
|
||||
phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(priv->dev, phys)) {
|
||||
dev_kfree_skb(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
|
|||
u16 len;
|
||||
u32 err;
|
||||
|
||||
/* clean up tx descriptors */
|
||||
tx_remaining = hip04_tx_reclaim(ndev, false);
|
||||
|
||||
while (cnt && !last) {
|
||||
buf = priv->rx_buf[priv->rx_head];
|
||||
skb = build_skb(buf, priv->rx_buf_size);
|
||||
|
@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
|
|||
goto refill;
|
||||
}
|
||||
|
||||
dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
|
||||
dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
|
||||
RX_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
priv->rx_phys[priv->rx_head] = 0;
|
||||
|
||||
|
@ -534,9 +538,9 @@ refill:
|
|||
buf = netdev_alloc_frag(priv->rx_buf_size);
|
||||
if (!buf)
|
||||
goto done;
|
||||
phys = dma_map_single(&ndev->dev, buf,
|
||||
phys = dma_map_single(priv->dev, buf,
|
||||
RX_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&ndev->dev, phys))
|
||||
if (dma_mapping_error(priv->dev, phys))
|
||||
goto done;
|
||||
priv->rx_buf[priv->rx_head] = buf;
|
||||
priv->rx_phys[priv->rx_head] = phys;
|
||||
|
@ -557,8 +561,7 @@ refill:
|
|||
}
|
||||
napi_complete(napi);
|
||||
done:
|
||||
/* clean up tx descriptors and start a new timer if necessary */
|
||||
tx_remaining = hip04_tx_reclaim(ndev, false);
|
||||
/* start a new timer if necessary */
|
||||
if (rx < budget && tx_remaining)
|
||||
hip04_start_tx_timer(priv);
|
||||
|
||||
|
@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
|
|||
for (i = 0; i < RX_DESC_NUM; i++) {
|
||||
dma_addr_t phys;
|
||||
|
||||
phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
|
||||
phys = dma_map_single(priv->dev, priv->rx_buf[i],
|
||||
RX_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&ndev->dev, phys))
|
||||
if (dma_mapping_error(priv->dev, phys))
|
||||
return -EIO;
|
||||
|
||||
priv->rx_phys[i] = phys;
|
||||
|
@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
|
|||
|
||||
for (i = 0; i < RX_DESC_NUM; i++) {
|
||||
if (priv->rx_phys[i]) {
|
||||
dma_unmap_single(&ndev->dev, priv->rx_phys[i],
|
||||
dma_unmap_single(priv->dev, priv->rx_phys[i],
|
||||
RX_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
priv->rx_phys[i] = 0;
|
||||
}
|
||||
|
@ -827,6 +830,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
|
|||
return -ENOMEM;
|
||||
|
||||
priv = netdev_priv(ndev);
|
||||
priv->dev = d;
|
||||
priv->ndev = ndev;
|
||||
platform_set_drvdata(pdev, ndev);
|
||||
|
||||
|
|
|
@ -892,6 +892,7 @@ static const struct usb_device_id products[] = {
|
|||
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
|
||||
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
|
||||
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
|
||||
{QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
|
||||
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
|
||||
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
|
||||
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
|
||||
|
|
|
@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
|
|||
|
||||
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
|
||||
skb->len - 2, GFP_KERNEL);
|
||||
if (!transaction)
|
||||
return -ENOMEM;
|
||||
|
||||
transaction->aid_len = skb->data[1];
|
||||
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
|
||||
|
|
|
@ -333,6 +333,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
|
|||
|
||||
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
|
||||
skb->len - 2, GFP_KERNEL);
|
||||
if (!transaction)
|
||||
return -ENOMEM;
|
||||
|
||||
transaction->aid_len = skb->data[1];
|
||||
memcpy(transaction->aid, &skb->data[2],
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
#define RX_HS_UNTERMINATED_ENABLE 0x00A6
|
||||
#define RX_ENTER_HIBERN8 0x00A7
|
||||
#define RX_BYPASS_8B10B_ENABLE 0x00A8
|
||||
#define RX_TERMINATION_FORCE_ENABLE 0x0089
|
||||
#define RX_TERMINATION_FORCE_ENABLE 0x00A9
|
||||
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
|
||||
#define RX_HIBERN8TIME_CAPABILITY 0x0092
|
||||
|
||||
|
|
|
@ -44,8 +44,6 @@ config ANDROID_VSOC
|
|||
|
||||
source "drivers/staging/android/ion/Kconfig"
|
||||
|
||||
source "drivers/staging/android/fiq_debugger/Kconfig"
|
||||
|
||||
endif # if ANDROID
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
ccflags-y += -I$(src) # needed for trace events
|
||||
|
||||
obj-y += ion/
|
||||
obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/
|
||||
|
||||
obj-$(CONFIG_ASHMEM) += ashmem.o
|
||||
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
config FIQ_DEBUGGER
|
||||
bool "FIQ Mode Serial Debugger"
|
||||
default n
|
||||
depends on ARM || ARM64
|
||||
help
|
||||
The FIQ serial debugger can accept commands even when the
|
||||
kernel is unresponsive due to being stuck with interrupts
|
||||
disabled.
|
||||
|
||||
config FIQ_DEBUGGER_NO_SLEEP
|
||||
bool "Keep serial debugger active"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Enables the serial debugger at boot. Passing
|
||||
fiq_debugger.no_sleep on the kernel commandline will
|
||||
override this config option.
|
||||
|
||||
config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
|
||||
bool "Don't disable wakeup IRQ when debugger is active"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Don't disable the wakeup irq when enabling the uart clock. This will
|
||||
cause extra interrupts, but it makes the serial debugger usable
|
||||
on some MSM radio builds that ignore the uart clock request in power
|
||||
collapse.
|
||||
|
||||
config FIQ_DEBUGGER_CONSOLE
|
||||
bool "Console on FIQ Serial Debugger port"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Enables a console so that printk messages are displayed on
|
||||
the debugger serial port as they occur.
|
||||
|
||||
config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
|
||||
bool "Put the FIQ debugger into console mode by default"
|
||||
depends on FIQ_DEBUGGER_CONSOLE
|
||||
default n
|
||||
help
|
||||
If enabled, this puts the fiq debugger into console mode by default.
|
||||
Otherwise, the fiq debugger will start out in debug mode.
|
||||
|
||||
config FIQ_DEBUGGER_UART_OVERLAY
|
||||
bool "Install uart DT overlay"
|
||||
depends on FIQ_DEBUGGER
|
||||
select OF_OVERLAY
|
||||
default n
|
||||
help
|
||||
If enabled, the fiq debugger calls fiq_debugger_uart_overlay(),
which applies the uart_overlay@0 overlay to disable the proper uart.
|
||||
|
||||
config FIQ_WATCHDOG
|
||||
bool
|
||||
select FIQ_DEBUGGER
|
||||
select PSTORE_RAM
|
||||
default n
|
|
@ -1,4 +0,0 @@
|
|||
obj-y += fiq_debugger.o
|
||||
obj-$(CONFIG_ARM) += fiq_debugger_arm.o
|
||||
obj-$(CONFIG_ARM64) += fiq_debugger_arm64.o
|
||||
obj-$(CONFIG_FIQ_WATCHDOG) += fiq_watchdog.o
|
File diff suppressed because it is too large
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* drivers/staging/android/fiq_debugger/fiq_debugger.h
|
||||
*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
* Author: Colin Cross <ccross@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
|
||||
#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
|
||||
|
||||
#include <linux/serial_core.h>
|
||||
|
||||
#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
|
||||
#define FIQ_DEBUGGER_BREAK 0x00ff0100
|
||||
|
||||
#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
|
||||
#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
|
||||
#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
|
||||
|
||||
/**
|
||||
* struct fiq_debugger_pdata - fiq debugger platform data
|
||||
* @uart_resume: used to restore uart state right before enabling
|
||||
* the fiq.
|
||||
* @uart_enable: Do the work necessary to communicate with the uart
|
||||
* hw (enable clocks, etc.). This must be ref-counted.
|
||||
* @uart_disable: Do the work necessary to disable the uart hw
|
||||
* (disable clocks, etc.). This must be ref-counted.
|
||||
* @uart_dev_suspend: called during PM suspend, generally not needed
|
||||
* for real fiq mode debugger.
|
||||
* @uart_dev_resume: called during PM resume, generally not needed
|
||||
* for real fiq mode debugger.
|
||||
*/
|
||||
struct fiq_debugger_pdata {
|
||||
int (*uart_init)(struct platform_device *pdev);
|
||||
void (*uart_free)(struct platform_device *pdev);
|
||||
int (*uart_resume)(struct platform_device *pdev);
|
||||
int (*uart_getc)(struct platform_device *pdev);
|
||||
void (*uart_putc)(struct platform_device *pdev, unsigned int c);
|
||||
void (*uart_flush)(struct platform_device *pdev);
|
||||
void (*uart_enable)(struct platform_device *pdev);
|
||||
void (*uart_disable)(struct platform_device *pdev);
|
||||
|
||||
int (*uart_dev_suspend)(struct platform_device *pdev);
|
||||
int (*uart_dev_resume)(struct platform_device *pdev);
|
||||
|
||||
void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
|
||||
bool enable);
|
||||
void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
|
||||
|
||||
void (*force_irq)(struct platform_device *pdev, unsigned int irq);
|
||||
void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
|
||||
};
|
||||
|
||||
#endif
|
|
@ -1,240 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Google, Inc.
|
||||
* Author: Colin Cross <ccross@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/stacktrace.h>
|
||||
|
||||
#include "fiq_debugger_priv.h"
|
||||
|
||||
static char *mode_name(unsigned cpsr)
|
||||
{
|
||||
switch (cpsr & MODE_MASK) {
|
||||
case USR_MODE: return "USR";
|
||||
case FIQ_MODE: return "FIQ";
|
||||
case IRQ_MODE: return "IRQ";
|
||||
case SVC_MODE: return "SVC";
|
||||
case ABT_MODE: return "ABT";
|
||||
case UND_MODE: return "UND";
|
||||
case SYSTEM_MODE: return "SYS";
|
||||
default: return "???";
|
||||
}
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
output->printf(output, " pc %08x cpsr %08x mode %s\n",
|
||||
regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
output->printf(output,
|
||||
" r0 %08x r1 %08x r2 %08x r3 %08x\n",
|
||||
regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
|
||||
output->printf(output,
|
||||
" r4 %08x r5 %08x r6 %08x r7 %08x\n",
|
||||
regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
|
||||
output->printf(output,
|
||||
" r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n",
|
||||
regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
|
||||
mode_name(regs->ARM_cpsr));
|
||||
output->printf(output,
|
||||
" ip %08x sp %08x lr %08x pc %08x cpsr %08x\n",
|
||||
regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
|
||||
regs->ARM_cpsr);
|
||||
}
|
||||
|
||||
struct mode_regs {
|
||||
unsigned long sp_svc;
|
||||
unsigned long lr_svc;
|
||||
unsigned long spsr_svc;
|
||||
|
||||
unsigned long sp_abt;
|
||||
unsigned long lr_abt;
|
||||
unsigned long spsr_abt;
|
||||
|
||||
unsigned long sp_und;
|
||||
unsigned long lr_und;
|
||||
unsigned long spsr_und;
|
||||
|
||||
unsigned long sp_irq;
|
||||
unsigned long lr_irq;
|
||||
unsigned long spsr_irq;
|
||||
|
||||
unsigned long r8_fiq;
|
||||
unsigned long r9_fiq;
|
||||
unsigned long r10_fiq;
|
||||
unsigned long r11_fiq;
|
||||
unsigned long r12_fiq;
|
||||
unsigned long sp_fiq;
|
||||
unsigned long lr_fiq;
|
||||
unsigned long spsr_fiq;
|
||||
};
|
||||
|
||||
static void __naked get_mode_regs(struct mode_regs *regs)
|
||||
{
|
||||
asm volatile (
|
||||
"mrs r1, cpsr\n"
|
||||
"msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
|
||||
"stmia r0!, {r13 - r14}\n"
|
||||
"mrs r2, spsr\n"
|
||||
"msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
|
||||
"stmia r0!, {r2, r13 - r14}\n"
|
||||
"mrs r2, spsr\n"
|
||||
"msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
|
||||
"stmia r0!, {r2, r13 - r14}\n"
|
||||
"mrs r2, spsr\n"
|
||||
"msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
|
||||
"stmia r0!, {r2, r13 - r14}\n"
|
||||
"mrs r2, spsr\n"
|
||||
"msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
|
||||
"stmia r0!, {r2, r8 - r14}\n"
|
||||
"mrs r2, spsr\n"
|
||||
"stmia r0!, {r2}\n"
|
||||
"msr cpsr_c, r1\n"
|
||||
"bx lr\n");
|
||||
}
|
||||
|
||||
|
||||
void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
struct mode_regs mode_regs;
|
||||
unsigned long mode = regs->ARM_cpsr & MODE_MASK;
|
||||
|
||||
fiq_debugger_dump_regs(output, regs);
|
||||
get_mode_regs(&mode_regs);
|
||||
|
||||
output->printf(output,
|
||||
"%csvc: sp %08x lr %08x spsr %08x\n",
|
||||
mode == SVC_MODE ? '*' : ' ',
|
||||
mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
|
||||
output->printf(output,
|
||||
"%cabt: sp %08x lr %08x spsr %08x\n",
|
||||
mode == ABT_MODE ? '*' : ' ',
|
||||
mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
|
||||
output->printf(output,
|
||||
"%cund: sp %08x lr %08x spsr %08x\n",
|
||||
mode == UND_MODE ? '*' : ' ',
|
||||
mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
|
||||
output->printf(output,
|
||||
"%cirq: sp %08x lr %08x spsr %08x\n",
|
||||
mode == IRQ_MODE ? '*' : ' ',
|
||||
mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
|
||||
output->printf(output,
|
||||
"%cfiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n",
|
||||
mode == FIQ_MODE ? '*' : ' ',
|
||||
mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
|
||||
mode_regs.r11_fiq, mode_regs.r12_fiq);
|
||||
output->printf(output,
|
||||
" fiq: sp %08x lr %08x spsr %08x\n",
|
||||
mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
|
||||
}
|
||||
|
||||
struct stacktrace_state {
|
||||
struct fiq_debugger_output *output;
|
||||
unsigned int depth;
|
||||
};
|
||||
|
||||
static int report_trace(struct stackframe *frame, void *d)
|
||||
{
|
||||
struct stacktrace_state *sts = d;
|
||||
|
||||
if (sts->depth) {
|
||||
sts->output->printf(sts->output,
|
||||
" pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
|
||||
frame->pc, frame->pc, frame->lr, frame->lr,
|
||||
frame->sp, frame->fp);
|
||||
sts->depth--;
|
||||
return 0;
|
||||
}
|
||||
sts->output->printf(sts->output, " ...\n");
|
||||
|
||||
return sts->depth == 0;
|
||||
}
|
||||
|
||||
struct frame_tail {
|
||||
struct frame_tail *fp;
|
||||
unsigned long sp;
|
||||
unsigned long lr;
|
||||
} __attribute__((packed));
|
||||
|
||||
static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
|
||||
struct frame_tail *tail)
|
||||
{
|
||||
struct frame_tail buftail[2];
|
||||
|
||||
/* Also check accessibility of one struct frame_tail beyond */
|
||||
if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
|
||||
output->printf(output, " invalid frame pointer %p\n",
|
||||
tail);
|
||||
return NULL;
|
||||
}
|
||||
if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
|
||||
output->printf(output,
|
||||
" failed to copy frame pointer %p\n", tail);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
output->printf(output, " %p\n", buftail[0].lr);
|
||||
|
||||
/* frame pointers should strictly progress back up the stack
|
||||
* (towards higher addresses) */
|
||||
if (tail >= buftail[0].fp)
|
||||
return NULL;
|
||||
|
||||
return buftail[0].fp-1;
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs, unsigned int depth, void *ssp)
|
||||
{
|
||||
struct frame_tail *tail;
|
||||
struct thread_info *real_thread_info = THREAD_INFO(ssp);
|
||||
struct stacktrace_state sts;
|
||||
|
||||
sts.depth = depth;
|
||||
sts.output = output;
|
||||
*current_thread_info() = *real_thread_info;
|
||||
|
||||
if (!current)
|
||||
output->printf(output, "current NULL\n");
|
||||
else
|
||||
output->printf(output, "pid: %d comm: %s\n",
|
||||
current->pid, current->comm);
|
||||
fiq_debugger_dump_regs(output, regs);
|
||||
|
||||
if (!user_mode(regs)) {
|
||||
struct stackframe frame;
|
||||
frame.fp = regs->ARM_fp;
|
||||
frame.sp = regs->ARM_sp;
|
||||
frame.lr = regs->ARM_lr;
|
||||
frame.pc = regs->ARM_pc;
|
||||
output->printf(output,
|
||||
" pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
|
||||
regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
|
||||
regs->ARM_sp, regs->ARM_fp);
|
||||
walk_stackframe(&frame, report_trace, &sts);
|
||||
return;
|
||||
}
|
||||
|
||||
tail = ((struct frame_tail *) regs->ARM_fp) - 1;
|
||||
while (depth-- && tail && !((unsigned long) tail & 3))
|
||||
tail = user_backtrace(output, tail);
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Google, Inc.
|
||||
* Author: Colin Cross <ccross@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/ptrace.h>
|
||||
#include <asm/stacktrace.h>
|
||||
|
||||
#include "fiq_debugger_priv.h"
|
||||
|
||||
static char *mode_name(const struct pt_regs *regs)
|
||||
{
|
||||
if (compat_user_mode(regs)) {
|
||||
return "USR";
|
||||
} else {
|
||||
switch (processor_mode(regs)) {
|
||||
case PSR_MODE_EL0t: return "EL0t";
|
||||
case PSR_MODE_EL1t: return "EL1t";
|
||||
case PSR_MODE_EL1h: return "EL1h";
|
||||
case PSR_MODE_EL2t: return "EL2t";
|
||||
case PSR_MODE_EL2h: return "EL2h";
|
||||
default: return "???";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
|
||||
regs->pc, regs->pstate, mode_name(regs));
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n",
|
||||
regs->compat_usr(0), regs->compat_usr(1),
|
||||
regs->compat_usr(2), regs->compat_usr(3));
|
||||
output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n",
|
||||
regs->compat_usr(4), regs->compat_usr(5),
|
||||
regs->compat_usr(6), regs->compat_usr(7));
|
||||
output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n",
|
||||
regs->compat_usr(8), regs->compat_usr(9),
|
||||
regs->compat_usr(10), regs->compat_usr(11));
|
||||
output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n",
|
||||
regs->compat_usr(12), regs->compat_sp,
|
||||
regs->compat_lr, regs->pc);
|
||||
output->printf(output, " cpsr %08x (%s)\n",
|
||||
regs->pstate, mode_name(regs));
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
|
||||
output->printf(output, " x0 %016lx x1 %016lx\n",
|
||||
regs->regs[0], regs->regs[1]);
|
||||
output->printf(output, " x2 %016lx x3 %016lx\n",
|
||||
regs->regs[2], regs->regs[3]);
|
||||
output->printf(output, " x4 %016lx x5 %016lx\n",
|
||||
regs->regs[4], regs->regs[5]);
|
||||
output->printf(output, " x6 %016lx x7 %016lx\n",
|
||||
regs->regs[6], regs->regs[7]);
|
||||
output->printf(output, " x8 %016lx x9 %016lx\n",
|
||||
regs->regs[8], regs->regs[9]);
|
||||
output->printf(output, " x10 %016lx x11 %016lx\n",
|
||||
regs->regs[10], regs->regs[11]);
|
||||
output->printf(output, " x12 %016lx x13 %016lx\n",
|
||||
regs->regs[12], regs->regs[13]);
|
||||
output->printf(output, " x14 %016lx x15 %016lx\n",
|
||||
regs->regs[14], regs->regs[15]);
|
||||
output->printf(output, " x16 %016lx x17 %016lx\n",
|
||||
regs->regs[16], regs->regs[17]);
|
||||
output->printf(output, " x18 %016lx x19 %016lx\n",
|
||||
regs->regs[18], regs->regs[19]);
|
||||
output->printf(output, " x20 %016lx x21 %016lx\n",
|
||||
regs->regs[20], regs->regs[21]);
|
||||
output->printf(output, " x22 %016lx x23 %016lx\n",
|
||||
regs->regs[22], regs->regs[23]);
|
||||
output->printf(output, " x24 %016lx x25 %016lx\n",
|
||||
regs->regs[24], regs->regs[25]);
|
||||
output->printf(output, " x26 %016lx x27 %016lx\n",
|
||||
regs->regs[26], regs->regs[27]);
|
||||
output->printf(output, " x28 %016lx x29 %016lx\n",
|
||||
regs->regs[28], regs->regs[29]);
|
||||
output->printf(output, " x30 %016lx sp %016lx\n",
|
||||
regs->regs[30], regs->sp);
|
||||
output->printf(output, " pc %016lx cpsr %08x (%s)\n",
|
||||
regs->pc, regs->pstate, mode_name(regs));
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
if (compat_user_mode(regs))
|
||||
fiq_debugger_dump_regs_aarch32(output, regs);
|
||||
else
|
||||
fiq_debugger_dump_regs_aarch64(output, regs);
|
||||
}
|
||||
|
||||
#define READ_SPECIAL_REG(x) ({ \
|
||||
u64 val; \
|
||||
asm volatile ("mrs %0, " # x : "=r"(val)); \
|
||||
val; \
|
||||
})
|
||||
|
||||
void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs)
|
||||
{
|
||||
u32 pstate = READ_SPECIAL_REG(CurrentEl);
|
||||
bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t;
|
||||
|
||||
fiq_debugger_dump_regs(output, regs);
|
||||
|
||||
output->printf(output, " sp_el0 %016lx\n",
|
||||
READ_SPECIAL_REG(sp_el0));
|
||||
|
||||
if (in_el2)
|
||||
output->printf(output, " sp_el1 %016lx\n",
|
||||
READ_SPECIAL_REG(sp_el1));
|
||||
|
||||
output->printf(output, " elr_el1 %016lx\n",
|
||||
READ_SPECIAL_REG(elr_el1));
|
||||
|
||||
output->printf(output, " spsr_el1 %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_el1));
|
||||
|
||||
if (in_el2) {
|
||||
output->printf(output, " spsr_irq %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_irq));
|
||||
output->printf(output, " spsr_abt %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_abt));
|
||||
output->printf(output, " spsr_und %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_und));
|
||||
output->printf(output, " spsr_fiq %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_fiq));
|
||||
output->printf(output, " spsr_el2 %08lx\n",
|
||||
READ_SPECIAL_REG(elr_el2));
|
||||
output->printf(output, " spsr_el2 %08lx\n",
|
||||
READ_SPECIAL_REG(spsr_el2));
|
||||
}
|
||||
}
|
||||
|
||||
struct stacktrace_state {
|
||||
struct fiq_debugger_output *output;
|
||||
unsigned int depth;
|
||||
};
|
||||
|
||||
static int report_trace(struct stackframe *frame, void *d)
|
||||
{
|
||||
struct stacktrace_state *sts = d;
|
||||
|
||||
if (sts->depth) {
|
||||
sts->output->printf(sts->output, "%pF:\n", frame->pc);
|
||||
sts->output->printf(sts->output,
|
||||
" pc %016lx sp %016lx fp %016lx\n",
|
||||
frame->pc, frame->sp, frame->fp);
|
||||
sts->depth--;
|
||||
return 0;
|
||||
}
|
||||
sts->output->printf(sts->output, " ...\n");
|
||||
|
||||
return sts->depth == 0;
|
||||
}
|
||||
|
||||
void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs, unsigned int depth, void *ssp)
|
||||
{
|
||||
struct thread_info *real_thread_info = THREAD_INFO(ssp);
|
||||
struct stacktrace_state sts;
|
||||
|
||||
sts.depth = depth;
|
||||
sts.output = output;
|
||||
*current_thread_info() = *real_thread_info;
|
||||
|
||||
if (!current)
|
||||
output->printf(output, "current NULL\n");
|
||||
else
|
||||
output->printf(output, "pid: %d comm: %s\n",
|
||||
current->pid, current->comm);
|
||||
fiq_debugger_dump_regs(output, regs);
|
||||
|
||||
if (!user_mode(regs)) {
|
||||
struct stackframe frame;
|
||||
frame.fp = regs->regs[29];
|
||||
frame.sp = regs->sp;
|
||||
frame.pc = regs->pc;
|
||||
output->printf(output, "\n");
|
||||
walk_stackframe(current, &frame, report_trace, &sts);
|
||||
}
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Google, Inc.
|
||||
* Author: Colin Cross <ccross@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _FIQ_DEBUGGER_PRIV_H_
|
||||
#define _FIQ_DEBUGGER_PRIV_H_
|
||||
|
||||
#define THREAD_INFO(sp) ((struct thread_info *) \
|
||||
((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
|
||||
|
||||
struct fiq_debugger_output {
|
||||
void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
|
||||
};
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs);
|
||||
void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs);
|
||||
void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs);
|
||||
void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
|
||||
const struct pt_regs *regs, unsigned int depth, void *ssp);
|
||||
|
||||
#endif
|
|
@ -1,94 +0,0 @@
|
|||
/*
|
||||
* drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
|
||||
*
|
||||
* simple lockless ringbuffer
|
||||
*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct fiq_debugger_ringbuf {
|
||||
int len;
|
||||
int head;
|
||||
int tail;
|
||||
u8 buf[];
|
||||
};
|
||||
|
||||
|
||||
static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
|
||||
{
|
||||
struct fiq_debugger_ringbuf *rbuf;
|
||||
|
||||
rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
|
||||
if (rbuf == NULL)
|
||||
return NULL;
|
||||
|
||||
rbuf->len = len;
|
||||
rbuf->head = 0;
|
||||
rbuf->tail = 0;
|
||||
smp_mb();
|
||||
|
||||
return rbuf;
|
||||
}
|
||||
|
||||
static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
kfree(rbuf);
|
||||
}
|
||||
|
||||
static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
int level = rbuf->head - rbuf->tail;
|
||||
|
||||
if (level < 0)
|
||||
level = rbuf->len + level;
|
||||
|
||||
return level;
|
||||
}
|
||||
|
||||
static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
|
||||
}
|
||||
|
||||
static inline u8
|
||||
fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
|
||||
{
|
||||
return rbuf->buf[(rbuf->tail + i) % rbuf->len];
|
||||
}
|
||||
|
||||
static inline int
|
||||
fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
|
||||
{
|
||||
count = min(count, fiq_debugger_ringbuf_level(rbuf));
|
||||
|
||||
rbuf->tail = (rbuf->tail + count) % rbuf->len;
|
||||
smp_mb();
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static inline int
|
||||
fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
|
||||
{
|
||||
if (fiq_debugger_ringbuf_room(rbuf) == 0)
|
||||
return 0;
|
||||
|
||||
rbuf->buf[rbuf->head] = datum;
|
||||
smp_mb();
|
||||
rbuf->head = (rbuf->head + 1) % rbuf->len;
|
||||
smp_mb();
|
||||
|
||||
return 1;
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/pstore_ram.h>
|
||||
|
||||
#include "fiq_watchdog.h"
|
||||
#include "fiq_debugger_priv.h"
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
|
||||
|
||||
static void fiq_watchdog_printf(struct fiq_debugger_output *output,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
char buf[256];
|
||||
va_list ap;
|
||||
int len;
|
||||
|
||||
va_start(ap, fmt);
|
||||
len = vscnprintf(buf, sizeof(buf), fmt, ap);
|
||||
va_end(ap);
|
||||
|
||||
ramoops_console_write_buf(buf, len);
|
||||
}
|
||||
|
||||
struct fiq_debugger_output fiq_watchdog_output = {
|
||||
.printf = fiq_watchdog_printf,
|
||||
};
|
||||
|
||||
void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
|
||||
{
|
||||
char msg[24];
|
||||
int len;
|
||||
|
||||
raw_spin_lock(&fiq_watchdog_lock);
|
||||
|
||||
len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
|
||||
THREAD_INFO(svc_sp)->cpu);
|
||||
ramoops_console_write_buf(msg, len);
|
||||
|
||||
fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
|
||||
|
||||
raw_spin_unlock(&fiq_watchdog_lock);
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _FIQ_WATCHDOG_H_
|
||||
#define _FIQ_WATCHDOG_H_
|
||||
|
||||
void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
|
||||
|
||||
#endif
|
|
@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
|
|||
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ci->lock, flags);
|
||||
ci->gadget.speed = USB_SPEED_UNKNOWN;
|
||||
ci->remote_wakeup = 0;
|
||||
ci->suspended = 0;
|
||||
spin_unlock_irqrestore(&ci->lock, flags);
|
||||
|
||||
/* flush all endpoints */
|
||||
gadget_for_each_ep(ep, gadget) {
|
||||
usb_ep_fifo_flush(ep);
|
||||
|
@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
|
|||
ci->status = NULL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ci->lock, flags);
|
||||
ci->gadget.speed = USB_SPEED_UNKNOWN;
|
||||
ci->remote_wakeup = 0;
|
||||
ci->suspended = 0;
|
||||
spin_unlock_irqrestore(&ci->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1306,6 +1306,10 @@ static int ep_disable(struct usb_ep *ep)
|
|||
return -EBUSY;
|
||||
|
||||
spin_lock_irqsave(hwep->lock, flags);
|
||||
if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
|
||||
spin_unlock_irqrestore(hwep->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* only internal SW should disable ctrl endpts */
|
||||
|
||||
|
@ -1395,6 +1399,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
|
|||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(hwep->lock, flags);
|
||||
if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
|
||||
spin_unlock_irqrestore(hwep->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
retval = _ep_queue(ep, req, gfp_flags);
|
||||
spin_unlock_irqrestore(hwep->lock, flags);
|
||||
return retval;
|
||||
|
@ -1418,8 +1426,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
|
|||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(hwep->lock, flags);
|
||||
|
||||
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
|
||||
if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
|
||||
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
|
||||
|
||||
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
|
||||
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
|
||||
|
@ -1490,6 +1498,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
|
|||
}
|
||||
|
||||
spin_lock_irqsave(hwep->lock, flags);
|
||||
if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
|
||||
spin_unlock_irqrestore(hwep->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
|
||||
|
||||
|
@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
|
|||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&ci->lock, flags);
|
||||
if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
|
||||
spin_unlock_irqrestore(&ci->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
if (!ci->remote_wakeup) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto out;
|
||||
|
|
|
@ -597,10 +597,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
|
|||
{
|
||||
struct wdm_device *desc = file->private_data;
|
||||
|
||||
wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
|
||||
wait_event(desc->wait,
|
||||
/*
|
||||
* needs both flags. We cannot do with one
|
||||
* because resetting it would cause a race
|
||||
* with write() yet we need to signal
|
||||
* a disconnect
|
||||
*/
|
||||
!test_bit(WDM_IN_USE, &desc->flags) ||
|
||||
test_bit(WDM_DISCONNECTING, &desc->flags));
|
||||
|
||||
/* cannot dereference desc->intf if WDM_DISCONNECTING */
|
||||
if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
|
||||
if (test_bit(WDM_DISCONNECTING, &desc->flags))
|
||||
return -ENODEV;
|
||||
if (desc->werr < 0)
|
||||
dev_err(&desc->intf->dev, "Error in flush path: %d\n",
|
||||
desc->werr);
|
||||
|
||||
|
@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
|
|||
spin_lock_irqsave(&desc->iuspin, flags);
|
||||
set_bit(WDM_DISCONNECTING, &desc->flags);
|
||||
set_bit(WDM_READ, &desc->flags);
|
||||
/* to terminate pending flushes */
|
||||
clear_bit(WDM_IN_USE, &desc->flags);
|
||||
spin_unlock_irqrestore(&desc->iuspin, flags);
|
||||
wake_up_all(&desc->wait);
|
||||
mutex_lock(&desc->rlock);
|
||||
|
|
|
@ -2197,6 +2197,7 @@ void composite_disconnect(struct usb_gadget *gadget)
|
|||
* disconnect callbacks?
|
||||
*/
|
||||
spin_lock_irqsave(&cdev->lock, flags);
|
||||
cdev->suspended = 0;
|
||||
if (cdev->config) {
|
||||
if (gadget->is_chipidea && !cdev->suspended) {
|
||||
spin_unlock_irqrestore(&cdev->lock, flags);
|
||||
|
|
|
@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
|
|||
/* see what we found out */
|
||||
temp = check_reset_complete(fotg210, wIndex, status_reg,
|
||||
fotg210_readl(fotg210, status_reg));
|
||||
|
||||
/* restart schedule */
|
||||
fotg210->command |= CMD_RUN;
|
||||
fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
|
||||
}
|
||||
|
||||
if (!(temp & (PORT_RESUME|PORT_RESET))) {
|
||||
|
|
|
@ -417,8 +417,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
|
|||
* other cases where the next software may expect clean state from the
|
||||
* "firmware". this is bus-neutral, unlike shutdown() methods.
|
||||
*/
|
||||
static void
|
||||
ohci_shutdown (struct usb_hcd *hcd)
|
||||
static void _ohci_shutdown(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ohci_hcd *ohci;
|
||||
|
||||
|
@ -434,6 +433,16 @@ ohci_shutdown (struct usb_hcd *hcd)
|
|||
ohci->rh_state = OHCI_RH_HALTED;
|
||||
}
|
||||
|
||||
static void ohci_shutdown(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ohci->lock, flags);
|
||||
_ohci_shutdown(hcd);
|
||||
spin_unlock_irqrestore(&ohci->lock, flags);
|
||||
}
|
||||
|
||||
/*-------------------------------------------------------------------------*
|
||||
* HC functions
|
||||
*-------------------------------------------------------------------------*/
|
||||
|
@ -752,7 +761,7 @@ static void io_watchdog_func(unsigned long _ohci)
|
|||
died:
|
||||
usb_hc_died(ohci_to_hcd(ohci));
|
||||
ohci_dump(ohci);
|
||||
ohci_shutdown(ohci_to_hcd(ohci));
|
||||
_ohci_shutdown(ohci_to_hcd(ohci));
|
||||
goto done;
|
||||
} else {
|
||||
/* No write back because the done queue was empty */
|
||||
|
|
|
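The ohci hunks above split shutdown into a locked wrapper and an unlocked helper. A condensed sketch of the resulting pattern, paraphrased from the hunks rather than a complete listing:

	static void _ohci_shutdown(struct usb_hcd *hcd)	/* caller holds ohci->lock */
	{
		/* ... halt the controller, mark ohci->rh_state = OHCI_RH_HALTED ... */
	}

	static void ohci_shutdown(struct usb_hcd *hcd)		/* public hook: takes the lock */
	{
		struct ohci_hcd *ohci = hcd_to_ohci(hcd);
		unsigned long flags;

		spin_lock_irqsave(&ohci->lock, flags);
		_ohci_shutdown(hcd);
		spin_unlock_irqrestore(&ohci->lock, flags);
	}

The io_watchdog path, which already holds ohci->lock when it decides the controller is dead, calls _ohci_shutdown() directly so it does not deadlock on the same spinlock.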
@ -84,7 +84,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
|
|||
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
|
||||
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
|
||||
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
|
||||
of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
|
||||
of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
|
||||
}
|
||||
|
||||
static int xhci_rcar_is_gen3(struct device *dev)
|
||||
|
|
|
@ -51,7 +51,7 @@ MODULE_VERSION("1.03");
|
|||
|
||||
static int auto_delink_en = 1;
|
||||
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
|
||||
MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
|
||||
|
||||
#ifdef CONFIG_REALTEK_AUTOPM
|
||||
static int ss_en = 1;
|
||||
|
@ -1010,12 +1010,15 @@ static int init_realtek_cr(struct us_data *us)
|
|||
goto INIT_FAIL;
|
||||
}
|
||||
|
||||
if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
|
||||
CHECK_FW_VER(chip, 0x5901))
|
||||
SET_AUTO_DELINK(chip);
|
||||
if (STATUS_LEN(chip) == 16) {
|
||||
if (SUPPORT_AUTO_DELINK(chip))
|
||||
if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
|
||||
CHECK_PID(chip, 0x0159)) {
|
||||
if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
|
||||
CHECK_FW_VER(chip, 0x5901))
|
||||
SET_AUTO_DELINK(chip);
|
||||
if (STATUS_LEN(chip) == 16) {
|
||||
if (SUPPORT_AUTO_DELINK(chip))
|
||||
SET_AUTO_DELINK(chip);
|
||||
}
|
||||
}
|
||||
#ifdef CONFIG_REALTEK_AUTOPM
|
||||
if (ss_en)
|
||||
|
|
|
@ -2119,7 +2119,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
|
|||
US_FL_IGNORE_RESIDUE ),
|
||||
|
||||
/* Reported by Michael Büsch <m@bues.ch> */
|
||||
UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
|
||||
UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
|
||||
"JMicron",
|
||||
"USB to ATA/ATAPI Bridge",
|
||||
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
||||
|
|
|
@ -240,6 +240,7 @@ module_param(nowayout, bool, 0);
|
|||
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
|
||||
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
|
||||
|
||||
MODULE_ALIAS("platform:bcm2835-wdt");
|
||||
MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
|
||||
MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
fs/namei.c
@ -42,6 +42,9 @@
|
|||
#include "internal.h"
|
||||
#include "mount.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/namei.h>
|
||||
|
||||
/* [Feb-1997 T. Schoebel-Theuer]
|
||||
* Fundamental changes in the pathname lookup mechanisms (namei)
|
||||
* were necessary because of omirr. The reason is that omirr needs
|
||||
|
@ -789,6 +792,81 @@ static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
|
|||
return dentry->d_op->d_revalidate(dentry, flags);
|
||||
}
|
||||
|
||||
#define INIT_PATH_SIZE 64
|
||||
|
||||
static void success_walk_trace(struct nameidata *nd)
|
||||
{
|
||||
struct path *pt = &nd->path;
|
||||
struct inode *i = nd->inode;
|
||||
char buf[INIT_PATH_SIZE], *try_buf;
|
||||
int cur_path_size;
|
||||
char *p;
|
||||
|
||||
/* When the eBPF/tracepoint is disabled, keep overhead low. */
|
||||
if (!trace_inodepath_enabled())
|
||||
return;
|
||||
|
||||
/* First try stack allocated buffer. */
|
||||
try_buf = buf;
|
||||
cur_path_size = INIT_PATH_SIZE;
|
||||
|
||||
while (cur_path_size <= PATH_MAX) {
|
||||
/* Free previous heap allocation if we are now trying
|
||||
* a second or later heap allocation.
|
||||
*/
|
||||
if (try_buf != buf)
|
||||
kfree(try_buf);
|
||||
|
||||
/* All but the first alloc are on the heap. */
|
||||
if (cur_path_size != INIT_PATH_SIZE) {
|
||||
try_buf = kmalloc(cur_path_size, GFP_KERNEL);
|
||||
if (!try_buf) {
|
||||
try_buf = buf;
|
||||
sprintf(try_buf, "error:buf_alloc_failed");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
p = d_path(pt, try_buf, cur_path_size);
|
||||
|
||||
if (!IS_ERR(p)) {
|
||||
char *end = mangle_path(try_buf, p, "\n");
|
||||
|
||||
if (end) {
|
||||
try_buf[end - try_buf] = 0;
|
||||
break;
|
||||
} else {
|
||||
/* On mangle errors, double path size
|
||||
* till PATH_MAX.
|
||||
*/
|
||||
cur_path_size = cur_path_size << 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (PTR_ERR(p) == -ENAMETOOLONG) {
|
||||
/* If d_path complains that name is too long,
|
||||
* then double path size till PATH_MAX.
|
||||
*/
|
||||
cur_path_size = cur_path_size << 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
sprintf(try_buf, "error:d_path_failed_%lu",
|
||||
-1 * PTR_ERR(p));
|
||||
break;
|
||||
}
|
||||
|
||||
if (cur_path_size > PATH_MAX)
|
||||
sprintf(try_buf, "error:d_path_name_too_long");
|
||||
|
||||
trace_inodepath(i, try_buf);
|
||||
|
||||
if (try_buf != buf)
|
||||
kfree(try_buf);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* complete_walk - successful completion of path walk
|
||||
* @nd: pointer nameidata
|
||||
|
@ -811,15 +889,21 @@ static int complete_walk(struct nameidata *nd)
|
|||
return -ECHILD;
|
||||
}
|
||||
|
||||
if (likely(!(nd->flags & LOOKUP_JUMPED)))
|
||||
if (likely(!(nd->flags & LOOKUP_JUMPED))) {
|
||||
success_walk_trace(nd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
|
||||
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
|
||||
success_walk_trace(nd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
|
||||
if (status > 0)
|
||||
if (status > 0) {
|
||||
success_walk_trace(nd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!status)
|
||||
status = -ESTALE;
|
||||
|
|
|
@ -434,7 +434,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
|
|||
|
||||
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
|
||||
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
|
||||
extern void nfs4_purge_state_owners(struct nfs_server *);
|
||||
extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
|
||||
extern void nfs4_free_state_owners(struct list_head *head);
|
||||
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
|
||||
extern void nfs4_put_open_state(struct nfs4_state *);
|
||||
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
|
||||
|
|
|
@ -781,9 +781,12 @@ found:
|
|||
|
||||
static void nfs4_destroy_server(struct nfs_server *server)
|
||||
{
|
||||
LIST_HEAD(freeme);
|
||||
|
||||
nfs_server_return_all_delegations(server);
|
||||
unset_pnfs_layoutdriver(server);
|
||||
nfs4_purge_state_owners(server);
|
||||
nfs4_purge_state_owners(server, &freeme);
|
||||
nfs4_free_state_owners(&freeme);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -611,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
|
|||
/**
|
||||
* nfs4_purge_state_owners - Release all cached state owners
|
||||
* @server: nfs_server with cached state owners to release
|
||||
* @head: resulting list of state owners
|
||||
*
|
||||
* Called at umount time. Remaining state owners will be on
|
||||
* the LRU with ref count of zero.
|
||||
* Note that the state owners are not freed, but are added
|
||||
* to the list @head, which can later be used as an argument
|
||||
* to nfs4_free_state_owners.
|
||||
*/
|
||||
void nfs4_purge_state_owners(struct nfs_server *server)
|
||||
void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
|
||||
{
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
struct nfs4_state_owner *sp, *tmp;
|
||||
LIST_HEAD(doomed);
|
||||
|
||||
spin_lock(&clp->cl_lock);
|
||||
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
|
||||
list_move(&sp->so_lru, &doomed);
|
||||
list_move(&sp->so_lru, head);
|
||||
nfs4_remove_state_owner_locked(sp);
|
||||
}
|
||||
spin_unlock(&clp->cl_lock);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
|
||||
/**
|
||||
* nfs4_free_state_owners - free a list of previously cached state owners
|
||||
* @head: resulting list of state owners
|
||||
*
|
||||
* Frees a list of state owners that was generated by
|
||||
* nfs4_purge_state_owners
|
||||
*/
|
||||
void nfs4_free_state_owners(struct list_head *head)
|
||||
{
|
||||
struct nfs4_state_owner *sp, *tmp;
|
||||
|
||||
list_for_each_entry_safe(sp, tmp, head, so_lru) {
|
||||
list_del(&sp->so_lru);
|
||||
nfs4_free_state_owner(sp);
|
||||
}
|
||||
|
@ -1764,12 +1779,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
|
|||
struct nfs4_state_owner *sp;
|
||||
struct nfs_server *server;
|
||||
struct rb_node *pos;
|
||||
LIST_HEAD(freeme);
|
||||
int status = 0;
|
||||
|
||||
restart:
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
|
||||
nfs4_purge_state_owners(server);
|
||||
nfs4_purge_state_owners(server, &freeme);
|
||||
spin_lock(&clp->cl_lock);
|
||||
for (pos = rb_first(&server->state_owners);
|
||||
pos != NULL;
|
||||
|
@ -1798,6 +1814,7 @@ restart:
|
|||
spin_unlock(&clp->cl_lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
nfs4_free_state_owners(&freeme);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -464,6 +464,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
|||
/* len == 0 means wake all */
|
||||
struct userfaultfd_wake_range range = { .len = 0, };
|
||||
unsigned long new_flags;
|
||||
bool still_valid;
|
||||
|
||||
ACCESS_ONCE(ctx->released) = true;
|
||||
|
||||
|
@ -479,8 +480,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
|||
* taking the mmap_sem for writing.
|
||||
*/
|
||||
down_write(&mm->mmap_sem);
|
||||
if (!mmget_still_valid(mm))
|
||||
goto skip_mm;
|
||||
still_valid = mmget_still_valid(mm);
|
||||
prev = NULL;
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
||||
cond_resched();
|
||||
|
@ -491,22 +491,23 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
|||
continue;
|
||||
}
|
||||
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
|
||||
prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
|
||||
new_flags, vma->anon_vma,
|
||||
vma->vm_file, vma->vm_pgoff,
|
||||
vma_policy(vma),
|
||||
NULL_VM_UFFD_CTX,
|
||||
vma_get_anon_name(vma));
|
||||
if (prev)
|
||||
vma = prev;
|
||||
else
|
||||
prev = vma;
|
||||
if (still_valid) {
|
||||
prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
|
||||
new_flags, vma->anon_vma,
|
||||
vma->vm_file, vma->vm_pgoff,
|
||||
vma_policy(vma),
|
||||
NULL_VM_UFFD_CTX,
|
||||
vma_get_anon_name(vma));
|
||||
if (prev)
|
||||
vma = prev;
|
||||
else
|
||||
prev = vma;
|
||||
}
|
||||
vm_write_begin(vma);
|
||||
WRITE_ONCE(vma->vm_flags, new_flags);
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
vm_write_end(vma);
|
||||
}
|
||||
skip_mm:
|
||||
up_write(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
wakeup:
|
||||
|
|
|
@ -774,6 +774,7 @@ xfs_setattr_nonsize(
|
|||
|
||||
out_cancel:
|
||||
xfs_trans_cancel(tp);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
out_dqrele:
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
|
|
|
@ -1628,6 +1628,10 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
|
|||
{
|
||||
struct sk_buff *skb = tcp_send_head(sk);
|
||||
|
||||
/* empty retransmit queue, for example due to zero window */
|
||||
if (skb == tcp_write_queue_head(sk))
|
||||
return NULL;
|
||||
|
||||
return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM namei
|
||||
|
||||
#if !defined(_TRACE_INODEPATH_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_INODEPATH_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/tracepoint.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/memcontrol.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/kdev_t.h>
|
||||
|
||||
TRACE_EVENT(inodepath,
|
||||
TP_PROTO(struct inode *inode, char *path),
|
||||
|
||||
TP_ARGS(inode, path),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
/* dev_t and ino_t are arch dependent bit width
|
||||
* so just use 64-bit
|
||||
*/
|
||||
__field(unsigned long, ino)
|
||||
__field(unsigned long, dev)
|
||||
__string(path, path)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->ino = inode->i_ino;
|
||||
__entry->dev = inode->i_sb->s_dev;
|
||||
__assign_str(path, path);
|
||||
),
|
||||
|
||||
TP_printk("dev %d:%d ino=%lu path=%s",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino, __get_str(path))
|
||||
);
|
||||
#endif /* _TRACE_INODEPATH_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
|
|
@ -1188,6 +1188,7 @@ config FAIR_GROUP_SCHED
|
|||
config CFS_BANDWIDTH
|
||||
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
|
||||
depends on FAIR_GROUP_SCHED
|
||||
depends on !SCHED_WALT
|
||||
default n
|
||||
help
|
||||
This option allows users to define CPU bandwidth rates (limits) for
|
||||
|
|
|
@ -267,6 +267,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
static void irq_sysfs_del(struct irq_desc *desc)
|
||||
{
|
||||
/*
|
||||
* If irq_sysfs_init() has not yet been invoked (early boot), then
|
||||
* irq_kobj_base is NULL and the descriptor was never added.
|
||||
* kobject_del() complains about a object with no parent, so make
|
||||
* it conditional.
|
||||
*/
|
||||
if (irq_kobj_base)
|
||||
kobject_del(&desc->kobj);
|
||||
}
|
||||
|
||||
static int __init irq_sysfs_init(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
|
@ -297,6 +309,7 @@ static struct kobj_type irq_kobj_type = {
|
|||
};
|
||||
|
||||
static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
|
||||
static void irq_sysfs_del(struct irq_desc *desc) {}
|
||||
|
||||
#endif /* CONFIG_SYSFS */
|
||||
|
||||
|
@ -406,7 +419,7 @@ static void free_desc(unsigned int irq)
|
|||
* The sysfs entry must be serialized against a concurrent
|
||||
* irq_sysfs_init() as well.
|
||||
*/
|
||||
kobject_del(&desc->kobj);
|
||||
irq_sysfs_del(desc);
|
||||
delete_irq_desc(irq);
|
||||
|
||||
/*
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/userfaultfd_k.h>
|
||||
#include <linux/page_idle.h>
|
||||
#include <linux/shmem_fs.h>
|
||||
#include <linux/page_owner.h>
|
||||
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
@ -1951,6 +1952,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
|
|||
}
|
||||
|
||||
ClearPageCompound(head);
|
||||
|
||||
split_page_owner(head, HPAGE_PMD_ORDER);
|
||||
|
||||
/* See comment in __split_huge_page_tail() */
|
||||
if (PageAnon(head)) {
|
||||
page_ref_inc(head);
|
||||
|
|
|
@ -52,6 +52,7 @@
|
|||
#include <linux/zpool.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/migrate.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#define ZSPAGE_MAGIC 0x58
|
||||
|
@ -266,6 +267,10 @@ struct zs_pool {
|
|||
#ifdef CONFIG_COMPACTION
|
||||
struct inode *inode;
|
||||
struct work_struct free_work;
|
||||
/* A wait queue for when migration races with async_free_zspage() */
|
||||
wait_queue_head_t migration_wait;
|
||||
atomic_long_t isolated_pages;
|
||||
bool destroying;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@ -1959,6 +1964,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
|
|||
zspage->isolated--;
|
||||
}
|
||||
|
||||
static void putback_zspage_deferred(struct zs_pool *pool,
|
||||
struct size_class *class,
|
||||
struct zspage *zspage)
|
||||
{
|
||||
+        enum fullness_group fg;
+
+        fg = putback_zspage(class, zspage);
+        if (fg == ZS_EMPTY)
+                schedule_work(&pool->free_work);
+
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+        VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+        atomic_long_dec(&pool->isolated_pages);
+        /*
+         * There's no possibility of racing, since wait_for_isolated_drain()
+         * checks the isolated count under &class->lock after enqueuing
+         * on migration_wait.
+         */
+        if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+                wake_up_all(&pool->migration_wait);
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
                                 struct page *newpage, struct page *oldpage)
 {
@@ -2028,6 +2058,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
         */
        if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
                get_zspage_mapping(zspage, &class_idx, &fullness);
+               atomic_long_inc(&pool->isolated_pages);
                remove_zspage(class, zspage, fullness);
        }
 
@@ -2116,8 +2147,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
         * Page migration is done so let's putback isolated zspage to
         * the list if @page is final isolated subpage in the zspage.
         */
-       if (!is_zspage_isolated(zspage))
-               putback_zspage(class, zspage);
+       if (!is_zspage_isolated(zspage)) {
+               /*
+                * We cannot race with zs_destroy_pool() here because we wait
+                * for isolation to hit zero before we start destroying.
+                * Also, we ensure that everyone can see pool->destroying before
+                * we start waiting.
+                */
+               putback_zspage_deferred(pool, class, zspage);
+               zs_pool_dec_isolated(pool);
+       }
 
        reset_page(page);
        put_page(page);
@@ -2164,13 +2203,12 @@ void zs_page_putback(struct page *page)
        spin_lock(&class->lock);
        dec_zspage_isolation(zspage);
        if (!is_zspage_isolated(zspage)) {
-               fg = putback_zspage(class, zspage);
                /*
                 * Due to page_lock, we cannot free zspage immediately
                 * so let's defer.
                 */
-               if (fg == ZS_EMPTY)
-                       schedule_work(&pool->free_work);
+               putback_zspage_deferred(pool, class, zspage);
+               zs_pool_dec_isolated(pool);
        }
        spin_unlock(&class->lock);
 }
@@ -2194,8 +2232,36 @@ static int zs_register_migration(struct zs_pool *pool)
        return 0;
 }
 
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+       return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Function for resolving migration */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+
+       /*
+        * We're in the process of destroying the pool, so there are no
+        * active allocations. zs_page_isolate() fails for completely free
+        * zspages, so we need only wait for the zs_pool's isolated
+        * count to hit zero.
+        */
+       wait_event(pool->migration_wait,
+                  pool_isolated_are_drained(pool));
+}
+
 static void zs_unregister_migration(struct zs_pool *pool)
 {
+       pool->destroying = true;
+       /*
+        * We need a memory barrier here to ensure global visibility of
+        * pool->destroying. Thus pool->isolated pages will either be 0 in which
+        * case we don't care, or it will be > 0 and pool->destroying will
+        * ensure that we wake up once isolation hits 0.
+        */
+       smp_mb();
+       wait_for_isolated_drain(pool); /* This can block */
        flush_work(&pool->free_work);
        iput(pool->inode);
 }
@@ -2442,6 +2508,10 @@ struct zs_pool *zs_create_pool(const char *name)
        if (!pool->name)
                goto err;
 
+#ifdef CONFIG_COMPACTION
+       init_waitqueue_head(&pool->migration_wait);
+#endif
+
        if (create_cache(pool))
                goto err;
 
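The zs_unregister_migration()/zs_pool_dec_isolated() hunks above follow a common shutdown pattern: publish a "destroying" flag, issue a full barrier, then wait for an in-flight counter to drain, while the side dropping the counter re-checks the flag after the decrement so the waiter cannot miss its wakeup. Below is a minimal userspace sketch of that pattern only; C11 seq_cst atomics and a condition variable stand in for smp_mb()/wait_event()/wake_up_all(), and all names (in_flight, destroying, put_item, wait_for_drain) are invented for the illustration, not kernel API.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_long in_flight;     /* plays the role of pool->isolated_pages */
    static atomic_bool destroying;    /* plays the role of pool->destroying */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

    /* Worker side: drop one in-flight item, wake the destroyer if it is waiting. */
    static void put_item(void)
    {
            /* seq_cst atomics give the ordering the kernel code gets from smp_mb(). */
            if (atomic_fetch_sub(&in_flight, 1) == 1 && atomic_load(&destroying)) {
                    pthread_mutex_lock(&lock);
                    pthread_cond_broadcast(&drained);
                    pthread_mutex_unlock(&lock);
            }
    }

    /* Teardown side: publish the flag first, then wait for the count to drain. */
    static void wait_for_drain(void)
    {
            atomic_store(&destroying, true);
            pthread_mutex_lock(&lock);
            while (atomic_load(&in_flight) != 0)
                    pthread_cond_wait(&drained, &lock);
            pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
            (void)arg;
            put_item();
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_fetch_add(&in_flight, 1);   /* one isolated item in flight */
            pthread_create(&t, NULL, worker, NULL);
            wait_for_drain();                  /* returns once the worker is done */
            pthread_join(t, NULL);
            return 0;
    }

Whichever thread observes the counter hitting zero after the flag is visible performs the wake, which is exactly why the kernel hunk increments isolated_pages before the zspage leaves the class list and decrements it only after the deferred putback.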
@@ -2288,8 +2288,10 @@ static int compat_do_replace(struct net *net, void __user *user,
        state.buf_kern_len = size64;
 
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-       if (WARN_ON(ret < 0))
+       if (WARN_ON(ret < 0)) {
+               vfree(entries_tmp);
                goto out_unlock;
+       }
 
        vfree(entries_tmp);
        tmp.entries_size = size64;
 
@@ -118,7 +118,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
-       bool noblock = (*timeo_p ? false : true);
        DEFINE_WAIT(wait);
 
        if (sk_stream_memory_free(sk))
@@ -131,11 +130,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
-               if (!*timeo_p) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-                       goto do_nonblock;
-               }
+               if (!*timeo_p)
+                       goto do_eagain;
                if (signal_pending(current))
                        goto do_interrupted;
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -167,7 +163,13 @@ out:
 do_error:
        err = -EPIPE;
        goto out;
-do_nonblock:
+do_eagain:
+       /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+        * be generated later.
+        * When TCP receives ACK packets that make room, tcp_check_space()
+        * only calls tcp_new_space() if SOCK_NOSPACE is set.
+        */
+       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        goto out;
 do_interrupted:
 
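The comment added in the do_eagain path above spells out a userspace-visible contract: a non-blocking sender that just got EAGAIN must still receive EPOLLOUT once the peer frees space, which is why SOCK_NOSPACE is now set unconditionally before returning. A self-contained sketch of that contract as observed from userspace follows; it uses an AF_UNIX socketpair purely to stay short, so it exercises generic socket writability wakeups rather than tcp_check_space() itself, and it is an illustration, not a regression test from the patch.

    #include <sys/socket.h>
    #include <sys/epoll.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            char buf[4096] = { 0 };
            struct epoll_event ev = { .events = EPOLLOUT };
            int sv[2], ep;

            if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
                    return 1;
            fcntl(sv[0], F_SETFL, O_NONBLOCK);
            fcntl(sv[1], F_SETFL, O_NONBLOCK);

            /* Fill the send buffer until the kernel reports EAGAIN. */
            while (send(sv[0], buf, sizeof(buf), 0) > 0)
                    ;
            if (errno != EAGAIN && errno != EWOULDBLOCK)
                    return 1;

            /* Let the peer drain everything, then wait for writability. */
            while (read(sv[1], buf, sizeof(buf)) > 0)
                    ;

            ep = epoll_create1(0);
            epoll_ctl(ep, EPOLL_CTL_ADD, sv[0], &ev);
            if (epoll_wait(ep, &ev, 1, 5000) == 1 && (ev.events & EPOLLOUT))
                    puts("EPOLLOUT delivered after EAGAIN, as expected");
            return 0;
    }

Before the fix, the TCP path could return EAGAIN without setting SOCK_NOSPACE, so the later ACK-driven wakeup was skipped and a program like the one above could hang in epoll_wait.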
@@ -1418,6 +1418,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
+       if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
        sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;
@@ -1425,10 +1430,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                sta->sta.tdls = true;
 
-       if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
        err = sta_apply_parameters(local, sta, params);
        if (err) {
                sta_info_free(local, sta);
 
@@ -2172,7 +2172,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               reg_process_hint(lr);
+               pr_debug("Pending regulatory request, waiting for it to be processed...\n");
                return;
        }
 
@@ -1822,8 +1822,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
        if (cptr->type == USER_CLIENT) {
                info->input_pool = cptr->data.user.fifo_pool_size;
                info->input_free = info->input_pool;
-               if (cptr->data.user.fifo)
-                       info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+               info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
        } else {
                info->input_pool = 0;
                info->input_free = 0;
 
@@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
        return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+       unsigned long flags;
+       int cells;
+
+       if (!f)
+               return 0;
+
+       snd_use_lock_use(&f->use_lock);
+       spin_lock_irqsave(&f->lock, flags);
+       cells = snd_seq_unused_cells(f->pool);
+       spin_unlock_irqrestore(&f->lock, flags);
+       snd_use_lock_free(&f->use_lock);
+       return cells;
+}
 
@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
+
 #endif
 
@@ -1158,6 +1158,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+                                           struct snd_pcm_hw_rule *rule)
+{
+       struct davinci_mcasp_ruledata *rd = rule->private;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+       struct snd_mask nfmt;
+       int i, slot_width;
+
+       snd_mask_none(&nfmt);
+       slot_width = rd->mcasp->slot_width;
+
+       for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+               if (snd_mask_test(fmt, i)) {
+                       if (snd_pcm_format_width(i) <= slot_width) {
+                               snd_mask_set(&nfmt, i);
+                       }
+               }
+       }
+
+       return snd_mask_refine(fmt, &nfmt);
+}
+
 static const unsigned int davinci_mcasp_dai_rates[] = {
        8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
        88200, 96000, 176400, 192000,
@@ -1251,7 +1273,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
        struct davinci_mcasp_ruledata *ruledata =
                &mcasp->ruledata[substream->stream];
        u32 max_channels = 0;
-       int i, dir;
+       int i, dir, ret;
        int tdm_slots = mcasp->tdm_slots;
 
        /* Do not allow more then one stream per direction */
@@ -1280,6 +1302,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                max_channels++;
        }
        ruledata->serializers = max_channels;
+       ruledata->mcasp = mcasp;
        max_channels *= tdm_slots;
        /*
         * If the already active stream has less channels than the calculated
@@ -1305,20 +1328,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                                   0, SNDRV_PCM_HW_PARAM_CHANNELS,
                                   &mcasp->chconstr[substream->stream]);
 
-       if (mcasp->slot_width)
-               snd_pcm_hw_constraint_minmax(substream->runtime,
-                                            SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-                                            8, mcasp->slot_width);
+       if (mcasp->slot_width) {
+               /* Only allow formats require <= slot_width bits on the bus */
+               ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+                                         SNDRV_PCM_HW_PARAM_FORMAT,
+                                         davinci_mcasp_hw_rule_slot_width,
+                                         ruledata,
+                                         SNDRV_PCM_HW_PARAM_FORMAT, -1);
+               if (ret)
+                       return ret;
+       }
 
        /*
         * If we rely on implicit BCLK divider setting we should
         * set constraints based on what we can provide.
         */
        if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
-               int ret;
-
-               ruledata->mcasp = mcasp;
-
                ret = snd_pcm_hw_rule_add(substream->runtime, 0,
                                          SNDRV_PCM_HW_PARAM_RATE,
                                          davinci_mcasp_hw_rule_rate,
 
@@ -1106,8 +1106,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
        list_add_tail(&widget->work_list, list);
 
        if (custom_stop_condition && custom_stop_condition(widget, dir)) {
-               widget->endpoints[dir] = 1;
-               return widget->endpoints[dir];
+               list = NULL;
+               custom_stop_condition = NULL;
        }
 
        if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1144,8 +1144,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
  *
  * Optionally, can be supplied with a function acting as a stopping condition.
  * This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as an arguments, it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
                                  struct list_head *list,
 
@@ -552,6 +552,15 @@ int line6_init_pcm(struct usb_line6 *line6,
        line6pcm->volume_monitor = 255;
        line6pcm->line6 = line6;
 
+       spin_lock_init(&line6pcm->out.lock);
+       spin_lock_init(&line6pcm->in.lock);
+       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+       line6->line6pcm = line6pcm;
+
+       pcm->private_data = line6pcm;
+       pcm->private_free = line6_cleanup_pcm;
+
        line6pcm->max_packet_size_in =
                usb_maxpacket(line6->usbdev,
                        usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -564,15 +573,6 @@ int line6_init_pcm(struct usb_line6 *line6,
                return -EINVAL;
        }
 
-       spin_lock_init(&line6pcm->out.lock);
-       spin_lock_init(&line6pcm->in.lock);
-       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
-       line6->line6pcm = line6pcm;
-
-       pcm->private_data = line6pcm;
-       pcm->private_free = line6_cleanup_pcm;
-
        err = line6_create_audio_out_urbs(line6pcm);
        if (err < 0)
                return err;
 
@@ -83,6 +83,7 @@ struct mixer_build {
        unsigned char *buffer;
        unsigned int buflen;
        DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+       DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
        struct usb_audio_term oterm;
        const struct usbmix_name_map *map;
        const struct usbmix_selector_map *selector_map;
@@ -722,15 +723,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
-                           struct usb_audio_term *term)
+static int __check_input_term(struct mixer_build *state, int id,
+                             struct usb_audio_term *term)
 {
        int err;
        void *p1;
+       unsigned char *hdr;
 
        memset(term, 0, sizeof(*term));
-       while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-               unsigned char *hdr = p1;
+       for (;;) {
+               /* a loop in the terminal chain? */
+               if (test_and_set_bit(id, state->termbitmap))
+                       return -EINVAL;
+
+               p1 = find_audio_control_unit(state, id);
+               if (!p1)
+                       break;
+
+               hdr = p1;
                term->id = id;
                switch (hdr[2]) {
                case UAC_INPUT_TERMINAL:
@@ -745,7 +755,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                        /* call recursively to verify that the
                         * referenced clock entity is valid */
-                       err = check_input_term(state, d->bCSourceID, term);
+                       err = __check_input_term(state, d->bCSourceID, term);
                        if (err < 0)
                                return err;
 
@@ -760,7 +770,7 @@ static int check_input_term(struct mixer_build *state, int id,
                } else { /* UAC_VERSION_3 */
                        struct uac3_input_terminal_descriptor *d = p1;
 
-                       err = check_input_term(state,
+                       err = __check_input_term(state,
                                               d->bCSourceID, term);
                        if (err < 0)
                                return err;
@@ -791,7 +801,17 @@ static int check_input_term(struct mixer_build *state, int id,
                        term->name = uac_mixer_unit_iMixer(d);
                        return 0;
                }
-               case UAC_SELECTOR_UNIT:
+               case UAC_SELECTOR_UNIT: {
+                       struct uac_selector_unit_descriptor *d = p1;
+                       /* call recursively to retrieve the channel info */
+                       err = __check_input_term(state, d->baSourceID[0], term);
+                       if (err < 0)
+                               return err;
+                       term->type = d->bDescriptorSubtype << 16; /* virtual type */
+                       term->id = id;
+                       term->name = uac_selector_unit_iSelector(d);
+                       return 0;
+               }
                /* UAC3_MIXER_UNIT_V3 */
                case UAC2_CLOCK_SELECTOR:
                /* UAC3_CLOCK_SOURCE */ {
@@ -818,7 +838,7 @@ static int check_input_term(struct mixer_build *state, int id,
                } else {
                        struct uac_selector_unit_descriptor *d = p1;
                        /* call recursively to retrieve channel info */
-                       err = check_input_term(state,
+                       err = __check_input_term(state,
                                               d->baSourceID[0], term);
                        if (err < 0)
                                return err;
@@ -882,6 +902,15 @@ static int check_input_term(struct mixer_build *state, int id,
        return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+                           struct usb_audio_term *term)
+{
+       memset(term, 0, sizeof(*term));
+       memset(state->termbitmap, 0, sizeof(state->termbitmap));
+       return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
 
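The __check_input_term() rework above turns an unbounded descriptor-chain walk into one that records every visited unit ID in a per-walk bitmap, so a crafted device that points a unit back at itself can no longer drive the parser around a cycle. Below is a stand-alone sketch of that guard in miniature; the unit table, MAX_IDS and follow_chain() are invented for the illustration and only mimic the shape of the fix, not the USB audio descriptor layout.

    #include <stdio.h>

    #define MAX_IDS 256

    /* source[id] holds the next unit in the chain; 0 terminates it. */
    static const unsigned char source[MAX_IDS] = {
            [5] = 7, [7] = 9, [9] = 5,      /* a malicious 5 -> 7 -> 9 -> 5 cycle */
    };

    static int follow_chain(unsigned char id)
    {
            unsigned char seen[MAX_IDS / 8] = { 0 };

            for (;;) {
                    /* a loop in the chain? */
                    if (seen[id / 8] & (1u << (id % 8)))
                            return -1;
                    seen[id / 8] |= 1u << (id % 8);

                    if (source[id] == 0)
                            return id;      /* reached a terminal unit */
                    id = source[id];
            }
    }

    int main(void)
    {
            printf("chain from 5 resolves to %d\n", follow_chain(5)); /* -1: cycle */
            return 0;
    }

The wrapper check_input_term() clears the bitmap before each top-level walk, matching the sketch's per-call seen[] array, so legitimate devices that share units across independent walks are unaffected.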
@@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
@@ -164,6 +164,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
 
@@ -373,8 +373,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 
        /* Allocate and initialize all memory on CPU#0: */
        if (init_cpu0) {
-               orig_mask = bind_to_node(0);
-               bind_to_memnode(0);
+               int node = numa_node_of_cpu(0);
+
+               orig_mask = bind_to_node(node);
+               bind_to_memnode(node);
        }
 
        bytes = bytes0 + HPSIZE;
 
@@ -315,6 +315,7 @@ static struct fixed {
        { "inst_retired.any_p", "event=0xc0" },
        { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
        { "cpu_clk_unhalted.thread", "event=0x3c" },
+       { "cpu_clk_unhalted.core", "event=0x3c" },
        { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
        { NULL, NULL},
 };
 
@@ -12,32 +12,6 @@
 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
                             PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
 
-#if defined(__s390x__)
-/* Return true if kvm module is available and loaded. Test this
- * and retun success when trace point kvm_s390_create_vm
- * exists. Otherwise this test always fails.
- */
-static bool kvm_s390_create_vm_valid(void)
-{
-       char *eventfile;
-       bool rc = false;
-
-       eventfile = get_events_file("kvm-s390");
-
-       if (eventfile) {
-               DIR *mydir = opendir(eventfile);
-
-               if (mydir) {
-                       rc = true;
-                       closedir(mydir);
-               }
-               put_events_file(eventfile);
-       }
-
-       return rc;
-}
-#endif
-
 static int test__checkevent_tracepoint(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1619,7 +1593,6 @@ static struct evlist_test test__events[] = {
        {
                .name  = "kvm-s390:kvm_s390_create_vm",
                .check = test__checkevent_tracepoint,
-               .valid = kvm_s390_create_vm_valid,
                .id    = 100,
        },
 #endif
 
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
 
@@ -120,6 +120,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
        return value;
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+       return (vgic_irq_is_sgi(irq->intid) &&
+               vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
@@ -130,6 +136,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ISPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                spin_lock(&irq->irq_lock);
                irq->pending = true;
                if (irq->config == VGIC_CONFIG_LEVEL)
@@ -150,6 +162,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ICPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                spin_lock(&irq->irq_lock);
 
                if (irq->config == VGIC_CONFIG_LEVEL) {