This is the 4.9.132 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlu9oeMACgkQONu9yGCS
aT67eBAAzlv0eCFCDx9uk1JJssoDZZDN20KFUGvIkjOs8Xz2oBhRzAF3zp2sfPNR
AHq5Tawk3PPPejq37lajcBu2ap9HdEK0LfU8PT4zuK/BDakCvqBmxAdNVI6iIXPu
O8+MLeAbBWKj37sMTpbnnXO3KDrpmNV6TB/KXpl4q4ZPEUExkOnxmF5peUPQdgTU
J6RVF+kRYyr62WFnPfdzLxmUjTM90GMLduR6Bh83LeBk+V9yWv9ryoPK4wxxnUKv
KcCWEhHDNL44R2ZAECVs6m7U/PP1pg8jYBxCDxeR78RZRdM5whbBRRjrOB+of/4Y
Q5WKpLYA4uHRQ7/uvNttNQrrgZOyhi9JcbDNATGP8pfSXD3cbFrj/6x1Fhz3dv3k
0hsU6Z2odhP/CJqm/uU7/S0yyIplSM5F4eLIwk8Qo8Be976//FlWL09pgq4w1hkX
kcXLipBJSnRauTI/bckCtru8xMnPvLLYvPo9Y8z2TjwK10nQYUIY9m4OsiFwlSgt
6rDpbDx8OWa36Sla3yZWJhlcaJCLnYJdriHu56veerRUtabLUqJ15AthxVbIWPm1
z7IZRzqAj+Ju82Q11hoMdzIu0XJ4lJzatnjCRRK6F5XLqKrrKo7XO7Af6Q1j6y1T
AofQNqBueEGbyR8HE1Z6D/E4fjMmqvPL61+DgsJ+aWsLn4WhFhU=
=naum
-----END PGP SIGNATURE-----

Merge 4.9.132 into android-4.9

Changes in 4.9.132:
    serial: mvebu-uart: Fix reporting of effective CSIZE to userspace
    time: Introduce jiffies64_to_nsecs()
    mac80211: Run TXQ teardown code before de-registering interfaces
    KVM: PPC: Book3S HV: Don't truncate HPTE index in xlate function
    mac80211: correct use of IEEE80211_VHT_CAP_RXSTBC_X
    mac80211_hwsim: correct use of IEEE80211_VHT_CAP_RXSTBC_X
    gpio: adp5588: Fix sleep-in-atomic-context bug
    mac80211: mesh: fix HWMP sequence numbering to follow standard
    net: hns: add netif_carrier_off before change speed and duplex
    cfg80211: nl80211_update_ft_ies() to validate NL80211_ATTR_IE
    gpio: Fix crash due to registration race
    ARC: atomics: unbork atomic_fetch_##op()
    RAID10 BUG_ON in raise_barrier when force is true and conf->barrier is 0
    i2c: uniphier: issue STOP only for last message or I2C_M_STOP
    i2c: uniphier-f: issue STOP only for last message or I2C_M_STOP
    net: cadence: Fix a sleep-in-atomic-context bug in macb_halt_tx()
    fs/cifs: don't translate SFM_SLASH (U+F026) to backslash
    cfg80211: fix a type issue in ieee80211_chandef_to_operating_class()
    mac80211: fix a race between restart and CSA flows
    mac80211: Fix station bandwidth setting after channel switch
    mac80211: don't Tx a deauth frame if the AP forbade Tx
    mac80211: shorten the IBSS debug messages
    tools/vm/slabinfo.c: fix sign-compare warning
    tools/vm/page-types.c: fix "defined but not used" warning
    mm: madvise(MADV_DODUMP): allow hugetlbfs pages
    HID: add support for Apple Magic Keyboards
    usb: gadget: fotg210-udc: Fix memory leak of fotg210->ep[i]
    HID: hid-saitek: Add device ID for RAT 7 Contagion
    perf evsel: Fix potential null pointer dereference in perf_evsel__new_idx()
    perf probe powerpc: Ignore SyS symbols irrespective of endianness
    RDMA/ucma: check fd type in ucma_migrate_id()
    USB: yurex: Check for truncation in yurex_read()
    nvmet-rdma: fix possible bogus dereference under heavy load
    net/mlx5: Consider PCI domain in search for next dev
    drm/nouveau/TBDdevinit: don't fail when PMU/PRE_OS is missing from VBIOS
    dm raid: fix rebuild of specific devices by updating superblock
    fs/cifs: suppress a string overflow warning
    net: ena: fix driver when PAGE_SIZE == 64kB
    perf/x86/intel: Add support/quirk for the MISPREDICT bit on Knights Landing CPUs
    dm thin metadata: try to avoid ever aborting transactions
    arch/hexagon: fix kernel/dma.c build warning
    hexagon: modify ffs() and fls() to return int
    arm64: jump_label.h: use asm_volatile_goto macro instead of "asm goto"
    r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED
    s390/qeth: use vzalloc for QUERY OAT buffer
    s390/qeth: don't dump past end of unknown HW header
    cifs: read overflow in is_valid_oplock_break()
    xen/manage: don't complain about an empty value in control/sysrq node
    xen: avoid crash in disable_hotplug_cpu
    xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage
    sysfs: Do not return POSIX ACL xattrs via listxattr
    smb2: fix missing files in root share directory listing
    ALSA: hda/realtek - Cannot adjust speaker's volume on Dell XPS 27 7760
    crypto: qat - Fix KASAN stack-out-of-bounds bug in adf_probe()
    crypto: mxs-dcp - Fix wait logic on chan threads
    gpiolib: Free the last requested descriptor
    proc: restrict kernel stack dumps to root
    ocfs2: fix locking for res->tracking and dlm->tracking_list
    dm thin metadata: fix __udivdi3 undefined on 32-bit
    Linux 4.9.132

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in: commit 38f2b4a8c2

--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 131
+SUBLEVEL = 132
 EXTRAVERSION =
 NAME = Roaring Lionus
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	"1:	llock   %[orig], [%[ctr]]		\n" \
 	"	" #asm_op " %[val], %[orig], %[i]	\n" \
 	"	scond   %[val], [%[ctr]]		\n" \
-	"						\n" \
+	"	bnz     1b				\n" \
 	: [val]	"=&r" (val), \
 	  [orig] "=&r" (orig) \
 	: [ctr]	"r" (&v->counter), \
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm goto("1: nop\n\t"
+	asm_volatile_goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm goto("1: b %l[l_yes]\n\t"
+	asm_volatile_goto("1: b %l[l_yes]\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
 	int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
 	int r;
 
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -68,7 +68,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
 			panic("Can't create %s() memory pool!", __func__);
 		else
 			gen_pool_add(coherent_pool,
-				pfn_to_virt(max_low_pfn),
+				(unsigned long)pfn_to_virt(max_low_pfn),
 				hexagon_coherent_pool_size, -1);
 	}
 
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -314,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	unsigned long pp, key;
 	unsigned long v, gr;
 	__be64 *hptep;
-	int index;
+	long int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
 	/* Get SLB entry */
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1195,4 +1195,8 @@ void intel_pmu_lbr_init_knl(void)
 
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+	/* Knights Landing does have MISPREDICT bit */
+	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -979,7 +986,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
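The essence of the wait-logic fix above is the classic kthread idle pattern: mark the task sleeping before checking for work, and only schedule() when the queue really is empty, so a wake_up_process() racing with the check cannot be lost. A minimal standalone sketch of that pattern (have_work()/do_work() are hypothetical helpers, not driver code):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int chan_thread(void *data)
    {
        while (!kthread_should_stop()) {
            /* Declare ourselves sleeping *before* the work check... */
            set_current_state(TASK_INTERRUPTIBLE);

            if (!have_work(data)) {         /* hypothetical helper */
                schedule();                 /* a concurrent wake-up is not lost */
                continue;
            }

            /* ...and go back to running once work was found. */
            set_current_state(TASK_RUNNING);
            do_work(data);                  /* hypothetical helper */
        }
        return 0;
    }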
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
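The same two-line bug is fixed in all six adf_drv.c files above: for_each_set_bit() reads its bitmap argument in whole unsigned-long words, so pointing it at the address of a stack int over-reads past the variable on 64-bit (the KASAN stack-out-of-bounds report from the changelog). Declaring the mask as an unsigned long is the fix. A minimal sketch (walk_bars() is a hypothetical function, not driver code):

    #include <linux/bitops.h>
    #include <linux/pci.h>

    static void walk_bars(struct pci_dev *pdev)
    {
        /* Full-word storage: safe for for_each_set_bit() to read. */
        unsigned long bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        unsigned int bar_nr;

        /* Casting the address of a plain int to (unsigned long *) here
         * would read 4 bytes beyond the variable on 64-bit kernels. */
        for_each_set_bit(bar_nr, &bar_mask, PCI_STD_RESOURCE_END + 1)
            pr_info("BAR %u is a memory BAR\n", bar_nr);
    }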
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@ struct adp5588_gpio {
 	uint8_t int_en[3];
 	uint8_t irq_mask[3];
 	uint8_t irq_stat[3];
+	uint8_t int_input_en[3];
+	uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
 	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
-	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+		if (dev->int_input_en[i]) {
+			mutex_lock(&dev->lock);
+			dev->dir[i] &= ~dev->int_input_en[i];
+			dev->int_input_en[i] = 0;
+			adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+					   dev->dir[i]);
+			mutex_unlock(&dev->lock);
+		}
+
+		if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+			dev->int_lvl_cached[i] = dev->int_lvl[i];
+			adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+					   dev->int_lvl[i]);
+		}
+
 		if (dev->int_en[i] ^ dev->irq_mask[i]) {
 			dev->int_en[i] = dev->irq_mask[i];
 			adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
 					   dev->int_en[i]);
 		}
+	}
 
 	mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
 	else
 		return -EINVAL;
 
-	adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-	adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-			   dev->int_lvl[bank]);
+	dev->int_input_en[bank] |= bit;
 
 	return 0;
 }
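The adp5588 fix follows the standard irq_chip recipe for sleeping-bus (I2C/SPI) GPIO expanders: the atomic irq_set_type() callback only records what must change, and the actual bus writes are deferred to irq_bus_sync_unlock(), which runs in a context that may sleep. A rough sketch of the split, with hypothetical names (my_chip, pending_input_en, my_i2c_write):

    /* Atomic context: just record the requested change. */
    static int my_irq_set_type(struct irq_data *d, unsigned int type)
    {
        struct my_chip *chip = irq_data_get_irq_chip_data(d);

        chip->pending_input_en[d->hwirq / 8] |= BIT(d->hwirq % 8);
        return 0;
    }

    /* May sleep: flush everything recorded above over the bus. */
    static void my_irq_bus_sync_unlock(struct irq_data *d)
    {
        struct my_chip *chip = irq_data_get_irq_chip_data(d);

        my_i2c_write(chip, REG_DIR, chip->pending_input_en[0]); /* sleeps */
        mutex_unlock(&chip->irq_lock);
    }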
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
 	struct of_phandle_args *gpiospec = data;
 
 	return chip->gpiodev->dev.of_node == gpiospec->np &&
+	       chip->of_xlate &&
 	       chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -471,7 +471,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -161,7 +161,8 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(init, 0x01, post, NULL, NULL);
+	pmu_load(init, 0x01, post, NULL, NULL);
+	return 0;
 }
 
 static const struct nvkm_devinit_func
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -333,7 +333,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+			usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -476,6 +477,12 @@ static const struct hid_device_id apple_devices[] = {
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -83,6 +83,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101
 
 #define USB_VENDOR_ID_APPLE		0x05ac
+#define BT_VENDOR_ID_APPLE		0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD	0x030e
@@ -152,6 +153,7 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO	0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS	0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI	0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI	0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
@@ -888,6 +890,7 @@
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION	0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -400,11 +400,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
 		if (ret)
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -247,11 +247,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
 		if (ret)
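Both UniPhier drivers replace the read-ahead heuristic ("skip STOP before a following read") with the plain rule from the commit titles. The predicate in isolation, with msg iterating msgs..emsg-1 exactly as in the loops above:

    /* STOP after the final message, or whenever the client asked for it. */
    bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);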
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
 						      struct ucma_file *file)
 {
@@ -1545,6 +1547,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 	f = fdget(cmd.fd);
 	if (!f.file)
 		return -ENOENT;
+	if (f.file->f_op != &ucma_fops) {
+		ret = -EINVAL;
+		goto file_put;
+	}
 
 	/* Validate current fd and prevent destruction of id. */
 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2880,6 +2880,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		rs_set_new(rs);
 	} else if (rs_is_recovering(rs)) {
+		/* Rebuild particular devices */
+		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+			rs_setup_recovery(rs, MaxSector);
+		}
 		/* A recovering raid set may be resized */
 		; /* skip setup rs */
 	} else if (rs_is_reshaping(rs)) {
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
 	unsigned long flags;
 	sector_t data_block_size;
 
+	/*
+	 * We reserve a section of the metadata for commit overhead.
+	 * All reported space does *not* include this.
+	 */
+	dm_block_t metadata_reserve;
+
 	/*
 	 * Set if a transaction has to be aborted but the attempt to roll back
 	 * to the previous (good) transaction failed.  The only pool metadata
@@ -827,6 +833,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
 	return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+	int r;
+	dm_block_t total;
+	dm_block_t max_blocks = 4096; /* 16M */
+
+	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+	if (r) {
+		DMERR("could not get size of metadata device");
+		pmd->metadata_reserve = max_blocks;
+	} else
+		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
 					       sector_t data_block_size,
 					       bool format_device)
@@ -860,6 +880,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
 		return ERR_PTR(r);
 	}
 
+	__set_metadata_reserve(pmd);
+
 	return pmd;
 }
@@ -1831,6 +1853,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
 	down_read(&pmd->root_lock);
 	if (!pmd->fail_io)
 		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+	if (!r) {
+		if (*result < pmd->metadata_reserve)
+			*result = 0;
+		else
+			*result -= pmd->metadata_reserve;
+	}
 	up_read(&pmd->root_lock);
 
 	return r;
@@ -1943,8 +1972,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
 	int r = -EINVAL;
 
 	down_write(&pmd->root_lock);
-	if (!pmd->fail_io)
+	if (!pmd->fail_io) {
 		r = __resize_space_map(pmd->metadata_sm, new_count);
+		if (!r)
+			__set_metadata_reserve(pmd);
+	}
 	up_write(&pmd->root_lock);
 
 	return r;
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
 	PM_WRITE,		/* metadata may be changed */
 	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
+
+	/*
+	 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+	 */
+	PM_OUT_OF_METADATA_SPACE,
 	PM_READ_ONLY,		/* metadata may not be changed */
+
 	PM_FAIL,		/* all I/O fails */
 };
@@ -1386,7 +1392,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+	return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+	return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+	int r;
+	const char *ooms_reason = NULL;
+	dm_block_t nr_free;
+
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+	if (r)
+		ooms_reason = "Could not get free metadata blocks";
+	else if (!nr_free)
+		ooms_reason = "No free metadata blocks";
+
+	if (ooms_reason && !is_read_only(pool)) {
+		DMERR("%s", ooms_reason);
+		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+	}
+}
+
+static void check_for_data_space(struct pool *pool)
 {
 	int r;
 	dm_block_t nr_free;
@@ -1412,14 +1446,16 @@ static int commit(struct pool *pool)
 {
 	int r;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY)
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-	else
-		check_for_space(pool);
+	else {
+		check_for_metadata_space(pool);
+		check_for_data_space(pool);
+	}
 
 	return r;
 }
@@ -1485,6 +1521,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 		return r;
 	}
 
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+		return r;
+	}
+
+	if (!free_blocks) {
+		/* Let's commit before we use up the metadata reserve. */
+		r = commit(pool);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
@@ -1516,6 +1565,7 @@ static int should_error_unserviceable_bio(struct pool *pool)
 	case PM_OUT_OF_DATA_SPACE:
 		return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
 	case PM_FAIL:
 		return -EIO;
@@ -2479,8 +2529,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		error_retry_list(pool);
 		break;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
-		if (old_mode != new_mode)
+		if (!is_read_only_pool_mode(old_mode))
 			notify_of_pool_mode_change(pool, "read-only");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_read_only;
@@ -3418,6 +3469,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
 		       dm_device_name(pool->pool_md),
 		       sb_metadata_dev_size, metadata_dev_size);
+
+		if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+			set_pool_mode(pool, PM_WRITE);
+
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
 			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3721,7 +3776,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY) {
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
 		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
 		      dm_device_name(pool->pool_md));
 		return -EOPNOTSUPP;
@@ -3795,6 +3850,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
 	dm_block_t nr_blocks_data;
 	dm_block_t nr_blocks_metadata;
 	dm_block_t held_root;
+	enum pool_mode mode;
 	char buf[BDEVNAME_SIZE];
 	char buf2[BDEVNAME_SIZE];
 	struct pool_c *pt = ti->private;
@@ -3865,9 +3921,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
 		else
 			DMEMIT("- ");
 
-		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+		mode = get_pool_mode(pool);
+		if (mode == PM_OUT_OF_DATA_SPACE)
 			DMEMIT("out_of_data_space ");
-		else if (pool->pf.mode == PM_READ_ONLY)
+		else if (is_read_only_pool_mode(mode))
 			DMEMIT("ro ");
 		else
 			DMEMIT("rw ");
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4381,11 +4381,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 		allow_barrier(conf);
 	}
 
+	raise_barrier(conf, 0);
 read_more:
 	/* Now schedule reads for blocks from sector_nr to last */
 	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 	r10_bio->state = 0;
-	raise_barrier(conf, sectors_done != 0);
+	raise_barrier(conf, 1);
 	atomic_set(&r10_bio->remaining, 0);
 	r10_bio->mddev = mddev;
 	r10_bio->sector = sector_nr;
@@ -4492,6 +4493,8 @@ bio_full:
 	if (sector_nr <= last)
 		goto read_more;
 
+	lower_barrier(conf);
+
 	/* Now that we have done the whole section we can
 	 * update reshape_progress
 	 */
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -422,7 +422,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		return -ENOMEM;
 	}
 
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
 			   DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
 		u64_stats_update_begin(&rx_ring->syncp);
@@ -439,7 +439,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
 	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
 	ena_buf->paddr = dma;
-	ena_buf->len = PAGE_SIZE;
+	ena_buf->len = ENA_PAGE_SIZE;
 
 	return 0;
 }
@@ -456,7 +456,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
 		       DMA_FROM_DEVICE);
 
 	__free_page(page);
@@ -849,10 +849,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	do {
 		dma_unmap_page(rx_ring->dev,
 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-				rx_info->page_offset, len, PAGE_SIZE);
+				rx_info->page_offset, len, ENA_PAGE_SIZE);
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx skb updated. len %d. data_len %d\n",
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -321,4 +321,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
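Because the hardware length field is 16 bits wide, passing PAGE_SIZE == 0x10000 on a 64 kB-page system would truncate to 0; capping at 16 kB keeps the value representable while still covering the ~9 kB maximum packet. An illustration of the truncation the macro prevents:

    /* On a 64 kB-page system without the cap: */
    u16 len = 64 * 1024;     /* 0x10000 wraps to 0 in a 16-bit field */
    /* With the cap: */
    u16 ok  = ENA_PAGE_SIZE; /* at most SZ_16K (0x4000), always non-zero */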
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -517,7 +517,7 @@ static int macb_halt_tx(struct macb *bp)
 		if (!(status & MACB_BIT(TGO)))
 			return 0;
 
-		usleep_range(10, 250);
+		udelay(250);
 	} while (time_before(halt_time, timeout));
 
 	return -ETIMEDOUT;
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
 	}
 
 	if (h->dev->ops->adjust_link) {
+		netif_carrier_off(net_dev);
 		h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+		netif_carrier_on(net_dev);
 		return 0;
 	}
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -288,16 +288,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
 	}
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-	return (u16)((dev->pdev->bus->number << 8) |
+	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+		     (dev->pdev->bus->number << 8) |
 		     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u16 pci_id = mlx5_gen_pci_id(dev);
+	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
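The widened ID packs domain, bus and slot into non-overlapping bit fields, so two adapters at the same bus/slot in different PCI domains no longer produce the same key. The layout in isolation:

    /* bits [31:16] PCI domain, [15:8] bus number, [7:0] PCI_SLOT() */
    u32 id = (pci_domain_nr(pdev->bus) << 16) |
             (pdev->bus->number << 8) |
             PCI_SLOT(pdev->devfn);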
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -760,7 +760,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-	RTL_FLAG_TASK_ENABLED,
+	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_SLOW_PENDING,
 	RTL_FLAG_TASK_RESET_PENDING,
 	RTL_FLAG_TASK_PHY_PENDING,
@@ -7637,7 +7637,8 @@ static int rtl8169_close(struct net_device *dev)
 	rtl8169_update_counters(dev);
 
 	rtl_lock_work(tp);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
 	rtl8169_down(dev);
 	rtl_unlock_work(tp);
@@ -7820,7 +7821,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
 	rtl_lock_work(tp);
 	napi_disable(&tp->napi);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
 	rtl_unlock_work(tp);
 
 	rtl_pll_power_down(tp);
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2569,9 +2569,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 			IEEE80211_VHT_CAP_SHORT_GI_80 |
 			IEEE80211_VHT_CAP_SHORT_GI_160 |
 			IEEE80211_VHT_CAP_TXSTBC |
-			IEEE80211_VHT_CAP_RXSTBC_1 |
-			IEEE80211_VHT_CAP_RXSTBC_2 |
-			IEEE80211_VHT_CAP_RXSTBC_3 |
 			IEEE80211_VHT_CAP_RXSTBC_4 |
 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 		sband->vht_cap.vht_mcs.rx_mcs_map =
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -755,6 +769,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -23,6 +23,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -4715,7 +4716,7 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 
 	priv.buffer_len = oat_data.buffer_len;
 	priv.response_len = 0;
-	priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
+	priv.buffer = vzalloc(oat_data.buffer_len);
 	if (!priv.buffer) {
 		rc = -ENOMEM;
 		goto out;
@@ -4756,7 +4757,7 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 		rc = -EFAULT;
 
 out_free:
-	kfree(priv.buffer);
+	vfree(priv.buffer);
 out:
 	return rc;
 }
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -491,7 +491,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1836,7 +1836,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -327,8 +327,10 @@ static void mvebu_uart_set_termios(struct uart_port *port,
 	if ((termios->c_cflag & CREAD) == 0)
 		port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
 
-	if (old)
+	if (old) {
 		tty_termios_copy_hw(termios, old);
+		termios->c_cflag |= CS8;
+	}
 
 	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
 	uart_update_timeout(port, termios->c_cflag, baud);
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1066,12 +1066,15 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
 	struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+	int i;
 
 	usb_del_gadget_udc(&fotg210->gadget);
 	iounmap(fotg210->reg);
 	free_irq(platform_get_irq(pdev, 0), fotg210);
 
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
 	return 0;
@@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 	/* initialize udc */
 	fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
 	if (fotg210 == NULL)
-		goto err_alloc;
+		goto err;
 
 	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
 		_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 	fotg210->reg = ioremap(res->start, resource_size(res));
 	if (fotg210->reg == NULL) {
 		pr_err("ioremap error.\n");
-		goto err_map;
+		goto err_alloc;
 	}
 
 	spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 	fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
 				GFP_KERNEL);
 	if (fotg210->ep0_req == NULL)
-		goto err_req;
+		goto err_map;
 
 	fotg210_init(fotg210);
 
@@ -1190,12 +1193,14 @@ err_req:
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-	if (fotg210->reg)
-		iounmap(fotg210->reg);
+	iounmap(fotg210->reg);
 
 err_alloc:
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
+err:
 	return ret;
 }
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -423,6 +423,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 	spin_unlock_irqrestore(&dev->lock, flags);
 	mutex_unlock(&dev->io_mutex);
 
+	if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+		return -EIO;
+
 	return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -18,15 +18,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -139,7 +139,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -282,9 +282,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''), this happens when we acknowledge the
+		 * request by writing '\0' below.
 		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
 			pr_err("Error %d reading sysrq code in control/sysrq\n",
 			       err);
 		xenbus_transaction_end(xbt, 1);
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,9 +101,6 @@ convert_sfm_char(const __u16 src_char, char *target)
 	case SFM_LESSTHAN:
 		*target = '<';
 		break;
-	case SFM_SLASH:
-		*target = '\\';
-		break;
 	case SFM_SPACE:
 		*target = ' ';
 		break;
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -577,10 +577,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
 	}
 
 	count = 0;
+	/*
+	 * We know that all the name entries in the protocols array
+	 * are short (< 16 bytes anyway) and are NUL terminated.
+	 */
 	for (i = 0; i < CIFS_NUM_PROT; i++) {
-		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-		count += strlen(protocols[i].name) + 1;
-		/* null at end of source and target buffers anyway */
+		size_t len = strlen(protocols[i].name) + 1;
+
+		memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+		count += len;
 	}
 	inc_rfc1001_len(pSMB, count);
 	pSMB->ByteCount = cpu_to_le16(count);
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -406,9 +406,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 			(struct smb_com_transaction_change_notify_rsp *)buf;
 		struct file_notify_information *pnotify;
 		__u32 data_offset = 0;
+		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
 		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
 			data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+			if (data_offset >
+			    len - sizeof(struct file_notify_information)) {
+				cifs_dbg(FYI, "invalid data_offset %u\n",
+					 data_offset);
+				return true;
+			}
 			pnotify = (struct file_notify_information *)
 				((char *)&pSMBr->hdr.Protocol + data_offset);
 			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -989,7 +989,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	}
 
 	srch_inf->entries_in_buffer = 0;
-	srch_inf->index_of_last_entry = 0;
+	srch_inf->index_of_last_entry = 2;
 
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
 	res->last_used = 0;
 
-	spin_lock(&dlm->spinlock);
+	spin_lock(&dlm->track_lock);
 	list_add_tail(&res->tracking, &dlm->tracking_list);
-	spin_unlock(&dlm->spinlock);
+	spin_unlock(&dlm->track_lock);
 
 	memset(res->lvb, 0, DLM_LVB_LEN);
 	memset(res->refmap, 0, sizeof(res->refmap));
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -455,6 +455,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 	int err;
 	int i;
 
+	/*
+	 * The ability to racily run the kernel stack unwinder on a running task
+	 * and then observe the unwinder output is scary; while it is useful for
+	 * debugging kernel issues, it can also allow an attacker to leak kernel
+	 * stack contents.
+	 * Doing this in a manner that is at least safe from races would require
+	 * some work to ensure that the remote task can not be scheduled; and
+	 * even then, this would still expose the unwinder as local attack
+	 * surface.
+	 * Therefore, this interface is restricted to root.
+	 */
+	if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+		return -EACCES;
+
 	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -953,17 +953,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
 	int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-	if (inode->i_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_ACCESS);
-		if (err)
-			return err;
-	}
-	if (inode->i_default_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_DEFAULT);
-		if (err)
-			return err;
+	if (IS_POSIXACL(inode)) {
+		if (inode->i_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_ACCESS);
+			if (err)
+				return err;
+		}
+		if (inode->i_default_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_DEFAULT);
+			if (err)
+				return err;
+		}
 	}
 #endif
 
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -292,6 +292,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
 	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -704,6 +704,16 @@ u64 nsec_to_clock_t(u64 x)
 #endif
 }
 
+u64 jiffies64_to_nsecs(u64 j)
+{
+#if !(NSEC_PER_SEC % HZ)
+	return (NSEC_PER_SEC / HZ) * j;
+# else
+	return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_nsecs);
+
 /**
  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -98,6 +98,12 @@ define timeconst(hz) {
 		print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+
+		cd=gcd(hz,1000000000)
+		print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
+		print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
 		print "\n"
 
 		print "#endif /* KERNEL_TIMECONST_H */\n"
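jiffies64_to_nsecs() (added in kernel/time/time.c above) avoids a runtime 64-bit division where it can: when NSEC_PER_SEC is a multiple of HZ it is a plain multiply, and otherwise the bc script precomputes the reduced fraction 10^9/HZ as HZ_TO_NSEC_NUM/HZ_TO_NSEC_DEN so div_u64() divides by a small constant. A worked example with HZ=300 (illustrative; 300 does not divide 10^9): gcd(300, 10^9) = 100, so HZ_TO_NSEC_NUM = 10^7 and HZ_TO_NSEC_DEN = 3:

    /* HZ == 300 (assumed): one jiffy is 10^7/3 ns */
    u64 ns = jiffies64_to_nsecs(300); /* div_u64(300 * 10^7, 3) == 10^9 ns == 1 s */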
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -81,7 +81,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
 		new_flags |= VM_DONTDUMP;
 		break;
 	case MADV_DODUMP:
-		if (new_flags & VM_SPECIAL) {
+		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
 			error = -EINVAL;
 			goto out;
 		}
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -948,8 +948,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
 	if (len < IEEE80211_DEAUTH_FRAME_LEN)
 		return;
 
-	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, reason);
+	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
 	sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -967,9 +967,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-	ibss_dbg(sdata,
-		 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+	ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+		 mgmt->bssid, auth_transaction);
 
 	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
 		return;
@@ -1176,10 +1176,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 		rx_timestamp = drv_get_tsf(local, sdata);
 	}
 
-	ibss_dbg(sdata,
-		 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+	ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
 		 mgmt->sa, mgmt->bssid,
-		 (unsigned long long)rx_timestamp,
+		 (unsigned long long)rx_timestamp);
+	ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
 		 (unsigned long long)beacon_timestamp,
 		 (unsigned long long)(rx_timestamp - beacon_timestamp),
 		 jiffies);
@@ -1538,9 +1538,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
 	tx_last_beacon = drv_tx_last_beacon(local);
 
-	ibss_dbg(sdata,
-		 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+	ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+		 mgmt->bssid, tx_last_beacon);
 
 	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -254,8 +254,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 	     "%s called with hardware scan in progress\n", __func__);
 
 	rtnl_lock();
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		/*
+		 * XXX: there may be more work for other vif types and even
+		 * for station mode: a good thing would be to run most of
+		 * the iface type's dependent _stop (ieee80211_mg_stop,
+		 * ieee80211_ibss_stop) etc...
+		 * For now, fix only the specific bug that was seen: race
+		 * between csa_connection_drop_work and us.
+		 */
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			/*
+			 * This worker is scheduled from the iface worker that
+			 * runs on mac80211's workqueue, so we can't be
+			 * scheduling this worker after the cancel right here.
+			 * The exception is ieee80211_chswitch_done.
+			 * Then we can have a race...
+			 */
+			cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+		}
 		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+	}
 	ieee80211_scan_cancel(local);
 
 	/* make sure any new ROC will consider local->in_reconfig */
@@ -466,10 +485,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
 		cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
 			    IEEE80211_VHT_CAP_SHORT_GI_80 |
 			    IEEE80211_VHT_CAP_SHORT_GI_160 |
-			    IEEE80211_VHT_CAP_RXSTBC_1 |
-			    IEEE80211_VHT_CAP_RXSTBC_2 |
-			    IEEE80211_VHT_CAP_RXSTBC_3 |
-			    IEEE80211_VHT_CAP_RXSTBC_4 |
+			    IEEE80211_VHT_CAP_RXSTBC_MASK |
 			    IEEE80211_VHT_CAP_TXSTBC |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1164,6 +1180,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+	ieee80211_txq_teardown_flows(local);
 
 	rtnl_lock();
 
@@ -1191,7 +1208,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
 	skb_queue_purge(&local->skb_queue_tdls_chsw);
-	ieee80211_txq_teardown_flows(local);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -563,6 +563,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 		forward = false;
 		reply = true;
 		target_metric = 0;
+
+		if (SN_GT(target_sn, ifmsh->sn))
+			ifmsh->sn = target_sn;
+
 		if (time_after(jiffies, ifmsh->last_sn_update +
 					net_traversal_jiffies(sdata)) ||
 		    time_before(jiffies, ifmsh->last_sn_update)) {
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -989,6 +989,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
 	 */
 
 	if (sdata->reserved_chanctx) {
+		struct ieee80211_supported_band *sband = NULL;
+		struct sta_info *mgd_sta = NULL;
+		enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
 		/*
 		 * with multi-vif csa driver may call ieee80211_csa_finish()
 		 * many times while waiting for other interfaces to use their
@@ -997,6 +1001,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
 		if (sdata->reserved_ready)
 			goto out;
 
+		if (sdata->vif.bss_conf.chandef.width !=
+		    sdata->csa_chandef.width) {
+			/*
+			 * For managed interface, we need to also update the AP
+			 * station bandwidth and align the rate scale algorithm
+			 * on the bandwidth change. Here we only consider the
+			 * bandwidth of the new channel definition (as channel
+			 * switch flow does not have the full HT/VHT/HE
+			 * information), assuming that if additional changes are
+			 * required they would be done as part of the processing
+			 * of the next beacon from the AP.
+			 */
+			switch (sdata->csa_chandef.width) {
+			case NL80211_CHAN_WIDTH_20_NOHT:
+			case NL80211_CHAN_WIDTH_20:
+			default:
+				bw = IEEE80211_STA_RX_BW_20;
+				break;
+			case NL80211_CHAN_WIDTH_40:
+				bw = IEEE80211_STA_RX_BW_40;
+				break;
+			case NL80211_CHAN_WIDTH_80:
+				bw = IEEE80211_STA_RX_BW_80;
+				break;
+			case NL80211_CHAN_WIDTH_80P80:
+			case NL80211_CHAN_WIDTH_160:
+				bw = IEEE80211_STA_RX_BW_160;
+				break;
+			}
+
+			mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+			sband =
+				local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+		}
+
+		if (sdata->vif.bss_conf.chandef.width >
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		ret = ieee80211_vif_use_reserved_context(sdata);
 		if (ret) {
 			sdata_info(sdata,
@@ -1007,6 +1053,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
 			goto out;
 		}
 
+		if (sdata->vif.bss_conf.chandef.width <
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		goto out;
 	}
 
@@ -1229,6 +1282,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 					  cbss->beacon_interval));
 	return;
  drop_connection:
+	/*
+	 * This is just so that the disconnect flow will know that
+	 * we were trying to switch channel and failed. In case the
+	 * mode is 1 (we are not allowed to Tx), we will know not to
+	 * send a deauthentication frame. Those two fields will be
+	 * reset when the disconnection worker runs.
+	 */
+	sdata->vif.csa_active = true;
+	sdata->csa_block_tx = csa_ie.mode;
+
 	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
 	mutex_unlock(&local->chanctx_mtx);
 	mutex_unlock(&local->mtx);
@@ -2401,6 +2464,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+	bool tx;
 
 	sdata_lock(sdata);
 	if (!ifmgd->associated) {
@@ -2408,6 +2472,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 
+	tx = !sdata->csa_block_tx;
+
 	/* AP is probably out of range (or not reachable for another reason) so
 	 * remove the bss struct for that AP.
 	 */
@@ -2415,7 +2481,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-			       true, frame_buf);
+			       tx, frame_buf);
 	mutex_lock(&local->mtx);
 	sdata->vif.csa_active = false;
 	ifmgd->csa_waiting_bcn = false;
@@ -2426,7 +2492,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 	}
 	mutex_unlock(&local->mtx);
 
-	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
 				    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
 	sdata_unlock(sdata);
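
The ordering in the bandwidth fix above matters: when moving to a narrower channel, the AP station's bandwidth is reduced *before* ieee80211_vif_use_reserved_context() performs the switch, and when moving to a wider one it is raised only *after* the switch, so rate control never runs with a station bandwidth wider than the channel actually in use. A user-space mock of that ordering decision; the enum and helpers are illustrative stand-ins, not kernel API:

#include <stdio.h>

/* Mock channel/station widths; the ordering mirrors the kernel enums
 * (wider compares greater) but the names and values are made up. */
enum chan_width { W20, W40, W80, W160 };

static enum chan_width switch_channel(enum chan_width w)
{
	printf("channel switched to width %d\n", w);
	return w;
}

static void set_sta_bw(enum chan_width *sta_bw, enum chan_width bw)
{
	*sta_bw = bw;
	printf("station bandwidth set to %d\n", bw);
}

int main(void)
{
	enum chan_width cur = W80, target = W20, sta_bw = W80;

	/* Shrink first, so rate control never exceeds the live channel... */
	if (cur > target)
		set_sta_bw(&sta_bw, target);

	cur = switch_channel(target);

	/* ...and grow only once the wider channel is actually in use. */
	if (sta_bw < cur)
		set_sta_bw(&sta_bw, cur);
	return 0;
}
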
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -11148,6 +11148,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
 		return -EOPNOTSUPP;
 
 	if (!info->attrs[NL80211_ATTR_MDID] ||
+	    !info->attrs[NL80211_ATTR_IE] ||
 	    !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
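
The one-line fix works because a validator like is_valid_ie_attr() treats an absent (NULL) attribute as acceptable, while the code that follows dereferences the attribute unconditionally, so presence must be checked explicitly. A stripped-down illustration of the pattern, with hypothetical names rather than the nl80211 code:

#include <stddef.h>
#include <stdio.h>

struct attr { size_t len; const unsigned char *data; };

/* Mirrors the validator's convention: a missing attribute counts as
 * "valid" so callers with optional attributes can share the helper. */
static int is_valid_ie_attr_mock(const struct attr *a)
{
	if (!a)
		return 1;
	return a->len >= 2; /* stand-in for real IE structure checks */
}

static int update_ft_ies_mock(const struct attr *ie)
{
	/* The buggy shape let NULL through the validator and then
	 * dereferenced it; the fix ORs an explicit presence test
	 * into the guard, as in the hunk above. */
	if (!ie || !is_valid_ie_attr_mock(ie))
		return -1; /* -EINVAL in the kernel */

	printf("using %zu bytes of FT IEs\n", ie->len);
	return 0;
}

int main(void)
{
	printf("missing attr -> %d\n", update_ft_ies_mock(NULL));
	return 0;
}
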
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1432,7 +1432,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
 					  u8 *op_class)
 {
 	u8 vht_opclass;
-	u16 freq = chandef->center_freq1;
+	u32 freq = chandef->center_freq1;
 
 	if (freq >= 2412 && freq <= 2472) {
 		if (chandef->width > NL80211_CHAN_WIDTH_40)
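
center_freq1 is a u32 holding MHz; storing it in a u16 silently truncates anything at or above 65536 MHz, which the 60 GHz band reaches (a channel centered at 66960 MHz becomes 1424 and then matches none of the function's band range checks). A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t center_freq1 = 66960; /* a 60 GHz channel center, in MHz */
	uint16_t truncated = (uint16_t)center_freq1;

	/* 66960 - 65536 = 1424: the frequency "moves" below the 2.4 GHz
	 * band, so every range check in the function misses it. */
	printf("u32=%u u16=%u\n", center_freq1, truncated);
	return 0;
}
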
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5698,6 +5698,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
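
Each SND_PCI_QUIRK row pairs a PCI subsystem vendor/device ID with a fixup, and the driver applies the first matching row, so adding hardware support is just a table entry. A minimal mock of that kind of table-driven lookup; the struct, macro, and fixup numbers here are made up, not the real HDA types:

#include <stdio.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
	int fixup; /* stand-in for the ALC*_FIXUP_* enum */
};

#define PCI_QUIRK(sv, sd, n, f) { .subvendor = (sv), .subdevice = (sd), \
				  .name = (n), .fixup = (f) }

static const struct quirk tbl[] = {
	PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", 1),
	PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", 2), /* the new row */
	PCI_QUIRK(0x1028, 0x075d, "Dell AIO", 2),
	{ 0 } /* terminator */
};

static const struct quirk *lookup(unsigned short sv, unsigned short sd)
{
	for (const struct quirk *q = tbl; q->subvendor; q++)
		if (q->subvendor == sv && q->subdevice == sd)
			return q; /* first match wins */
	return NULL;
}

int main(void)
{
	const struct quirk *q = lookup(0x1028, 0x075c);

	printf("%s -> fixup %d\n", q ? q->name : "none", q ? q->fixup : -1);
	return 0;
}
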
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -21,15 +21,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
 			     struct symbol *symb __maybe_unused)
 {
 	char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 	/* Skip over any initial dot */
 	if (*sym == '.')
 		sym++;
+#endif
 
 	/* Avoid "SyS" kernel syscall aliases */
 	if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -40,6 +41,7 @@ int arch__choose_best_symbol(struct symbol *syma,
 	return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
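
The point of narrowing the #if is that dot-prefixed entry symbols are an ELFv1 (big-endian powerpc64) artifact, while "SyS" syscall aliases exist on both endiannesses, so only the dot handling should be conditional. A sketch of the two string checks involved, outside any #if; the helper names are mine, only the check shapes follow the hunk:

#include <stdio.h>
#include <string.h>

/* Strip an ELFv1-style leading dot (".sys_open" -> "sys_open"). */
static const char *strip_dot(const char *sym)
{
	if (*sym == '.')
		sym++;
	return sym;
}

/* Matches the unconditional check above: prefer non-"SyS" names. */
static int is_sys_alias(const char *sym)
{
	return strlen(sym) >= 3 && !strncmp(sym, "SyS", 3);
}

int main(void)
{
	const char *a = ".SyS_open", *b = "sys_open";

	printf("%s alias? %d\n", a, is_sys_alias(strip_dot(a))); /* 1 */
	printf("%s alias? %d\n", b, is_sys_alias(strip_dot(b))); /* 0 */
	return 0;
}
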
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -241,8 +241,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-	if (evsel != NULL)
-		perf_evsel__init(evsel, attr, idx);
+	if (!evsel)
+		return NULL;
+	perf_evsel__init(evsel, attr, idx);
 
 	if (perf_evsel__is_bpf_output(evsel)) {
 		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
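
The old code guarded only the init call and then fell through to perf_evsel__is_bpf_output(evsel) with a possibly-NULL pointer; the rewrite turns the guard into an early return. The pattern in isolation, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct evsel { int idx; };

static struct evsel *evsel_new(int idx)
{
	struct evsel *e = calloc(1, sizeof(*e));

	/* Early return on allocation failure; everything below may
	 * safely dereference e. The buggy shape skipped only the
	 * init and then used e unconditionally anyway. */
	if (!e)
		return NULL;
	e->idx = idx;
	return e;
}

int main(void)
{
	struct evsel *e = evsel_new(3);

	if (!e)
		return 1;
	printf("idx=%d\n", e->idx);
	free(e);
	return 0;
}
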
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -155,12 +155,6 @@ static const char * const page_flag_names[] = {
 };
 
-
-static const char * const debugfs_known_mountpoints[] = {
-	"/sys/kernel/debug",
-	"/debug",
-	0,
-};
 
 /*
  * data structures
 */
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -29,8 +29,8 @@ struct slabinfo {
 	int alias;
 	int refs;
 	int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-	int hwcache_align, object_size, objs_per_slab;
-	int sanity_checks, slab_size, store_user, trace;
+	unsigned int hwcache_align, object_size, objs_per_slab;
+	unsigned int sanity_checks, slab_size, store_user, trace;
 	int order, poison, reclaim_account, red_zone;
 	unsigned long partial, objects, slabs, objects_partial, objects_total;
 	unsigned long alloc_fastpath, alloc_slowpath;
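
These fields get compared against unsigned quantities elsewhere in the tool, which is what trips -Wsign-compare when they are declared int. A two-line reminder of what that warning protects against:

#include <stdio.h>

int main(void)
{
	int signed_val = -1;
	unsigned int unsigned_val = 1;

	/* The usual arithmetic conversions turn -1 into UINT_MAX here,
	 * so the "obviously true" comparison is false; gcc flags it
	 * with -Wsign-compare. */
	if (signed_val < unsigned_val)
		printf("less\n");
	else
		printf("not less (signed_val converted to %u)\n",
		       (unsigned int)signed_val);
	return 0;
}
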