Merge the linux-stable linux-3.4.y branch
This brings us up to Linux 3.4.112. The upstream Bluetooth changes were omitted, as we have been doing so far; a follow-up patch could bring back all of the upstream changes. Some compilation fixes have been made as well, such as porting the fixes in fs/exec.c and removing extraneous variables in sysctl_net_ipv4.c.

Change-Id: Ic277eeb5f774d8d5a30a11455584323772917d42
This commit is contained in:
commit b88e85e0fe
@@ -24,17 +24,33 @@ For monitoring and control pktgen creates:
/proc/net/pktgen/ethX


Viewing threads
===============
/proc/net/pktgen/kpktgend_0
Name: kpktgend_0 max_before_softirq: 10000
Running:
Stopped: eth1
Result: OK: max_before_softirq=10000
Kernel threads
==============
Pktgen creates a thread for each CPU with affinity to that CPU.
Which is controlled through procfile /proc/net/pktgen/kpktgend_X.

Most important the devices assigned to thread. Note! A device can only belong
to one thread.
Example: /proc/net/pktgen/kpktgend_0

Running:
Stopped: eth4@0
Result: OK: add_device=eth4@0

Most important are the devices assigned to the thread.

The two basic thread commands are:
* add_device DEVICE@NAME -- adds a single device
* rem_device_all -- remove all associated devices

When adding a device to a thread, a corrosponding procfile is created
which is used for configuring this device. Thus, device names need to
be unique.

To support adding the same device to multiple threads, which is useful
with multi queue NICs, a the device naming scheme is extended with "@":
device@something

The part after "@" can be anything, but it is custom to use the thread
number.

Viewing devices
===============

@@ -42,29 +58,32 @@ Viewing devices
Parm section holds configured info. Current hold running stats.
Result is printed after run or after interruption. Example:

/proc/net/pktgen/eth1
/proc/net/pktgen/eth4@0

Params: count 10000000 min_pkt_size: 60 max_pkt_size: 60
frags: 0 delay: 0 clone_skb: 1000000 ifname: eth1
Params: count 100000 min_pkt_size: 60 max_pkt_size: 60
frags: 0 delay: 0 clone_skb: 64 ifname: eth4@0
flows: 0 flowlen: 0
dst_min: 10.10.11.2 dst_max:
src_min: src_max:
src_mac: 00:00:00:00:00:00 dst_mac: 00:04:23:AC:FD:82
udp_src_min: 9 udp_src_max: 9 udp_dst_min: 9 udp_dst_max: 9
src_mac_count: 0 dst_mac_count: 0
Flags:
Current:
pkts-sofar: 10000000 errors: 39664
started: 1103053986245187us stopped: 1103053999346329us idle: 880401us
seq_num: 10000011 cur_dst_mac_offset: 0 cur_src_mac_offset: 0
cur_saddr: 0x10a0a0a cur_daddr: 0x20b0a0a
cur_udp_dst: 9 cur_udp_src: 9
queue_map_min: 0 queue_map_max: 0
dst_min: 192.168.81.2 dst_max:
src_min: src_max:
src_mac: 90:e2:ba:0a:56:b4 dst_mac: 00:1b:21:3c:9d:f8
udp_src_min: 9 udp_src_max: 109 udp_dst_min: 9 udp_dst_max: 9
src_mac_count: 0 dst_mac_count: 0
Flags: UDPSRC_RND NO_TIMESTAMP QUEUE_MAP_CPU
Current:
pkts-sofar: 100000 errors: 0
started: 623913381008us stopped: 623913396439us idle: 25us
seq_num: 100001 cur_dst_mac_offset: 0 cur_src_mac_offset: 0
cur_saddr: 192.168.8.3 cur_daddr: 192.168.81.2
cur_udp_dst: 9 cur_udp_src: 42
cur_queue_map:
flows: 0
Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
763292pps 390Mb/sec (390805504bps) errors: 39664
Result: OK: 15430(c15405d25) usec, 100000 (60byte,0frags)
6480562pps 3110Mb/sec (3110669760bps) errors: 0

Configuring threads and devices
================================

Configuring devices
===================
This is done via the /proc interface easiest done via pgset in the scripts

Examples:

@@ -177,6 +196,8 @@ Note when adding devices to a specific CPU there good idea to also assign
/proc/irq/XX/smp_affinity so the TX-interrupts gets bound to the same CPU.
as this reduces cache bouncing when freeing skb's.

Plus using the device flag QUEUE_MAP_CPU, which maps the SKBs TX queue
to the running threads CPU (directly from smp_processor_id()).

Current commands and configuration options
==========================================

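The pktgen documentation above configures everything through writes to the /proc files, normally via the pgset shell helper from the sample scripts. As a minimal sketch of that same flow in C (the device name eth4@0 and the counts are borrowed from the example output above; everything else here is illustrative, not part of the merged diff):

    /* Hedged sketch: drive the pktgen /proc interface directly from C
     * instead of the pgset shell helper from the sample scripts. */
    #include <stdio.h>
    #include <stdlib.h>

    static void pgset(const char *procfile, const char *cmd)
    {
        FILE *f = fopen(procfile, "w");

        if (!f) {
            perror(procfile);
            exit(1);
        }
        fprintf(f, "%s\n", cmd);  /* each write is one pktgen command */
        fclose(f);
    }

    int main(void)
    {
        /* Assign the device to a kernel thread... */
        pgset("/proc/net/pktgen/kpktgend_0", "rem_device_all");
        pgset("/proc/net/pktgen/kpktgend_0", "add_device eth4@0");

        /* ...configure it... */
        pgset("/proc/net/pktgen/eth4@0", "count 100000");
        pgset("/proc/net/pktgen/eth4@0", "clone_skb 64");
        pgset("/proc/net/pktgen/eth4@0", "dst 192.168.81.2");

        /* ...and start transmitting (blocks until done or interrupted). */
        pgset("/proc/net/pktgen/pgctrl", "start");
        return 0;
    }
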
@@ -62,11 +62,10 @@ Socket Interface
================

AF_RDS, PF_RDS, SOL_RDS
These constants haven't been assigned yet, because RDS isn't in
mainline yet. Currently, the kernel module assigns some constant
and publishes it to user space through two sysctl files
/proc/sys/net/rds/pf_rds
/proc/sys/net/rds/sol_rds
AF_RDS and PF_RDS are the domain type to be used with socket(2)
to create RDS sockets. SOL_RDS is the socket-level to be used
with setsockopt(2) and getsockopt(2) for RDS specific socket
options.

fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
This creates a new, unbound RDS socket.

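A minimal user-space sketch of the socket(2) call described above. PF_RDS is 21 in mainline's <linux/socket.h>; the fallback define below is an assumption for older headers, where the pre-mainline module instead published the value through /proc/sys/net/rds/pf_rds:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef PF_RDS
    #define PF_RDS 21   /* AF_RDS/PF_RDS as assigned in mainline */
    #endif

    int main(void)
    {
        int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

        if (fd < 0) {
            perror("socket(PF_RDS)");
            return 1;
        }
        /* fd is a new, unbound RDS socket; bind(2) attaches it to a port. */
        return 0;
    }
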
@@ -72,7 +72,6 @@ static struct pinctrl_desc foo_desc = {
    .name = "foo",
    .pins = foo_pins,
    .npins = ARRAY_SIZE(foo_pins),
    .maxpin = 63,
    .owner = THIS_MODULE,
};

@@ -166,8 +165,8 @@ static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
}

static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
            unsigned ** const pins,
            unsigned * const num_pins)
            const unsigned **pins,
            unsigned *num_pins)
{
    *pins = (unsigned *) foo_groups[selector].pins;
    *num_pins = foo_groups[selector].num_pins;

@@ -1043,7 +1042,7 @@ The semantics of the pinctrl APIs are:

Usually the pin control core handled the get/put pair and call out to the
device drivers bookkeeping operations, like checking available functions and
the associated pins, whereas the enable/disable pass on to the pin controller
the associated pins, whereas select_state pass on to the pin controller
driver which takes care of activating and/or deactivating the mux setting by
quickly poking some registers.

@@ -1089,8 +1088,9 @@ function, but with different named in the mapping as described under
"Advanced mapping" above. So that for an SPI device, we have two states named
"pos-A" and "pos-B".

This snippet first muxes the function in the pins defined by group A, enables
it, disables and releases it, and muxes it in on the pins defined by group B:
This snippet first initializes a state object for both groups (in foo_probe()),
then muxes the function in the pins defined by group A, and finally muxes it in
on the pins defined by group B:

#include <linux/pinctrl/consumer.h>

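A hedged sketch of the consumer-side flow that documentation hunk describes, using the pinctrl consumer API (pinctrl_get(), pinctrl_lookup_state(), pinctrl_select_state()). The state names "pos-A"/"pos-B" come from the SPI example above; the function names and the device pointer are illustrative:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    static struct pinctrl *p;
    static struct pinctrl_state *s_a, *s_b;

    static int foo_pinctrl_probe(struct device *dev)
    {
        /* Look the handle and both states up once, at probe time. */
        p = pinctrl_get(dev);
        if (IS_ERR(p))
            return PTR_ERR(p);

        s_a = pinctrl_lookup_state(p, "pos-A");
        if (IS_ERR(s_a))
            return PTR_ERR(s_a);
        s_b = pinctrl_lookup_state(p, "pos-B");
        if (IS_ERR(s_b))
            return PTR_ERR(s_b);

        /* Mux the function in on the pins defined by group A... */
        return pinctrl_select_state(p, s_a);
    }

    static int foo_switch_to_b(void)
    {
        /* ...and later mux it in on the pins defined by group B. */
        return pinctrl_select_state(p, s_b);
    }
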
Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
SUBLEVEL = 107
SUBLEVEL = 112
EXTRAVERSION =
NAME = Saber-toothed Squirrel

@@ -84,7 +84,7 @@ static int pci_mmap_resource(struct kobject *kobj,
    if (iomem_is_exclusive(res->start))
        return -EINVAL;

    pcibios_resource_to_bus(pdev, &bar, res);
    pcibios_resource_to_bus(pdev->bus, &bar, res);
    vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
    mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

@@ -140,7 +140,7 @@ static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num)
    long dense_offset;
    unsigned long sparse_size;

    pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]);
    pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]);

    /* All core logic chips have 4G sparse address space, except
       CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM

@@ -53,6 +53,14 @@ endif

comma = ,

#
# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
# later may result in code being generated that handles signed short and signed
# char struct members incorrectly. So disable it.
# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
#
KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)

# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes

@@ -208,7 +208,7 @@

    fec: fec@1002b000 {
        compatible = "fsl,imx27-fec";
        reg = <0x1002b000 0x4000>;
        reg = <0x1002b000 0x1000>;
        interrupts = <50>;
        status = "disabled";
    };

@@ -116,7 +116,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
   the loader. We need to make sure that it is out of the way of the program
   that it will "exec", and that there is sufficient room for the brk. */

#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)

/* When the program starts, a1 contains a pointer to a function to be
   registered with atexit, as per the SVR4 ABI. A value of 0 means we

@@ -440,12 +440,17 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
     */
    thumb = handler & 1;

#if __LINUX_ARM_ARCH__ >= 7
#if __LINUX_ARM_ARCH__ >= 6
    /*
     * Clear the If-Then Thumb-2 execution state
     * ARM spec requires this to be all 000s in ARM mode
     * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
     * signal transition without this.
     * Clear the If-Then Thumb-2 execution state. ARM spec
     * requires this to be all 000s in ARM mode. Snapdragon
     * S4/Krait misbehaves on a Thumb=>ARM signal transition
     * without this.
     *
     * We must do this whenever we are running on a Thumb-2
     * capable CPU, which includes ARMv6T2. However, we elect
     * to do this whenever we're on an ARMv6 or later CPU for
     * simplicity.
     */
    cpsr &= ~PSR_IT_MASK;
#endif

@@ -38,7 +38,7 @@ static inline void at91rm9200_standby(void)
        " mcr p15, 0, %0, c7, c0, 4\n\t"
        " str %5, [%1, %2]"
        :
        : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
        : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
          "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
          "r" (lpr));
}

@@ -718,4 +718,13 @@ config PXA_HAVE_ISA_IRQS
config PXA310_ULPI
    bool

config PXA_SYSTEMS_CPLDS
    tristate "Motherboard cplds"
    default ARCH_LUBBOCK || MACH_MAINSTONE
    help
      This driver supports the Lubbock and Mainstone multifunction chip
      found on the pxa25x development platform system (Lubbock) and pxa27x
      development platform system (Mainstone). This IO board supports the
      interrupts handling, ethernet controller, flash chips, etc ...

endif

@@ -103,4 +103,5 @@ led-$(CONFIG_ARCH_PXA_IDP) += leds-idp.o

obj-$(CONFIG_LEDS) += $(led-y)

obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
obj-$(CONFIG_TOSA_BT) += tosa-bt.o

@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/io.h>
#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
@@ -711,6 +712,8 @@ static void __init corgi_init(void)
    sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;

    platform_add_devices(devices, ARRAY_SIZE(devices));

    regulator_has_full_constraints();
}

static void __init fixup_corgi(struct tag *tags, char **cmdline,

@@ -881,6 +881,8 @@ static void __init hx4700_init(void)
    mdelay(10);
    gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
    mdelay(10);

    regulator_has_full_constraints();
}

MACHINE_START(H4700, "HP iPAQ HX4700")

@@ -35,7 +35,9 @@
#define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)

/* Board specific IRQs */
#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x))
#define LUBBOCK_NR_IRQS IRQ_BOARD_START

#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x))
#define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0)
#define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1)
#define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */
@@ -45,8 +47,7 @@
#define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */
#define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6)

#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16)
#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55)
#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32)

#ifndef __ASSEMBLY__
extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);

@@ -120,7 +120,9 @@
#define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */

/* board specific IRQs */
#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x))
#define MAINSTONE_NR_IRQS IRQ_BOARD_START

#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x))
#define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0)
#define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1)
#define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2)
@@ -136,6 +138,4 @@
#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)

#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16)

#endif

@@ -12,6 +12,7 @@
 * published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -120,84 +121,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
}
EXPORT_SYMBOL(lubbock_set_misc_wr);

static unsigned long lubbock_irq_enabled;

static void lubbock_mask_irq(struct irq_data *d)
{
    int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
    LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
}

static void lubbock_unmask_irq(struct irq_data *d)
{
    int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
    /* the irq can be acknowledged only if deasserted, so it's done here */
    LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
    LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
}

static struct irq_chip lubbock_irq_chip = {
    .name = "FPGA",
    .irq_ack = lubbock_mask_irq,
    .irq_mask = lubbock_mask_irq,
    .irq_unmask = lubbock_unmask_irq,
};

static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
{
    unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
    do {
        /* clear our parent irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);
        if (likely(pending)) {
            irq = LUBBOCK_IRQ(0) + __ffs(pending);
            generic_handle_irq(irq);
        }
        pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
    } while (pending);
}

static void __init lubbock_init_irq(void)
{
    int irq;

    pxa25x_init_irq();

    /* setup extra lubbock irqs */
    for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
        irq_set_chip_and_handler(irq, &lubbock_irq_chip,
                     handle_level_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    }

    irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
    irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}

#ifdef CONFIG_PM

static void lubbock_irq_resume(void)
{
    LUB_IRQ_MASK_EN = lubbock_irq_enabled;
}

static struct syscore_ops lubbock_irq_syscore_ops = {
    .resume = lubbock_irq_resume,
};

static int __init lubbock_irq_device_init(void)
{
    if (machine_is_lubbock()) {
        register_syscore_ops(&lubbock_irq_syscore_ops);
        return 0;
    }
    return -ENODEV;
}

device_initcall(lubbock_irq_device_init);

#endif

static int lubbock_udc_is_connected(void)
{
    return (LUB_MISC_RD & (1 << 9)) == 0;
@@ -380,11 +303,38 @@ static struct platform_device lubbock_flash_device[2] = {
    },
};

static struct resource lubbock_cplds_resources[] = {
    [0] = {
        .start = LUBBOCK_FPGA_PHYS + 0xc0,
        .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = PXA_GPIO_TO_IRQ(0),
        .end = PXA_GPIO_TO_IRQ(0),
        .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
    },
    [2] = {
        .start = LUBBOCK_IRQ(0),
        .end = LUBBOCK_IRQ(6),
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device lubbock_cplds_device = {
    .name = "pxa_cplds_irqs",
    .id = -1,
    .resource = &lubbock_cplds_resources[0],
    .num_resources = 3,
};


static struct platform_device *devices[] __initdata = {
    &sa1111_device,
    &smc91x_device,
    &lubbock_flash_device[0],
    &lubbock_flash_device[1],
    &lubbock_cplds_device,
};

static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -553,7 +503,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
    /* Maintainer: MontaVista Software Inc. */
    .map_io = lubbock_map_io,
    .nr_irqs = LUBBOCK_NR_IRQS,
    .init_irq = lubbock_init_irq,
    .init_irq = pxa25x_init_irq,
    .handle_irq = pxa25x_handle_irq,
    .timer = &pxa_timer,
    .init_machine = lubbock_init,

@@ -13,6 +13,7 @@
 * published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
@@ -120,92 +121,6 @@ static unsigned long mainstone_pin_config[] = {
    GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
};

static unsigned long mainstone_irq_enabled;

static void mainstone_mask_irq(struct irq_data *d)
{
    int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
    MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
}

static void mainstone_unmask_irq(struct irq_data *d)
{
    int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
    /* the irq can be acknowledged only if deasserted, so it's done here */
    MST_INTSETCLR &= ~(1 << mainstone_irq);
    MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
}

static struct irq_chip mainstone_irq_chip = {
    .name = "FPGA",
    .irq_ack = mainstone_mask_irq,
    .irq_mask = mainstone_mask_irq,
    .irq_unmask = mainstone_unmask_irq,
};

static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
{
    unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
    do {
        /* clear useless edge notification */
        desc->irq_data.chip->irq_ack(&desc->irq_data);
        if (likely(pending)) {
            irq = MAINSTONE_IRQ(0) + __ffs(pending);
            generic_handle_irq(irq);
        }
        pending = MST_INTSETCLR & mainstone_irq_enabled;
    } while (pending);
}

static void __init mainstone_init_irq(void)
{
    int irq;

    pxa27x_init_irq();

    /* setup extra Mainstone irqs */
    for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
        irq_set_chip_and_handler(irq, &mainstone_irq_chip,
                     handle_level_irq);
        if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
            set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
        else
            set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    }
    set_irq_flags(MAINSTONE_IRQ(8), 0);
    set_irq_flags(MAINSTONE_IRQ(12), 0);

    MST_INTMSKENA = 0;
    MST_INTSETCLR = 0;

    irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
    irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}

#ifdef CONFIG_PM

static void mainstone_irq_resume(void)
{
    MST_INTMSKENA = mainstone_irq_enabled;
}

static struct syscore_ops mainstone_irq_syscore_ops = {
    .resume = mainstone_irq_resume,
};

static int __init mainstone_irq_device_init(void)
{
    if (machine_is_mainstone())
        register_syscore_ops(&mainstone_irq_syscore_ops);

    return 0;
}

device_initcall(mainstone_irq_device_init);

#endif


static struct resource smc91x_resources[] = {
    [0] = {
        .start = (MST_ETH_PHYS + 0x300),
@@ -483,11 +398,37 @@ static struct platform_device mst_gpio_keys_device = {
    },
};

static struct resource mst_cplds_resources[] = {
    [0] = {
        .start = MST_FPGA_PHYS + 0xc0,
        .end = MST_FPGA_PHYS + 0xe0 - 1,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = PXA_GPIO_TO_IRQ(0),
        .end = PXA_GPIO_TO_IRQ(0),
        .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
    },
    [2] = {
        .start = MAINSTONE_IRQ(0),
        .end = MAINSTONE_IRQ(15),
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device mst_cplds_device = {
    .name = "pxa_cplds_irqs",
    .id = -1,
    .resource = &mst_cplds_resources[0],
    .num_resources = 3,
};

static struct platform_device *platform_devices[] __initdata = {
    &smc91x_device,
    &mst_flash_device[0],
    &mst_flash_device[1],
    &mst_gpio_keys_device,
    &mst_cplds_device,
};

static struct pxaohci_platform_data mainstone_ohci_platform_data = {
@@ -618,7 +559,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
    .atag_offset = 0x100, /* BLOB boot parameter setting */
    .map_io = mainstone_map_io,
    .nr_irqs = MAINSTONE_NR_IRQS,
    .init_irq = mainstone_init_irq,
    .init_irq = pxa27x_init_irq,
    .handle_irq = pxa27x_handle_irq,
    .timer = &pxa_timer,
    .init_machine = mainstone_init,

@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -452,6 +453,7 @@ static void __init poodle_init(void)
    pxa_set_i2c_info(NULL);
    i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
    poodle_init_spi();
    regulator_has_full_constraints();
}

static void __init fixup_poodle(struct tag *tags, char **cmdline,

@@ -0,0 +1,200 @@
/*
 * Intel Reference Systems cplds
 *
 * Copyright (C) 2014 Robert Jarzmik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Cplds motherboard driver, supporting lubbock and mainstone SoC board.
 */

#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#define FPGA_IRQ_MASK_EN 0x0
#define FPGA_IRQ_SET_CLR 0x10

#define CPLDS_NB_IRQ 32

struct cplds {
    void __iomem *base;
    int irq;
    unsigned int irq_mask;
    struct gpio_desc *gpio0;
    struct irq_domain *irqdomain;
};

static irqreturn_t cplds_irq_handler(int in_irq, void *d)
{
    struct cplds *fpga = d;
    unsigned long pending;
    unsigned int bit;

    pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
    for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
        generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));

    return IRQ_HANDLED;
}

static void cplds_irq_mask_ack(struct irq_data *d)
{
    struct cplds *fpga = irq_data_get_irq_chip_data(d);
    unsigned int cplds_irq = irqd_to_hwirq(d);
    unsigned int set, bit = BIT(cplds_irq);

    fpga->irq_mask &= ~bit;
    writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
    set = readl(fpga->base + FPGA_IRQ_SET_CLR);
    writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
}

static void cplds_irq_unmask(struct irq_data *d)
{
    struct cplds *fpga = irq_data_get_irq_chip_data(d);
    unsigned int cplds_irq = irqd_to_hwirq(d);
    unsigned int bit = BIT(cplds_irq);

    fpga->irq_mask |= bit;
    writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
}

static struct irq_chip cplds_irq_chip = {
    .name = "pxa_cplds",
    .irq_mask_ack = cplds_irq_mask_ack,
    .irq_unmask = cplds_irq_unmask,
    .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};

static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
                irq_hw_number_t hwirq)
{
    struct cplds *fpga = d->host_data;

    irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
    irq_set_chip_data(irq, fpga);

    return 0;
}

static const struct irq_domain_ops cplds_irq_domain_ops = {
    .xlate = irq_domain_xlate_twocell,
    .map = cplds_irq_domain_map,
};

static int cplds_resume(struct platform_device *pdev)
{
    struct cplds *fpga = platform_get_drvdata(pdev);

    writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);

    return 0;
}

static int cplds_probe(struct platform_device *pdev)
{
    struct resource *res;
    struct cplds *fpga;
    int ret;
    unsigned int base_irq = 0;
    unsigned long irqflags = 0;

    fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
    if (!fpga)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res) {
        fpga->irq = (unsigned int)res->start;
        irqflags = res->flags;
    }
    if (!fpga->irq)
        return -ENODEV;

    base_irq = platform_get_irq(pdev, 1);
    if (base_irq < 0)
        base_irq = 0;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    fpga->base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(fpga->base))
        return PTR_ERR(fpga->base);

    platform_set_drvdata(pdev, fpga);

    writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
    writel(0, fpga->base + FPGA_IRQ_SET_CLR);

    ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
                   irqflags, dev_name(&pdev->dev), fpga);
    if (ret == -ENOSYS)
        return -EPROBE_DEFER;

    if (ret) {
        dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
            fpga->irq, ret);
        return ret;
    }

    irq_set_irq_wake(fpga->irq, 1);
    fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
                        CPLDS_NB_IRQ,
                        &cplds_irq_domain_ops, fpga);
    if (!fpga->irqdomain)
        return -ENODEV;

    if (base_irq) {
        ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
                         CPLDS_NB_IRQ);
        if (ret) {
            dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
                base_irq, base_irq + CPLDS_NB_IRQ);
            return ret;
        }
    }

    return 0;
}

static int cplds_remove(struct platform_device *pdev)
{
    struct cplds *fpga = platform_get_drvdata(pdev);

    irq_set_chip_and_handler(fpga->irq, NULL, NULL);

    return 0;
}

static const struct of_device_id cplds_id_table[] = {
    { .compatible = "intel,lubbock-cplds-irqs", },
    { .compatible = "intel,mainstone-cplds-irqs", },
    { }
};
MODULE_DEVICE_TABLE(of, cplds_id_table);

static struct platform_driver cplds_driver = {
    .driver = {
        .name = "pxa_cplds_irqs",
        .of_match_table = of_match_ptr(cplds_id_table),
    },
    .probe = cplds_probe,
    .remove = cplds_remove,
    .resume = cplds_resume,
};

module_platform_driver(cplds_driver);

MODULE_DESCRIPTION("PXA Cplds interrupts driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL");

@@ -968,6 +968,8 @@ static void __init spitz_init(void)
    spitz_nor_init();
    spitz_nand_init();
    spitz_i2c_init();

    regulator_has_full_constraints();
}

static void __init spitz_fixup(struct tag *tags, char **cmdline,

@@ -80,6 +80,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
    /*
     * Ensure not to come back here if it wasn't intended
     */
    RCSR = RCSR_SMR;
    PSPR = 0;

    /*

@@ -52,6 +52,7 @@
#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET (1 << 0)
#define FLAG_IMM_OVERFLOW (1 << 1)

struct jit_ctx {
    const struct sk_filter *skf;
@@ -286,6 +287,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
    /* PC in ARM mode == address of the instruction + 8 */
    imm = offset - (8 + ctx->idx * 4);

    if (imm & ~0xfff) {
        /*
         * literal pool is too far, signal it into flags. we
         * can only detect it on the second pass unfortunately.
         */
        ctx->flags |= FLAG_IMM_OVERFLOW;
        return 0;
    }

    return imm;
}

@@ -817,6 +827,14 @@ b_epilogue:
        default:
            return -1;
        }

        if (ctx->flags & FLAG_IMM_OVERFLOW)
            /*
             * this instruction generated an overflow when
             * trying to access the literal pool, so
             * delegate this filter to the kernel interpreter.
             */
            return -1;
    }

    /* compute offsets only during the first pass */
@@ -876,7 +894,14 @@ void bpf_jit_compile(struct sk_filter *fp)

    ctx.idx = 0;
    build_prologue(&ctx);
    build_body(&ctx);
    if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
        if (ctx.imm_count)
            kfree(ctx.imms);
#endif
        module_free(NULL, ctx.target);
        goto out;
    }
    build_epilogue(&ctx);

    flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

@@ -49,7 +49,7 @@ u64 sched_clock(void)
    return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
}

void time_init(void)
void __init time_init(void)
{
    u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;

@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"

/*
 * Make sure the compiler doesn't do anything stupid with the
 * arguments on the stack - they are owned by the *caller*, not
 * the callee. This just fools gcc into not spilling into them,
 * and keeps it from doing tailcall recursion and/or using the
 * stack slots for temporaries, since they are live and "used"
 * all the way to the end of the function.
 */
#define asmlinkage_protect(n, ret, args...) \
    __asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
    __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect0(ret) \
    __asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
    __asmlinkage_protect_n(ret, "m" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
    __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
    __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
    __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
                  "m" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
    __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
                  "m" (arg4), "m" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
    __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
                  "m" (arg4), "m" (arg5), "m" (arg6))

#endif

@@ -153,8 +153,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
         * Make sure the buddy is global too (if it's !none,
         * it better already be global)
         */
#ifdef CONFIG_SMP
        /*
         * For SMP, multiple CPUs can race, so we need to do
         * this atomically.
         */
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
        unsigned long page_global = _PAGE_GLOBAL;
        unsigned long tmp;

        __asm__ __volatile__ (
            "   .set    push\n"
            "   .set    noreorder\n"
            "1: " LL_INSN " %[tmp], %[buddy]\n"
            "   bnez    %[tmp], 2f\n"
            "   or      %[tmp], %[tmp], %[global]\n"
            "   " SC_INSN " %[tmp], %[buddy]\n"
            "   beqz    %[tmp], 1b\n"
            "   nop\n"
            "2:\n"
            "   .set    pop"
            : [buddy] "+m" (buddy->pte),
              [tmp] "=&r" (tmp)
            : [global] "r" (page_global));
#else /* !CONFIG_SMP */
        if (pte_none(*buddy))
            pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
    }
#endif
}

@@ -110,7 +110,7 @@ void __init init_IRQ(void)
#endif
}

#ifdef DEBUG_STACKOVERFLOW
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
    unsigned long sp;

@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                      unsigned long __user *user_mask_ptr)
{
    unsigned int real_len;
    cpumask_t mask;
    cpumask_t allowed, mask;
    int retval;
    struct task_struct *p;

@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
    if (retval)
        goto out_unlock;

    cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
    cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
    cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
    read_unlock(&tasklist_lock);

@@ -71,7 +71,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
    else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
    if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
    if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
        dma_flag = __GFP_DMA;
    else
#endif

@@ -31,6 +31,8 @@ LEAF(swsusp_arch_suspend)
END(swsusp_arch_suspend)

LEAF(swsusp_arch_resume)
    /* Avoid TLB mismatch during and after kernel resume */
    jal local_flush_tlb_all
    PTR_L t0, restore_pblist
0:
    PTR_L t1, PBE_ADDRESS(t0)   /* source */
@@ -44,7 +46,6 @@ LEAF(swsusp_arch_resume)
    bne t1, t3, 1b
    PTR_L t0, PBE_NEXT(t0)
    bnez t0, 0b
    jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
    PTR_LA t0, saved_regs
    PTR_L ra, PT_R31(t0)
    PTR_L sp, PT_R29(t0)

@@ -50,6 +50,7 @@ ethernet@b0000 {
    fsl,num_tx_queues = <0x8>;
    fsl,magic-packet;
    local-mac-address = [ 00 00 00 00 00 00 ];
    ranges;

    queue-group@b0000 {
        #address-cells = <1>;

@@ -50,6 +50,7 @@ ethernet@b1000 {
    fsl,num_tx_queues = <0x8>;
    fsl,magic-packet;
    local-mac-address = [ 00 00 00 00 00 00 ];
    ranges;

    queue-group@b1000 {
        #address-cells = <1>;

@@ -49,6 +49,7 @@ ethernet@b2000 {
    fsl,num_tx_queues = <0x8>;
    fsl,magic-packet;
    local-mac-address = [ 00 00 00 00 00 00 ];
    ranges;

    queue-group@b2000 {
        #address-cells = <1>;

@@ -253,6 +253,7 @@ extern void rtas_power_off(void);
extern void rtas_halt(void);
extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_sensor_fast(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
extern bool rtas_indicator_present(int token, int *maxindex);

@@ -62,11 +62,21 @@ struct cache_type_info {
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA 2
#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA 3

static const struct cache_type_info cache_type_info[] = {
    {
        /* Embedded systems that use cache-size, cache-block-size,
         * etc. for the Unified (typically L2) cache. */
        .name = "Unified",
        .size_prop = "cache-size",
        .line_size_props = { "cache-line-size",
                     "cache-block-size", },
        .nr_sets_prop = "cache-sets",
    },
    {
        /* PowerPC Processor binding says the [di]-cache-*
         * must be equal on unified caches, so just use
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
{
    struct cache *iter;

    if (cache->type == CACHE_TYPE_UNIFIED)
    if (cache->type == CACHE_TYPE_UNIFIED ||
        cache->type == CACHE_TYPE_UNIFIED_D)
        return cache;

    list_for_each_entry(iter, &cache_list, list)
@@ -324,15 +335,27 @@ static bool cache_node_is_unified(const struct device_node *np)
    return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags. Most embedded
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc. Check on initialization for which type we have, and
 * return the appropriate structure type. Assume it's embedded if it isn't
 * open firmware. If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
    return of_get_property(np,
        cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
        CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
    struct cache *cache;

    pr_debug("creating L%d ucache for %s\n", level, node->full_name);

    cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

    return cache;
    return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)

@@ -111,7 +111,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
        res->name = pci_name(dev);
        region.start = base;
        region.end = base + size - 1;
        pcibios_bus_to_resource(dev, res, &region);
        pcibios_bus_to_resource(dev->bus, res, &region);
    }
}

@@ -276,7 +276,7 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
        res->flags = flags;
        region.start = of_read_number(&ranges[1], 2);
        region.end = region.start + size - 1;
        pcibios_bus_to_resource(dev, res, &region);
        pcibios_bus_to_resource(dev->bus, res, &region);
    }
    sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
        bus->number);

@@ -585,6 +585,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
}
EXPORT_SYMBOL(rtas_get_sensor);

int rtas_get_sensor_fast(int sensor, int index, int *state)
{
    int token = rtas_token("get-sensor-state");
    int rc;

    if (token == RTAS_UNKNOWN_SERVICE)
        return -ENOENT;

    rc = rtas_call(token, 2, 2, state, sensor, index);
    WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
                    rc <= RTAS_EXTENDED_DELAY_MAX));

    if (rc < 0)
        return rtas_error_rc(rc);
    return rc;
}

bool rtas_indicator_present(int token, int *maxindex)
{
    int proplen, count, i;
@@ -1025,6 +1042,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    if (!rtas.entry)
        return -EINVAL;

    if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
        return -EFAULT;

@@ -213,6 +213,7 @@ SECTIONS
        *(.opd)
    }

    . = ALIGN(256);
    .got : AT(ADDR(.got) - LOAD_OFFSET) {
        __toc_start = .;
        *(.got)

@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
    sp = regs->gpr[1];
    perf_callchain_store(entry, next_ip);

    for (;;) {
    while (entry->nr < PERF_MAX_STACK_DEPTH) {
        fp = (unsigned long __user *) sp;
        if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
            return;

@@ -392,7 +392,7 @@ static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)

    /* Setup IO segments */
    if (io_res.start < io_res.end) {
        pcibios_resource_to_bus(dev, &region, &io_res);
        pcibios_resource_to_bus(dev->bus, &region, &io_res);
        pos = region.start;
        i = pos / phb->ioda.io_segsize;
        while(i < phb->ioda.total_pe && pos <= region.end) {
@@ -422,7 +422,7 @@ static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)

    /* Setup M32 segments */
    if (m32_res.start < m32_res.end) {
        pcibios_resource_to_bus(dev, &region, &m32_res);
        pcibios_resource_to_bus(dev->bus, &region, &m32_res);
        pos = region.start;
        i = pos / phb->ioda.m32_segsize;
        while(i < phb->ioda.total_pe && pos <= region.end) {

@@ -137,6 +137,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
    struct pci_controller *hose = pci_bus_to_host(pdev->bus);
    struct pnv_phb *phb = hose->private_data;
    struct msi_desc *entry;
    irq_hw_number_t hwirq;

    if (WARN_ON(!phb))
        return;
@@ -144,9 +145,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
    list_for_each_entry(entry, &pdev->msi_list, list) {
        if (entry->irq == NO_IRQ)
            continue;
        hwirq = virq_to_hw(entry->irq);
        irq_set_msi_desc(entry->irq, NULL);
        pnv_put_msi(phb, virq_to_hw(entry->irq));
        irq_dispose_mapping(entry->irq);
        pnv_put_msi(phb, hwirq);
    }
}
#endif /* CONFIG_PCI_MSI */

@@ -416,6 +416,12 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        goto out;
    }

    rc = dlpar_acquire_drc(drc_index);
    if (rc) {
        rc = -EINVAL;
        goto out;
    }

    dn = dlpar_configure_connector(drc_index);
    if (!dn) {
        rc = -EINVAL;
@@ -436,13 +442,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
    kfree(dn->full_name);
    dn->full_name = cpu_name;

    rc = dlpar_acquire_drc(drc_index);
    if (rc) {
        dlpar_free_cc_nodes(dn);
        rc = -EINVAL;
        goto out;
    }

    rc = dlpar_attach_node(dn);
    if (rc) {
        dlpar_release_drc(drc_index);

@@ -187,7 +187,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
    int state;
    int critical;

    status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
    status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
                      &state);

    if (state > 3)
        critical = 1;   /* Time Critical */

@@ -155,7 +155,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
    }

    *kaddr = (void *)(bank->ph_addr + offset);
    *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
    *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;

    return 0;
}

@@ -108,15 +108,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
    struct msi_desc *entry;
    struct fsl_msi *msi_data;
    irq_hw_number_t hwirq;

    list_for_each_entry(entry, &pdev->msi_list, list) {
        if (entry->irq == NO_IRQ)
            continue;
        hwirq = virq_to_hw(entry->irq);
        msi_data = irq_get_chip_data(entry->irq);
        irq_set_msi_desc(entry->irq, NULL);
        msi_bitmap_free_hwirqs(&msi_data->bitmap,
                       virq_to_hw(entry->irq), 1);
        irq_dispose_mapping(entry->irq);
        msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
    }

    return;

@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
    struct msi_desc *entry;
    irq_hw_number_t hwirq;

    pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -81,10 +82,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
        if (entry->irq == NO_IRQ)
            continue;

        hwirq = virq_to_hw(entry->irq);
        irq_set_msi_desc(entry->irq, NULL);
        msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
                       virq_to_hw(entry->irq), ALLOC_CHUNK);
        irq_dispose_mapping(entry->irq);
        msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
    }

    return;

@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
    struct msi_desc *entry;
    irq_hw_number_t hwirq;

    list_for_each_entry(entry, &pdev->msi_list, list) {
        if (entry->irq == NO_IRQ)
            continue;

        hwirq = virq_to_hw(entry->irq);
        irq_set_msi_desc(entry->irq, NULL);
        msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
                       virq_to_hw(entry->irq), 1);
        irq_dispose_mapping(entry->irq);
        msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
    }

    return;

@@ -114,16 +114,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
    struct msi_desc *entry;
    struct ppc4xx_msi *msi_data = &ppc4xx_msi;
    irq_hw_number_t hwirq;

    dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

    list_for_each_entry(entry, &dev->msi_list, list) {
        if (entry->irq == NO_IRQ)
            continue;
        hwirq = virq_to_hw(entry->irq);
        irq_set_msi_desc(entry->irq, NULL);
        msi_bitmap_free_hwirqs(&msi_data->bitmap,
                       virq_to_hw(entry->irq), 1);
        irq_dispose_mapping(entry->irq);
        msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
    }
}

@@ -16,11 +16,12 @@
#define GHASH_DIGEST_SIZE 16

struct ghash_ctx {
    u8 icv[16];
    u8 key[16];
    u8 key[GHASH_BLOCK_SIZE];
};

struct ghash_desc_ctx {
    u8 icv[GHASH_BLOCK_SIZE];
    u8 key[GHASH_BLOCK_SIZE];
    u8 buffer[GHASH_BLOCK_SIZE];
    u32 bytes;
};
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
static int ghash_init(struct shash_desc *desc)
{
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
    struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

    memset(dctx, 0, sizeof(*dctx));
    memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);

    return 0;
}
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
    }

    memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
    memset(ctx->icv, 0, GHASH_BLOCK_SIZE);

    return 0;
}
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
             const u8 *src, unsigned int srclen)
{
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
    struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
    unsigned int n;
    u8 *buf = dctx->buffer;
    int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
        src += n;

        if (!dctx->bytes) {
            ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
            ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
                          GHASH_BLOCK_SIZE);
            BUG_ON(ret != GHASH_BLOCK_SIZE);
        }
@@ -78,7 +79,7 @@ static int ghash_update(struct shash_desc *desc,

    n = srclen & ~(GHASH_BLOCK_SIZE - 1);
    if (n) {
        ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
        ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
        BUG_ON(ret != n);
        src += n;
        srclen -= n;
@@ -92,7 +93,7 @@ static int ghash_update(struct shash_desc *desc,
    return 0;
}

static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
    u8 *buf = dctx->buffer;
    int ret;
@@ -102,20 +103,19 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)

        memset(pos, 0, dctx->bytes);

        ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
        ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
        BUG_ON(ret != GHASH_BLOCK_SIZE);
    }

    dctx->bytes = 0;
        dctx->bytes = 0;
    }
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
    struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

    ghash_flush(ctx, dctx);
    memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
    ghash_flush(dctx);
    memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);

    return 0;
}

@@ -246,7 +246,7 @@ asmlinkage void execve_tail(void)
{
    current->thread.fp_regs.fpc = 0;
    if (MACHINE_HAS_IEEE)
        asm volatile("sfpc %0,%0" : : "d" (0));
        asm volatile("sfpc %0" : : "d" (0));
}

/*

@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
    jno .Lesa2
    ahi %r15,-80
    stmh    %r6,%r15,96(%r15)       # store upper register halves
    basr    %r13,0
    lmh %r0,%r15,.Lzeroes-.(%r13)   # clear upper register halves
.Lesa2:
#endif
    lr  %r10,%r2            # save string pointer
@@ -293,6 +295,8 @@ ENTRY(_sclp_print_early)
#endif
    lm  %r6,%r15,120(%r15)      # restore registers
    br  %r14
.Lzeroes:
    .fill   64,4,0

.LwritedataS4:
    .long   0x00760005          # SCLP command for write data

@@ -9,6 +9,8 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm.h>
#include <asm/ipl.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>

/*
@@ -137,6 +139,8 @@ int pfn_is_nosave(unsigned long pfn)
{
    unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
    unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
    unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
    unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));

    /* Always save lowcore pages (LC protection might be enabled). */
    if (pfn <= LC_PAGES)
@@ -144,6 +148,8 @@ int pfn_is_nosave(unsigned long pfn)
    if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
        return 1;
    /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
    if (pfn >= stext_pfn && pfn <= eshared_pfn)
        return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
    if (tprot(PFN_PHYS(pfn)))
        return 1;
    return 0;

@@ -347,7 +347,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
    vcpu->arch.sie_block->ecb = 6;
    vcpu->arch.sie_block->eca = 0xC1002001U;
    vcpu->arch.sie_block->fac = (int) (long) facilities;
    hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
    hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
             (unsigned long) vcpu);
    vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

@@ -218,6 +218,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
    for (n = mem->count - 1; n > 0 ; n--)
        memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

    memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
    mem->vm[0].cpus_total = cpus;
    mem->vm[0].cpus_configured = cpus;
    mem->vm[0].cpus_standby = 0;

@@ -409,7 +409,7 @@ static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
        res2.flags = res->flags;
        region.start = base;
        region.end = limit + 0xfff;
        pcibios_bus_to_resource(dev, &res2, &region);
        pcibios_bus_to_resource(dev->bus, &res2, &region);
        if (!res->start)
            res->start = res2.start;
        if (!res->end)
@@ -427,7 +427,7 @@ static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
                          IORESOURCE_MEM);
        region.start = base;
        region.end = limit + 0xfffff;
        pcibios_bus_to_resource(dev, res, &region);
        pcibios_bus_to_resource(dev->bus, res, &region);
    }

    pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
@@ -458,7 +458,7 @@ static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
                          IORESOURCE_MEM | IORESOURCE_PREFETCH);
        region.start = base;
        region.end = limit + 0xfffff;
        pcibios_bus_to_resource(dev, res, &region);
        pcibios_bus_to_resource(dev->bus, res, &region);
    }
}

@@ -480,7 +480,7 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev,
    res->flags = IORESOURCE_IO;
    region.start = (first << 21);
    region.end = (last << 21) + ((1 << 21) - 1);
    pcibios_bus_to_resource(dev, res, &region);
    pcibios_bus_to_resource(dev->bus, res, &region);

    pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
    apb_calc_first_last(map, &first, &last);
@@ -488,7 +488,7 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev,
    res->flags = IORESOURCE_MEM;
    region.start = (first << 29);
    region.end = (last << 29) + ((1 << 29) - 1);
    pcibios_bus_to_resource(dev, res, &region);
    pcibios_bus_to_resource(dev->bus, res, &region);
}

static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
@@ -579,7 +579,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
        res->flags = flags;
        region.start = GET_64BIT(ranges, 1);
        region.end = region.start + size - 1;
        pcibios_bus_to_resource(dev, res, &region);
        pcibios_bus_to_resource(dev->bus, res, &region);
    }
after_ranges:
    sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),

|
@@ -972,7 +972,7 @@ static void __init load_hv_initrd(void)

 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(__pa(begin), end - begin);
+	free_bootmem_late(__pa(begin), end - begin);
 }

 #else

@@ -131,7 +131,7 @@ config SBUS
 	bool

 config NEED_DMA_MAP_STATE
-	def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG)
+	def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB)

 config NEED_SG_DMA_LENGTH
 	def_bool y

@@ -1203,7 +1203,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
 		if (!src)
 			return -ENOMEM;
-		assoc = (src + req->cryptlen + auth_tag_len);
+		assoc = (src + req->cryptlen);
 		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
 		scatterwalk_map_and_copy(assoc, req->assoc, 0,
 			req->assoclen, 0);

@@ -1228,7 +1228,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {
-		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
 		kfree(src);
 	}
 	return retval;

@@ -292,6 +292,7 @@ static struct ahash_alg ghash_async_alg = {
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-clmulni",
 		.cra_priority		= 400,
+		.cra_ctxsize		= sizeof(struct ghash_async_ctx),
 		.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_type		= &crypto_ahash_type,

@@ -279,21 +279,6 @@ static inline void clear_LDT(void)
 	set_ldt(NULL, 0);
 }

-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));

@@ -23,8 +23,32 @@ extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);

 extern bool irq_fpu_usable(void);
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+ * and they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), preempt notifier
+ * should call the __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM for example uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+
+static inline void kernel_fpu_begin(void)
+{
+	WARN_ON_ONCE(!irq_fpu_usable());
+	preempt_disable();
+	__kernel_fpu_begin();
+}
+
+static inline void kernel_fpu_end(void)
+{
+	__kernel_fpu_end();
+	preempt_enable();
+}

 /*
  * Some instructions like VIA's padlock instructions generate a spurious

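Reviewer's note on the hunk above: after this backport, kernel_fpu_begin()/kernel_fpu_end() remain the general-purpose pair and manage preemption themselves, while the new __-prefixed variants serve callers such as KVM that keep preemption disabled across a longer region. A minimal sketch of the ordinary usage; the checksum helpers are hypothetical and not part of this diff:

    #include <asm/i387.h>

    /* Assumed helpers, not in this patch set. */
    extern void checksum_scalar(const void *buf, unsigned long len);
    extern void checksum_simd(const void *buf, unsigned long len); /* touches SSE regs */

    static void checksum(const void *buf, unsigned long len)
    {
        if (!irq_fpu_usable()) {
            /* e.g. an interrupt landed inside another kernel FPU region */
            checksum_scalar(buf, len);
            return;
        }

        kernel_fpu_begin();     /* disables preemption, saves user FPU state */
        checksum_simd(buf, len);
        kernel_fpu_end();       /* sets CR0.TS again, re-enables preemption */
    }
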
@@ -79,11 +79,12 @@ struct iommu_table_entry {
  * d). Similar to the 'init', except that this gets called from pci_iommu_init
  *     where we do have a memory allocator.
  *
- * The standard vs the _FINISH differs in that the _FINISH variant will
- * continue detecting other IOMMUs in the call list after the
- * the detection routine returns a positive number. The _FINISH will
- * stop the execution chain. Both will still call the 'init' and
- * 'late_init' functions if they are set.
+ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
+ * in that the former will continue detecting other IOMMUs in the call
+ * list after the detection routine returns a positive number, while the
+ * latter will stop the execution chain upon first successful detection.
+ * Both variants will still call the 'init' and 'late_init' functions if
+ * they are set.
  */
 #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)		\
 	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)

@@ -87,6 +87,7 @@
 #define GP_VECTOR 13
 #define PF_VECTOR 14
 #define MF_VECTOR 16
+#define AC_VECTOR 17
 #define MC_VECTOR 18

 #define SELECTOR_TI_MASK (1 << 2)

@@ -509,7 +510,7 @@ struct kvm_arch {
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
-	int vapics_in_nmi_mode;
+	atomic_t vapics_in_nmi_mode;

 	unsigned int tss_addr;
 	struct page *apic_access_page;

@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;

 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */

@@ -15,6 +15,51 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 }
 #endif	/* !CONFIG_PARAVIRT */

+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions.  This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates.  On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* smp_read_barrier_depends synchronizes with barrier in install_ldt */
+	ldt = ACCESS_ONCE(mm->context.ldt);
+	smp_read_barrier_depends();
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active.  The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs.  This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */

@@ -52,7 +97,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * load the LDT, if the LDT is different:
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 	}
 #ifdef CONFIG_SMP
 	else {

@@ -65,7 +110,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			 * to make sure to use no freed page tables.
 			 */
 			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 		}
 	}
 #endif

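The ACCESS_ONCE()/smp_read_barrier_depends() read in load_mm_ldt() pairs with a compiler barrier before the pointer store in install_ldt() (see the ldt.c hunks later in this diff). That is the usual publish/consume pattern; a self-contained user-space C11 sketch of the same idea, with hypothetical names:

    #include <stdatomic.h>

    struct table { int *entries; int size; };
    static _Atomic(struct table *) current_table;

    /* Writer: initialize the object fully, then publish the pointer. */
    static void publish(struct table *t)
    {
        atomic_store_explicit(&current_table, t, memory_order_release);
    }

    /* Reader: a consume (or acquire) load guarantees everything reached
     * through the pointer is at least as new as the pointer itself. */
    static int first_entry(void)
    {
        struct table *t = atomic_load_explicit(&current_table,
                                               memory_order_consume);
        return t ? t->entries[0] : -1;
    }

The kernel version additionally relies on an IPI to every CPU using the mm, so a published ldt_struct is never freed while some CPU might still be loading from it.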
@@ -166,6 +166,7 @@
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK		0x18000000
 #define MSR_K8_TSEG_ADDR		0xc0010112
+#define MSR_K8_TSEG_MASK		0xc0010113
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
 #define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem    */

@@ -72,4 +72,6 @@ static inline bool xen_x2apic_para_available(void)
 }
 #endif

+extern void xen_set_iopl_mask(unsigned mask);
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */

@@ -1254,7 +1254,7 @@ void __cpuinit cpu_init(void)
 	load_sp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);

 	clear_all_debug_regs();
 	dbg_restore_debug_regs();

@@ -1302,7 +1302,7 @@ void __cpuinit cpu_init(void)
 	load_sp0(t, thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);

 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

@@ -652,11 +652,14 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg)
 {
 	int i;
+	char *tmp;

 	for (i = 0; i < banks; i++) {
 		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
-		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
+		if (mce_severity(m, tolerant, &tmp) >= MCE_PANIC_SEVERITY) {
+			*msg = tmp;
 			return 1;
+		}
 	}
 	return 0;
 }

@@ -414,11 +414,14 @@ ENTRY(ret_from_fork)
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	jz   retint_restore_args

-	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
-	jnz  int_ret_from_sys_call
-
-	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
-	jmp ret_from_sys_call			# go to the SYSRET fastpath
+	/*
+	 * By the time we get here, we have no idea whether our pt_regs,
+	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+	 * the slow path, or one of the ia32entry paths.
+	 * Use int_ret_from_sys_call to return, since it can safely handle
+	 * all of the above.
+	 */
+	jmp  int_ret_from_sys_call

 	CFI_ENDPROC
 END(ret_from_fork)

@@ -77,29 +77,26 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);

-void kernel_fpu_begin(void)
+void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;

-	WARN_ON_ONCE(!irq_fpu_usable());
-	preempt_disable();
 	if (__thread_has_fpu(me)) {
 		__save_init_fpu(me);
 		__thread_clear_has_fpu(me);
-		/* We do 'stts()' in kernel_fpu_end() */
+		/* We do 'stts()' in __kernel_fpu_end() */
 	} else {
 		percpu_write(fpu_owner_task, NULL);
 		clts();
 	}
 }
-EXPORT_SYMBOL(kernel_fpu_begin);
+EXPORT_SYMBOL(__kernel_fpu_begin);

-void kernel_fpu_end(void)
+void __kernel_fpu_end(void)
 {
 	stts();
-	preempt_enable();
 }
-EXPORT_SYMBOL(kernel_fpu_end);
+EXPORT_SYMBOL(__kernel_fpu_end);

 void unlazy_fpu(struct task_struct *tsk)
 {

@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>

@@ -20,82 +21,87 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>

-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *current_mm)
 {
-	if (current->active_mm == current_mm)
-		load_LDT(&current->active_mm->context);
+	mm_context_t *pc;
+
+	if (current->active_mm != current_mm)
+		return;
+
+	pc = &current->active_mm->context;
+	set_ldt(pc->ldt->entries, pc->ldt->size);
 }
-#endif

-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
 {
-	void *oldldt, *newldt;
-	int oldsize;
+	struct ldt_struct *new_ldt;
+	int alloc_size;

-	if (mincount <= pc->size)
-		return 0;
-	oldsize = pc->size;
-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+	if (size > LDT_ENTRIES)
+		return NULL;
+
+	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+	if (!new_ldt)
+		return NULL;
+
+	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+	alloc_size = size * LDT_ENTRY_SIZE;
+
+	/*
+	 * Xen is very picky: it requires a page-aligned LDT that has no
+	 * trailing nonzero bytes in any page that contains LDT descriptors.
+	 * Keep it simple: zero the whole allocation and never allocate less
+	 * than PAGE_SIZE.
+	 */
+	if (alloc_size > PAGE_SIZE)
+		new_ldt->entries = vzalloc(alloc_size);
 	else
-		newldt = (void *)__get_free_page(GFP_KERNEL);
+		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);

-	if (!newldt)
-		return -ENOMEM;
-
-	if (oldsize)
-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-	oldldt = pc->ldt;
-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
-
-	paravirt_alloc_ldt(newldt, mincount);
-
-#ifdef CONFIG_X86_64
-	/* CHECKME: Do we really need this ? */
-	wmb();
-#endif
-	pc->ldt = newldt;
-	wmb();
-	pc->size = mincount;
-	wmb();
-
-	if (reload) {
-#ifdef CONFIG_SMP
-		preempt_disable();
-		load_LDT(pc);
-		if (!cpumask_equal(mm_cpumask(current->mm),
-				   cpumask_of(smp_processor_id())))
-			smp_call_function(flush_ldt, current->mm, 1);
-		preempt_enable();
-#else
-		load_LDT(pc);
-#endif
+	if (!new_ldt->entries) {
+		kfree(new_ldt);
+		return NULL;
 	}
-	if (oldsize) {
-		paravirt_free_ldt(oldldt, oldsize);
-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(oldldt);
-		else
-			put_page(virt_to_page(oldldt));
-	}
-	return 0;
+
+	new_ldt->size = size;
+	return new_ldt;
 }

-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-	int err = alloc_ldt(new, old->size, 0);
-	int i;
+	paravirt_alloc_ldt(ldt->entries, ldt->size);
+}

-	if (err < 0)
-		return err;
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+			struct ldt_struct *ldt)
+{
+	/* Synchronizes with smp_read_barrier_depends in load_mm_ldt. */
+	barrier();
+	ACCESS_ONCE(current_mm->context.ldt) = ldt;

-	for (i = 0; i < old->size; i++)
-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-	return 0;
+	/* Activate the LDT for all CPUs using current_mm. */
+	smp_call_function_many(mm_cpumask(current_mm), flush_ldt, current_mm,
+			       true);
+	local_irq_disable();
+	flush_ldt(current_mm);
+	local_irq_enable();
+}
+
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+	if (likely(!ldt))
+		return;
+
+	paravirt_free_ldt(ldt->entries, ldt->size);
+	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(ldt->entries);
+	else
+		kfree(ldt->entries);
+	kfree(ldt);
 }

 /*

@@ -104,17 +110,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	struct ldt_struct *new_ldt;
 	struct mm_struct *old_mm;
 	int retval = 0;

 	mutex_init(&mm->context.lock);
-	mm->context.size = 0;
 	old_mm = current->mm;
-	if (old_mm && old_mm->context.size > 0) {
-		mutex_lock(&old_mm->context.lock);
-		retval = copy_ldt(&mm->context, &old_mm->context);
-		mutex_unlock(&old_mm->context.lock);
+	if (!old_mm) {
+		mm->context.ldt = NULL;
+		return 0;
 	}
+
+	mutex_lock(&old_mm->context.lock);
+	if (!old_mm->context.ldt) {
+		mm->context.ldt = NULL;
+		goto out_unlock;
+	}
+
+	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+	if (!new_ldt) {
+		retval = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+	       new_ldt->size * LDT_ENTRY_SIZE);
+	finalize_ldt_struct(new_ldt);
+
+	mm->context.ldt = new_ldt;
+
+out_unlock:
+	mutex_unlock(&old_mm->context.lock);
 	return retval;
 }

@@ -125,53 +151,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 */
 void destroy_context(struct mm_struct *mm)
 {
-	if (mm->context.size) {
-#ifdef CONFIG_X86_32
-		/* CHECKME: Can this ever happen ? */
-		if (mm == current->active_mm)
-			clear_LDT();
-#endif
-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(mm->context.ldt);
-		else
-			put_page(virt_to_page(mm->context.ldt));
-		mm->context.size = 0;
-	}
+	free_ldt_struct(mm->context.ldt);
+	mm->context.ldt = NULL;
 }

 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-	int err;
+	int retval;
 	unsigned long size;
 	struct mm_struct *mm = current->mm;

-	if (!mm->context.size)
-		return 0;
+	mutex_lock(&mm->context.lock);
+
+	if (!mm->context.ldt) {
+		retval = 0;
+		goto out_unlock;
+	}
+
 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

-	mutex_lock(&mm->context.lock);
-	size = mm->context.size * LDT_ENTRY_SIZE;
+	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
 	if (size > bytecount)
 		size = bytecount;

-	err = 0;
-	if (copy_to_user(ptr, mm->context.ldt, size))
-		err = -EFAULT;
-	mutex_unlock(&mm->context.lock);
-	if (err < 0)
-		goto error_return;
+	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+		retval = -EFAULT;
+		goto out_unlock;
+	}
+
 	if (size != bytecount) {
-		/* zero-fill the rest */
-		if (clear_user(ptr + size, bytecount - size) != 0) {
-			err = -EFAULT;
-			goto error_return;
+		/* Zero-fill the rest and pretend we read bytecount bytes. */
+		if (clear_user(ptr + size, bytecount - size)) {
+			retval = -EFAULT;
+			goto out_unlock;
 		}
 	}
-	return bytecount;
-error_return:
-	return err;
+	retval = bytecount;
+
+out_unlock:
+	mutex_unlock(&mm->context.lock);
+	return retval;
 }

 static int read_default_ldt(void __user *ptr, unsigned long bytecount)

@@ -195,6 +215,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	struct desc_struct ldt;
 	int error;
 	struct user_desc ldt_info;
+	int oldsize, newsize;
+	struct ldt_struct *new_ldt, *old_ldt;

 	error = -EINVAL;
 	if (bytecount != sizeof(ldt_info))

@@ -213,34 +235,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		goto out;
 	}

-	mutex_lock(&mm->context.lock);
-	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context,
-				  ldt_info.entry_number + 1, 1);
-		if (error < 0)
-			goto out_unlock;
-	}
-
-	/* Allow LDTs to be cleared by the user. */
-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-		if (oldmode || LDT_empty(&ldt_info)) {
-			memset(&ldt, 0, sizeof(ldt));
-			goto install;
+	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+	    LDT_empty(&ldt_info)) {
+		/* The user wants to clear the entry. */
+		memset(&ldt, 0, sizeof(ldt));
+	} else {
+		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+			error = -EINVAL;
+			goto out;
 		}
+
+		fill_ldt(&ldt, &ldt_info);
+		if (oldmode)
+			ldt.avl = 0;
 	}

-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
-		error = -EINVAL;
+	mutex_lock(&mm->context.lock);
+
+	old_ldt = mm->context.ldt;
+	oldsize = old_ldt ? old_ldt->size : 0;
+	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+	error = -ENOMEM;
+	new_ldt = alloc_ldt_struct(newsize);
+	if (!new_ldt)
 		goto out_unlock;
-	}

-	fill_ldt(&ldt, &ldt_info);
-	if (oldmode)
-		ldt.avl = 0;
+	if (old_ldt)
+		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+	new_ldt->entries[ldt_info.entry_number] = ldt;
+	finalize_ldt_struct(new_ldt);

-	/* Install the new entry ...  */
-install:
-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+	install_ldt(mm, new_ldt);
+	free_ldt_struct(old_ldt);
 	error = 0;

 out_unlock:

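Aside for reviewers: the read_ldt() path reworked above serves the read side of the modify_ldt(2) syscall. A minimal user-space probe of it, x86 Linux only, error handling omitted:

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char buf[16 * 8];  /* room for 16 eight-byte descriptors */

        /* func 0 reads the current LDT; returns bytes copied, 0 if empty. */
        long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

        printf("modify_ldt read %ld bytes\n", n);
        return 0;
    }
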
@@ -49,6 +49,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
+#include <asm/xen/hypervisor.h>

 asmlinkage extern void ret_from_fork(void);

@@ -116,11 +117,11 @@ void __show_regs(struct pt_regs *regs, int all)
 void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
-		if (dead_task->mm->context.size) {
+		if (dead_task->mm->context.ldt) {
 			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
 				dead_task->comm,
-				dead_task->mm->context.ldt,
-				dead_task->mm->context.size);
+				dead_task->mm->context.ldt->entries,
+				dead_task->mm->context.ldt->size);
 			BUG();
 		}
 	}

@@ -419,6 +420,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);

+#ifdef CONFIG_XEN
+	/*
+	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
+	 * current_pt_regs()->flags may not match the current task's
+	 * intended IOPL.  We need to switch it manually.
+	 */
+	if (unlikely(xen_pv_domain() &&
+		     prev->iopl != next->iopl))
+		xen_set_iopl_mask(next->iopl);
+#endif
+
 	return prev_p;
 }

@@ -470,27 +482,59 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);

+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
 unsigned long get_wchan(struct task_struct *p)
 {
-	unsigned long stack;
-	u64 fp, ip;
+	unsigned long start, bottom, top, sp, fp, ip;
 	int count = 0;

 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
-	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+	start = (unsigned long)task_stack_page(p);
+	if (!start)
 		return 0;
-	fp = *(u64 *)(p->thread.sp);
+
+	/*
+	 * Layout of the stack page:
+	 *
+	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+	 * PADDING
+	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+	 * stack
+	 * ----------- bottom = start + sizeof(thread_info)
+	 * thread_info
+	 * ----------- start
+	 *
+	 * The tasks stack pointer points at the location where the
+	 * framepointer is stored. The data on the stack is:
+	 * ... IP FP ... IP FP
+	 *
+	 * We need to read FP and IP, so we need to adjust the upper
+	 * bound by another unsigned long.
+	 */
+	top = start + THREAD_SIZE;
+	top -= 2 * sizeof(unsigned long);
+	bottom = start + sizeof(struct thread_info);
+
+	sp = ACCESS_ONCE(p->thread.sp);
+	if (sp < bottom || sp > top)
+		return 0;
+
+	fp = ACCESS_ONCE(*(unsigned long *)sp);
 	do {
-		if (fp < (unsigned long)stack ||
-		    fp >= (unsigned long)stack+THREAD_SIZE)
+		if (fp < bottom || fp > top)
 			return 0;
-		ip = *(u64 *)(fp+8);
+		ip = ACCESS_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
 		if (!in_sched_functions(ip))
 			return ip;
-		fp = *(u64 *)fp;
-	} while (count++ < 16);
+		fp = ACCESS_ONCE(*(unsigned long *)fp);
+	} while (count++ < 16 && p->state != TASK_RUNNING);
 	return 0;
 }

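The "... IP FP ... IP FP" layout the new comment documents is plain frame-pointer chaining. A user-space sketch of the same walk, assuming a build with -fno-omit-frame-pointer and without the kernel's careful bounds checks:

    #include <stdint.h>
    #include <stdio.h>

    /* fp[0] holds the caller's frame pointer, fp[1] the return address. */
    static void walk_frames(void)
    {
        uintptr_t *fp = (uintptr_t *)__builtin_frame_address(0);
        for (int depth = 0; fp && depth < 16; depth++) {
            printf("#%d ip=%#lx\n", depth, (unsigned long)fp[1]);
            fp = (uintptr_t *)fp[0];   /* follow the saved-FP chain */
        }
    }

    int main(void) { walk_frames(); return 0; }

get_wchan() performs the same walk over another task's stack, which is why every load there is bounds-checked and wrapped in ACCESS_ONCE().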
@@ -439,6 +439,17 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
+
+	/* ASRock */
+	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+		.callback = set_pci_reboot,
+		.ident = "ASRock Q1900DC-ITX",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+		},
+	},
+
 	{	/* Handle problems with rebooting on the Latitude E6320. */
 		.callback = set_pci_reboot,
 		.ident = "Dell Latitude E6320",

@@ -690,9 +701,12 @@ void native_machine_shutdown(void)
 	/* Make certain I only run on the appropriate processor */
 	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));

-	/* O.K Now that I'm on the appropriate processor,
-	 * stop all of the others.
+	/*
+	 * O.K Now that I'm on the appropriate processor, stop all of the
+	 * others. Also disable the local irq to not receive the per-cpu
+	 * timer interrupt which may trigger scheduler's load balance.
 	 */
+	local_irq_disable();
 	stop_other_cpus();
 #endif

@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/ptrace.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>

 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {

@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 		struct desc_struct *desc;
 		unsigned long base;

-		seg &= ~7UL;
+		seg >>= 3;

 		mutex_lock(&child->mm->context.lock);
-		if (unlikely((seg >> 3) >= child->mm->context.size))
+		if (unlikely(!child->mm->context.ldt ||
+			     seg >= child->mm->context.ldt->size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
-			desc = child->mm->context.ldt + seg;
+			desc = &child->mm->context.ldt->entries[seg];
 			base = get_desc_base(desc);

 			/* 16-bit code segment? */

@@ -435,7 +435,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	 * then it's very likely the result of an icebp/int01 trap.
 	 * User wants a sigtrap for that.
 	 */
-	if (!dr6 && user_mode(regs))
+	if (!dr6 && user_mode_vm(regs))
 		user_icebp = 1;

 	/* Catch kmemcheck conditions first of all! */

@@ -18,6 +18,7 @@
 #include <asm/hypervisor.h>
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
+#include <asm/geode.h>

 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);

@@ -800,15 +801,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

 static void __init check_system_tsc_reliable(void)
 {
-#ifdef CONFIG_MGEODE_LX
-	/* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+	if (is_geode_lx()) {
+		/* RTSC counts during suspend */
 #define RTSC_SUSP 0x100
-	unsigned long res_low, res_high;
+		unsigned long res_low, res_high;

-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
-	/* Geode_LX - the OLPC CPU has a very reliable TSC */
-	if (res_low & RTSC_SUSP)
-		tsc_clocksource_reliable = 1;
+		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+		/* Geode_LX - the OLPC CPU has a very reliable TSC */
+		if (res_low & RTSC_SUSP)
+			tsc_clocksource_reliable = 1;
+	}
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;

@@ -4246,7 +4246,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
-	ctxt->dst.orig_val = ctxt->dst.val;
+	/* Copy full 64-bit value for CMPXCHG8B.  */
+	ctxt->dst.orig_val64 = ctxt->dst.val64;

 special_insn:

@@ -317,7 +317,7 @@ static void pit_do_work(struct work_struct *work)
 	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
 	 * VCPU0, and only if its LVT0 is in EXTINT mode.
 	 */
-	if (kvm->arch.vapics_in_nmi_mode > 0)
+	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_apic_nmi_wd_deliver(vcpu);
 }

@@ -761,10 +761,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 		if (!nmi_wd_enabled) {
 			apic_debug("Receive NMI setting on APIC_LVT0 "
 				   "for cpu %d\n", apic->vcpu->vcpu_id);
-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
+			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
 		}
 	} else if (nmi_wd_enabled)
-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
+		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
 }

 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)

@@ -1257,6 +1257,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)

 	apic_update_ppr(apic);
 	hrtimer_cancel(&apic->lapic_timer.timer);
+	apic_manage_nmi_watchdog(apic, apic_get_reg(apic, APIC_LVT0));
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;

@@ -3658,7 +3658,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

-	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
+	mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {

@@ -1081,6 +1081,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	set_exception_intercept(svm, PF_VECTOR);
 	set_exception_intercept(svm, UD_VECTOR);
 	set_exception_intercept(svm, MC_VECTOR);
+	set_exception_intercept(svm, AC_VECTOR);
+	set_exception_intercept(svm, DB_VECTOR);

 	set_intercept(svm, INTERCEPT_INTR);
 	set_intercept(svm, INTERCEPT_NMI);

@@ -1636,20 +1638,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }

-static void update_db_intercept(struct kvm_vcpu *vcpu)
+static void update_bp_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

-	clr_exception_intercept(svm, DB_VECTOR);
 	clr_exception_intercept(svm, BP_VECTOR);

-	if (svm->nmi_singlestep)
-		set_exception_intercept(svm, DB_VECTOR);
-
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-		if (vcpu->guest_debug &
-		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-			set_exception_intercept(svm, DB_VECTOR);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 			set_exception_intercept(svm, BP_VECTOR);
 	} else

@@ -1667,7 +1662,7 @@ static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)

 	mark_dirty(svm->vmcb, VMCB_DR);

-	update_db_intercept(vcpu);
+	update_bp_intercept(vcpu);
 }

 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)

@@ -1741,7 +1736,6 @@ static int db_interception(struct vcpu_svm *svm)
 		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
 			svm->vmcb->save.rflags &=
 				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-		update_db_intercept(&svm->vcpu);
 	}

 	if (svm->vcpu.guest_debug &

@@ -1776,6 +1770,12 @@ static int ud_interception(struct vcpu_svm *svm)
 	return 1;
 }

+static int ac_interception(struct vcpu_svm *svm)
+{
+	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
+	return 1;
+}
+
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

@@ -3291,6 +3291,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
 	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
+	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
 	[SVM_EXIT_INTR]				= intr_interception,
 	[SVM_EXIT_NMI]				= nmi_interception,
 	[SVM_EXIT_SMI]				= nop_on_interception,

@@ -3653,7 +3654,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	 */
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
-	update_db_intercept(vcpu);
 }

 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)

@@ -244,6 +244,7 @@ TRACE_EVENT(kvm_apic,
 	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp" }, \
 	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" }, \
 	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp" }, \
+	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,	"AC excp" }, \
 	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp" }, \
 	{ SVM_EXIT_INTR,			"interrupt" }, \
 	{ SVM_EXIT_NMI,				"nmi" }, \

@@ -1169,7 +1169,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	u32 eb;

 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
-	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
+	     (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
 	if ((vcpu->guest_debug &
 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))

@@ -1455,8 +1455,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (user_has_fpu())
-		clts();
+	/*
+	 * If the FPU is not active (through the host task or
+	 * the guest vcpu), then restore the cr0.TS bit.
+	 */
+	if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
+		stts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }

@@ -3633,7 +3637,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	struct desc_ptr dt;
 	unsigned long cr4;

-	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
+	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

 	/* Save the most likely value for this task's CR4 in the VMCS. */

@@ -4256,6 +4260,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)

 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
 	switch (ex_no) {
+	case AC_VECTOR:
+		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+		return 1;
 	case DB_VECTOR:
 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
 		if (!(vcpu->guest_debug &

@@ -1545,6 +1545,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)

 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	accumulate_steal_time(vcpu);
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;

@@ -1665,12 +1667,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (!(data & KVM_MSR_ENABLED))
 			break;

-		vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
-		preempt_disable();
-		accumulate_steal_time(vcpu);
-		preempt_enable();
-
 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

 		break;

@@ -1918,6 +1914,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_IA32_LASTINTFROMIP:
 	case MSR_IA32_LASTINTTOIP:
 	case MSR_K8_SYSCFG:
+	case MSR_K8_TSEG_ADDR:
+	case MSR_K8_TSEG_MASK:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
 	case MSR_K7_EVNTSEL0:

@@ -2327,7 +2325,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->cpu = cpu;
 	}

-	accumulate_steal_time(vcpu);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }

@@ -5907,7 +5904,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	 */
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
-	unlazy_fpu(current);
+	__kernel_fpu_begin();
 	fpu_restore_checking(&vcpu->arch.guest_fpu);
 	trace_kvm_fpu(1);
 }

@@ -5921,6 +5918,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)

 	vcpu->guest_fpu_loaded = 0;
 	fpu_save_init(&vcpu->arch.guest_fpu);
+	__kernel_fpu_end();
 	++vcpu->stat.fpu_reload;
 	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
 	trace_kvm_fpu(0);

@@ -29,7 +29,6 @@

 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/i387.h>

@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
 		math_abort(FPU_info, SIGILL);
 	}

-	code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+	code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
 	if (SEG_D_SIZE(code_descriptor)) {
 		/* The above test may be wrong, the book is not clear */
 		/* Segmented 32 bit protected mode */

@@ -16,9 +16,24 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>

-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+	static struct desc_struct zero_desc;
+	struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+	seg >>= 3;
+	mutex_lock(&current->mm->context.lock);
+	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+		ret = current->mm->context.ldt->entries[seg];
+	mutex_unlock(&current->mm->context.lock);
+#endif
+	return ret;
+}
+
 #define SEG_D_SIZE(x)		((x).b & (3 << 21))
 #define SEG_G_BIT(x)		((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)

@@ -20,7 +20,6 @@
 #include <linux/stddef.h>

 #include <asm/uaccess.h>
-#include <asm/desc.h>

 #include "fpu_system.h"
 #include "exception.h"

@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 		addr->selector = PM_REG_(segment);
 	}

-	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+	descriptor = FPU_get_ldt_descriptor(segment);
 	base_address = SEG_BASE_ADDR(descriptor);
 	address = base_address + offset;
 	limit = base_address

@@ -155,7 +155,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 	}
 	cleanup_addr = proglen; /* epilogue address */

-	for (pass = 0; pass < 10; pass++) {
+	/* JITed image shrinks with every pass and the loop iterates
+	 * until the image stops shrinking. Very large bpf programs
+	 * may converge on the last pass. In such case do one more
+	 * pass to emit the final image
+	 */
+	for (pass = 0; pass < 10 || image; pass++) {
 		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;

@@ -70,6 +70,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
 		},
 	},
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
+	{
+		.callback = set_use_crs,
+		.ident = "Foxconn K8M890-8237A",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
+			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+		},
+	},

 	/* Now for the blacklist.. */

@@ -22,6 +22,7 @@
 #include <asm/suspend.h>
 #include <asm/debugreg.h>
 #include <asm/fpu-internal.h> /* pcntxt_mask */
+#include <asm/mmu_context.h>

 #ifdef CONFIG_X86_32
 static struct saved_context saved_context;

@@ -148,7 +149,7 @@ static void fix_processor_context(void)
 	syscall_init();				/* This sets MSR_*STAR and related */
 #endif
 	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
+	load_mm_ldt(current->active_mm);	/* This does lldt */
 }

 /**

@@ -17,6 +17,7 @@
 	.text
 	.globl __kernel_sigreturn
 	.type __kernel_sigreturn,@function
+	nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
 	ALIGN
 __kernel_sigreturn:
 .LSTART_sigreturn:

@@ -413,6 +413,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;

 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);

@@ -422,6 +423,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)

 	pte = pfn_pte(pfn, prot);

+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables.  We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space.  In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall.  We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();

@@ -433,6 +460,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 		BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }

 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)

@@ -440,6 +469,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;

+	/*
+	 * We need to mark the all aliases of the LDT pages RO.  We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT.  The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
+
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }

@@ -820,7 +860,7 @@ static void xen_load_sp0(struct tss_struct *tss,
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }

-static void xen_set_iopl_mask(unsigned mask)
+void xen_set_iopl_mask(unsigned mask)
 {
 	struct physdev_set_iopl set_iopl;

@@ -274,7 +274,7 @@ char * __init xen_memory_setup(void)
 	xen_ignore_unusable(map, memmap.nr_entries);

 	/* Make sure the Xen-supplied memory map is well-ordered. */
-	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

 	max_pages = xen_get_max_pages();
 	if (max_pages > max_pfn)

@@ -420,13 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	do {
 		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
 			return -ENOMEM;
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		rc = idr_get_new(&ext_devt_idr, part, &idx);
 		if (!rc && idx >= NR_EXT_DEVT) {
 			idr_remove(&ext_devt_idr, idx);
 			rc = -EBUSY;
 		}
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	} while (rc == -EAGAIN);

 	if (rc)

@@ -451,9 +451,9 @@ void blk_free_devt(dev_t devt)
 		return;

 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}
 }

@@ -684,13 +684,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 	} else {
 		struct hd_struct *part;

-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 		if (part && get_disk(part_to_disk(part))) {
 			*partno = part->partno;
 			disk = part_to_disk(part);
 		}
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}

 	return disk;

@@ -715,7 +715,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}

@@ -487,7 +487,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	struct crypto_alg *base = &alg->halg.base;

 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8)
+	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize == 0)
 		return -EINVAL;

 	base->cra_type = &crypto_ahash_type;

@@ -386,7 +386,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
 		crypto_alg_tested(larval->alg.cra_driver_name, 0);
 	}

-	err = wait_for_completion_interruptible(&larval->completion);
+	err = wait_for_completion_killable(&larval->completion);
 	WARN_ON(err);

 out:

@@ -178,7 +178,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
 	struct crypto_larval *larval = (void *)alg;
 	long timeout;

-	timeout = wait_for_completion_interruptible_timeout(
+	timeout = wait_for_completion_killable_timeout(
 		&larval->completion, 60 * HZ);

 	alg = larval->adult;

@@ -457,7 +457,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}

@@ -586,7 +586,7 @@ void *crypto_alloc_tfm(const char *alg_name,
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}

@@ -350,7 +350,7 @@ static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
 		err = PTR_ERR(alg);
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}

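All of the crypto hunks above make the same substitution: only a fatal signal (one that will kill the task, e.g. SIGKILL) now aborts the wait for an algorithm to become available, so ordinary signals no longer surface as spurious -EINTR while a module auto-loads. The shared retry idiom, sketched with a hypothetical try_instantiate() helper:

    int err;

    for (;;) {
        err = try_instantiate();            /* hypothetical single attempt */
        if (err != -EAGAIN)
            break;                          /* success or a hard error */
        if (fatal_signal_pending(current)) {
            err = -EINTR;                   /* give up only if we are dying */
            break;
        }
    }
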