@@ -112,14 +112,11 @@ min_adv_mss - INTEGER
 IP Fragmentation:
 
-ipfrag_high_thresh - INTEGER
-	Maximum memory used to reassemble IP fragments. When
-	ipfrag_high_thresh bytes of memory is allocated for this purpose,
-	the fragment handler will toss packets until ipfrag_low_thresh
-	is reached. This also serves as a maximum limit to namespaces
-	different from the initial one.
-
-ipfrag_low_thresh - INTEGER
+ipfrag_high_thresh - LONG INTEGER
+	Maximum memory used to reassemble IP fragments.
+
+ipfrag_low_thresh - LONG INTEGER
+	(Obsolete since linux-4.4.174, backported from linux-4.17)
+	Maximum memory used to reassemble IP fragments before the kernel
+	begins to remove incomplete fragment queues to free up resources.
+	The kernel still accepts new fragments for defragmentation.
......
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 172
+SUBLEVEL = 176
 EXTRAVERSION =
 NAME = Blurry Fish Butt
......
@@ -55,15 +55,15 @@
 #elif defined(CONFIG_ALPHA_DP264) || \
       defined(CONFIG_ALPHA_LYNX)  || \
-      defined(CONFIG_ALPHA_SHARK) || \
-      defined(CONFIG_ALPHA_EIGER)
+      defined(CONFIG_ALPHA_SHARK)
 # define NR_IRQS	64
 
 #elif defined(CONFIG_ALPHA_TITAN)
 #define NR_IRQS		80
 
 #elif defined(CONFIG_ALPHA_RAWHIDE) || \
-      defined(CONFIG_ALPHA_TAKARA)
+      defined(CONFIG_ALPHA_TAKARA) || \
+      defined(CONFIG_ALPHA_EIGER)
 # define NR_IRQS	128
 
 #elif defined(CONFIG_ALPHA_WILDFIRE)
......
@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
 /* Macro for exception fixup code to access integer registers.  */
 #define dpf_reg(r)							\
 	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
-				 (r) <= 18 ? (r)+8 : (r)-10])
+				 (r) <= 18 ? (r)+10 : (r)-10])
 
 asmlinkage void
 do_page_fault(unsigned long address, unsigned long mmcsr,
......
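Why +10 and not +8: on alpha, pt_regs stores r0-r8 first, then r19-r28, then hae, the three trap arguments, ps, pc and gp, with r16-r18 last. A short sketch of the index arithmetic under that layout (the slot numbers are my reading of the struct, so treat them as an assumption):

	/* dpf_reg() maps a register number to its pt_regs slot (assumed layout):
	 *   r0..r8   -> slots 0..8
	 *   r19..r28 -> slots 9..18   ((r) - 10)
	 *   hae, trap_a0..a2, ps, pc, gp -> slots 19..25
	 *   r16..r18 -> slots 26..28  ((r) + 10; the old (r) + 8 hit pc/gp/r16)
	 * r9..r15 are reached below pt_regs via the negative (r) - 16 indices. */
	static int dpf_slot(int r)
	{
		return r <= 8 ? r : r <= 15 ? r - 16 : r <= 18 ? r + 10 : r - 10;
	}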
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
......
@@ -147,7 +147,7 @@
 	sound {
 		compatible = "simple-audio-card";
-		simple-audio-card,name = "DA850/OMAP-L138 EVM";
+		simple-audio-card,name = "DA850-OMAPL138 EVM";
 		simple-audio-card,widgets =
 			"Line", "Line In",
 			"Line", "Line Out";
......
@@ -35,8 +35,8 @@
 		compatible = "gpio-fan";
 		pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
 		pinctrl-names = "default";
-		gpios = <&gpio1 14 GPIO_ACTIVE_LOW
-			 &gpio1 13 GPIO_ACTIVE_LOW>;
+		gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+			 &gpio1 13 GPIO_ACTIVE_HIGH>;
 		gpio-fan,speed-map = <0    0
 				      3000 1
 				      6000 2>;
......
@@ -220,12 +220,15 @@
 			status = "disabled";
 		};
 
-		twsi2: i2c@d4025000 {
+		twsi2: i2c@d4031000 {
 			compatible = "mrvl,mmp-twsi";
-			reg = <0xd4025000 0x1000>;
-			interrupts = <58>;
+			reg = <0xd4031000 0x1000>;
+			interrupt-parent = <&intcmux17>;
+			interrupts = <0>;
+			clocks = <&soc_clocks MMP2_CLK_TWSI1>;
+			resets = <&soc_clocks MMP2_CLK_TWSI1>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			status = "disabled";
 		};
......
@@ -33,6 +33,7 @@
 		gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
 		enable-active-high;
 		regulator-boot-on;
+		startup-delay-us = <25000>;
 	};
 
 	vbat: fixedregulator-vbat {
......
@@ -796,6 +796,21 @@ void smp_send_stop(void)
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
+ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
+ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
+ * kdump fails. So split out the panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+		 smp_processor_id());
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		cpu_relax();
+}
+
 /*
  * not supported here
  */
......
@@ -118,6 +118,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
+	/*
+	 * The MMIO instruction is emulated and should not be re-executed
+	 * in the guest.
+	 */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
 	return 0;
 }
@@ -151,11 +157,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 	vcpu->arch.mmio_decode.sign_extend = sign_extend;
 	vcpu->arch.mmio_decode.rt = rt;
 
-	/*
-	 * The MMIO instruction is emulated and should not be re-executed
-	 * in the guest.
-	 */
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-
 	return 0;
 }
......
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
 	} else /* remote PCI bus */
 		base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-	return base + (where & 0xffc) + (devfn << 12);
+	return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
......
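The removed "& 0xffc" rounded every config-space offset down to a dword boundary. That is harmless for 32-bit accesses, but map_bus() feeds the generic pci_generic_config_read()/write() helpers, which expect a byte-exact address, so 1- and 2-byte writes silently hit a neighbouring register. A standalone illustration of the redirection (0x18/0x19 are the standard PCI_PRIMARY_BUS/PCI_SECONDARY_BUS bridge offsets):

	#include <stdio.h>

	int main(void)
	{
		unsigned where = 0x19;         /* PCI_SECONDARY_BUS, one byte wide */
		unsigned old = where & 0xffc;  /* -> 0x18: lands on PCI_PRIMARY_BUS */
		unsigned fixed = where;        /* -> 0x19: byte-exact, as the generic ops need */
		printf("old=%#x fixed=%#x\n", old, fixed);
		return 0;
	}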
@@ -394,7 +394,11 @@ static int __init_refok impd1_probe(struct lm_device *dev)
 			      sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
 			      GFP_KERNEL);
 	chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
-	mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+	mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+				  "lm%x:00700", dev->id);
+	if (!lookup || !chipname || !mmciname)
+		return -ENOMEM;
+
 	lookup->dev_id = mmciname;
 	/*
 	 * Offsets on GPIO block 1:
......
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
 /*
  * N2100 PCI.
  */
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int irq;
@@ -2526,7 +2526,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
  * a stub; implementing this properly requires iclk autoidle usecounting in
  * the clock code.  No return value.
  */
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
 {
 	struct omap_hwmod_ocp_if *os;
 	struct list_head *p;
@@ -2561,7 +2561,7 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
  * reset.  Returns 0 upon success or a negative error code upon
  * failure.
  */
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
 {
 	int r;
@@ -2622,7 +2622,7 @@ static int _setup_reset(struct omap_hwmod *oh)
  *
  * No return value.
  */
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
 {
 	u8 postsetup_state;
......
@@ -547,7 +547,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
 	.exit		= cm_x300_u2d_exit,
 };
 
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
 {
 	pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
 }
......
@@ -183,7 +183,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
 	.lcd_conn		= LCD_COLOR_TFT_16BPP,
 };
 
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
 {
 	pxa_set_fb_info(NULL, &littleton_lcd_info);
 }
......
@@ -558,7 +558,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
 	.flags		= ENABLE_PORT_ALL | POWER_SENSE_LOW,
 };
 
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
 {
 	/* Port 2 is shared between host and client interface. */
 	UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
......
@@ -78,7 +78,6 @@
 	.macro mcount_get_lr reg
 	ldr	\reg, [x29]
 	ldr	\reg, [\reg, #8]
-	mcount_adjust_addr	\reg, \reg
 	.endm
 
 	.macro mcount_get_lr_addr reg
......
@@ -26,6 +26,8 @@
 #include <asm/virt.h>
 
 	.text
+	.pushsection	.hyp.text, "ax"
+
 	.align 11
 ENTRY(__hyp_stub_vectors)
......
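A side note on the ".align 11" context line above: VBAR_EL2 treats bits [10:0] of the vector base address as RES0, so the exception vector table must sit on a 2 KiB boundary. A small self-contained check of that arithmetic (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* VBAR_EL2 bits [10:0] are RES0: 1 << 11 = 2048-byte alignment required. */
	static int vbar_aligned(uint64_t base)
	{
		return (base & ((1u << 11) - 1)) == 0;
	}

	int main(void)
	{
		/* 0x...0800 is not 2 KiB aligned; 0x...1000 is */
		printf("%d %d\n", vbar_aligned(0x80000800), vbar_aligned(0x80001000));
		return 0;
	}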
@@ -70,10 +70,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
 	struct page *page = pte_page(pte);
 
-	/* no flushing needed for anonymous pages */
-	if (!page_mapping(page))
-		return;
-
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
 		__flush_dcache_area(page_address(page),
 				    PAGE_SIZE << compound_order(page));
......
@@ -290,8 +290,8 @@ enum mm_32a_minor_op {
 	mm_ext_op = 0x02c,
 	mm_pool32axf_op = 0x03c,
 	mm_srl32_op = 0x040,
-	mm_srlv32_op = 0x050,
 	mm_sra_op = 0x080,
+	mm_srlv32_op = 0x090,
 	mm_rotr_op = 0x0c0,
 	mm_lwxs_op = 0x118,
 	mm_addu32_op = 0x150,
......
@@ -424,5 +424,5 @@ void mips_cm_error_report(void)
 	}
 
 	/* reprime cause register */
-	write_gcr_error_cause(0);
+	write_gcr_error_cause(cm_error);
 }
......
@@ -571,6 +571,11 @@ static int __init octeon_pci_setup(void)
 	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
 		return 0;
 
+	if (!octeon_is_pci_host()) {
+		pr_notice("Not in host mode, PCI Controller not initialized\n");
+		return 0;
+	}
+
 	/* Point pcibios_map_irq() to the PCI version of it */
 	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -582,11 +587,6 @@ static int __init octeon_pci_setup(void)
 	else
 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-	if (!octeon_is_pci_host()) {
-		pr_notice("Not in host mode, PCI Controller not initialized\n");
-		return 0;
-	}
-
 	/* PCI I/O and PCI MEM values */
 	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
 	ioport_resource.start = 0;
......
@@ -107,7 +107,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
@@ -143,7 +143,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
......
@@ -59,7 +59,7 @@
 #endif
 
 #define access_ok(type, addr, size)		\
-	(__chk_user_ptr(addr),			\
+	(__chk_user_ptr(addr), (void)(type),	\
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
 
 /*
......
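The new "(void)(type)" makes the macro consume its first argument: callers that pass a variable used for nothing else no longer trip -Wunused-but-set-variable. A minimal standalone illustration of the idiom (all names hypothetical):

	#include <stdio.h>

	/* Evaluate "type" for its side effects/diagnostics, then discard it;
	 * without the (void) cast the argument would be ignored entirely. */
	#define range_ok(type, addr, size) \
		((void)(type), (addr) != NULL && (size) <= 4096)

	int main(void)
	{
		int verify_write = 1;   /* only ever used as the "type" argument */
		char buf[16];
		printf("%d\n", range_ok(verify_write, buf, sizeof(buf)));
		return 0;
	}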
@@ -280,6 +280,8 @@ int dlpar_detach_node(struct device_node *dn)
 	if (rc)
 		return rc;
 
+	of_node_put(dn);
+
 	return 0;
 }
......
@@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void)
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;
 
-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
......
@@ -833,6 +833,8 @@ void __init setup_arch(char **cmdline_p)
 		pr_info("Linux is running under KVM in 64-bit mode\n");
 	else if (MACHINE_IS_LPAR)
 		pr_info("Linux is running natively in 64-bit mode\n");
+	else
+		pr_info("Linux is running as a guest in 64-bit mode\n");
 
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
......
@@ -360,9 +360,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+	struct _lowcore *lc = pcpu_devices->lowcore;
+
+	if (pcpu_devices[0].address == stap())
+		lc = &S390_lowcore;
+
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->lowcore->panic_stack -
-		      PANIC_FRAME_OFFSET + PAGE_SIZE);
+		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev,
 {
 	int rc;
 
+	rc = lock_device_hotplug_sysfs();
+	if (rc)
+		return rc;
 	rc = smp_rescan_cpus();
+	unlock_device_hotplug();
 	return rc ? rc : count;
 }
 static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
......
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_clear_bits(pte, _PAGE_RW);
+	if (likely(pte_get_bits(pte, _PAGE_RW)))
+		pte_clear_bits(pte, _PAGE_RW);
+	else
+		return pte;
 	return(pte_mknewprot(pte));
 }
 
 static inline pte_t pte_mkread(pte_t pte)
 {
+	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+		return pte;
 	pte_set_bits(pte, _PAGE_USER);
 	return(pte_mknewprot(pte));
 }
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 static inline pte_t pte_mkwrite(pte_t pte)
 {
+	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+		return pte;
 	pte_set_bits(pte, _PAGE_RW);
 	return(pte_mknewprot(pte));
 }
......
@@ -25,8 +25,8 @@ static inline u16 i8254(void)
 	u16 status, timer;
 
 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer  = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
......
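This one is easy to get backwards: the kernel's x86 port-I/O helpers take the value first and the port second, and since both arguments are small integers the swapped call compiled silently while programming the wrong port. The signatures involved, for reference:

	/* Linux x86 convention (value first, port second): */
	void outb(u8 value, u16 port);   /* write one byte to an I/O port */
	u8   inb(u16 port);              /* read one byte from an I/O port */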
@@ -294,11 +294,23 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 	lockdep_sys_exit();
 
+again:
 	cached_flags = READ_ONCE(ti->flags);
+
 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
 		exit_to_usermode_loop(regs, cached_flags);
 
+	if (ipipe_user_intret_notifier_enabled(ti)) {
+		int ret;
+
+		enable_local_irqs();
+		ret = __ipipe_notify_user_intreturn();
+		disable_local_irqs();
+
+		if (ret == 0)
+			goto again;
+	}
+
 #ifdef CONFIG_COMPAT
 	/*
 	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
@@ -359,7 +371,8 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
 	 * want to run them exactly once per syscall exit with IRQs on.
 	 */
 	if (unlikely((!IS_ENABLED(CONFIG_IPIPE) ||
-		      syscall_get_nr(current, regs) < NR_syscalls) &&
+		      syscall_get_nr(current, regs) <
+		      ipipe_root_nr_syscalls(ti)) &&
 		     (cached_flags & SYSCALL_EXIT_WORK_FLAGS)))
 		syscall_slow_exit_work(regs, cached_flags);
......
@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
 /*
  * fill in the user structure for a core dump..
  */
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
 {
 	u32 fs, gs;
 
 	memset(dump, 0, sizeof(*dump));
@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	has_dumped = 1;
+
+	fill_dump(cprm->regs, &dump);
+
 	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
 	dump.u_ar0 = offsetof(struct user32, regs);
 	dump.signal = cprm->siginfo->si_signo;
-	dump_thread32(cprm->regs, &dump);
 
 	/*
 	 * If the size of the dump file exceeds the rlimit, then see
......
@@ -94,6 +94,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 #define user_insn(insn, output, input...)				\
 ({									\
 	int err;							\
+									\
+	might_fault();							\
+									\
 	asm volatile(ASM_STAC "\n"					\
 		     "1:" #insn "\n\t"					\
 		     "2: " ASM_CLAC "\n"				\
......
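might_fault() documents that the macro is about to touch user memory and, with sleep/lockdep debugging enabled, flags callers that reach it from atomic context. A hypothetical helper showing the same annotation pattern (names are illustrative, not from this patch):

	#include <linux/kernel.h>
	#include <linux/uaccess.h>

	/* Annotate any path that dereferences a user pointer. */
	static int read_user_flag(const int __user *p, int *out)
	{
		might_fault();             /* may sleep to service a page fault */
		return get_user(*out, p);  /* 0 on success, -EFAULT on bad pointer */
	}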
@@ -57,4 +57,9 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 #define __ipipe_check_root_resched() 0
 #endif
 
+#ifdef CONFIG_IA32_EMULATION
+#define ipipe_root_nr_syscalls(ti)					\
+	((ti->status & TS_COMPAT) ? IA32_NR_syscalls : NR_syscalls)
+#endif /* CONFIG_IA32_EMULATION */
+
 #endif /* !__X86_IPIPE_64_H */
......
@@ -163,10 +163,12 @@ struct thread_info {
 #define TIP_MAYDAY	0	/* MAYDAY call is pending */
 #define TIP_NOTIFY	1	/* Notify head domain about kernel events */
 #define TIP_HEAD	2	/* Runs in head domain */
+#define TIP_USERINTRET	3	/* Notify on IRQ/trap return to root userspace */
 
 #define _TIP_MAYDAY	(1 << TIP_MAYDAY)
 #define _TIP_NOTIFY	(1 << TIP_NOTIFY)
 #define _TIP_HEAD	(1 << TIP_HEAD)
+#define _TIP_USERINTRET	(1 << TIP_USERINTRET)
 
 #define STACK_WARN	(THREAD_SIZE/8)
......
@@ -48,7 +48,8 @@ enum {
 	BIOS_STATUS_SUCCESS		=  0,
 	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
 	BIOS_STATUS_EINVAL		= -EINVAL,
-	BIOS_STATUS_UNAVAIL		= -EBUSY
+	BIOS_STATUS_UNAVAIL		= -EBUSY,
+	BIOS_STATUS_ABORT		= -EINTR,
 };
 
 /*
@@ -111,4 +112,9 @@ extern long system_serial_number;
 
 extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
+/*
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+ */
+extern struct semaphore __efi_uv_runtime_lock;
+
 #endif /* _ASM_X86_UV_BIOS_H */
......
@@ -671,6 +671,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 		}
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
 			m->bank = i;
+			*msg = tmp;
 			ret = 1;
 		}
......
@@ -1081,6 +1081,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
 	.id_table	= snbep_uncore_pci_ids,
 };
 
+#define NODE_ID_MASK	0x7
+
 /*
  * build pci bus to socket mapping
  */
@@ -1102,7 +1104,7 @@ static int snbep_pci2phy_map_init(int devid)
 		err = pci_read_config_dword(ubox_dev, 0x40, &config);
 		if (err)
 			break;
-		nodeid = config;
+		nodeid = config & NODE_ID_MASK;
 		/* get the Node ID mapping */
 		err = pci_read_config_dword(ubox_dev, 0x54, &config);
 		if (err)
......
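The UBOX register at offset 0x40 carries more than the node id; only the low three bits are the id, so the raw dword must be masked before it is used to build the bus-to-socket mapping. A standalone illustration:

	#include <stdio.h>

	#define NODE_ID_MASK 0x7   /* node id lives in bits 2:0 of the config dword */

	int main(void)
	{
		unsigned config = 0x00000305;        /* hypothetical register value */
		int nodeid = config & NODE_ID_MASK;  /* -> 5, not 0x305 */
		printf("nodeid=%d\n", nodeid);
		return 0;
	}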
@@ -58,6 +58,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
 		val = (s32)value;
 		break;
 	case R_X86_64_PC32:
+	case R_X86_64_PLT32:
 		val = (u32)(value - loc);
 		break;
 	default:
......
@@ -4156,6 +4156,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
 
 static bool svm_has_emulated_msr(int index)
 {
+	switch (index) {
+	case MSR_IA32_MCG_EXT_CTL:
+		return false;
+	default:
+		break;
+	}
+
 	return true;
 }
......
@@ -4632,7 +4632,9 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
 {
 	u8 mode = 0;
 
-	if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+	if (cpu_has_secondary_exec_ctrls() &&
+	    (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
 		mode |= MSR_BITMAP_MODE_X2APIC;
 		if (enable_apicv)
 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
@@ -6969,6 +6971,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	if (!vmx->nested.vmxon)
 		return;
 
+	hrtimer_cancel(&vmx->nested.preemption_timer);
 	vmx->nested.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
......
@@ -4292,6 +4292,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
+	/*
+	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+	 * is returned, but our callers are not ready for that and they blindly
+	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
+	 * uninitialized kernel stack memory into cr2 and error code.
+	 */
+	memset(exception, 0, sizeof(*exception));
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 					  exception);
 }
@@ -5569,8 +5576,7 @@ restart:
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE &&
-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		if (r == EMULATE_DONE && ctxt->tf)
 			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
......
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
 	word1 = read_pci_config_16(bus, slot, func, 0xc0);
 	word2 = read_pci_config_16(bus, slot, func, 0xc2);
 	if (word1 != word2) {
-		res.start = (word1 << 16) | 0x0000;
-		res.end   = (word2 << 16) | 0xffff;
+		res.start = ((resource_size_t) word1 << 16) | 0x0000;
+		res.end   = ((resource_size_t) word2 << 16) | 0xffff;
 		res.flags = IORESOURCE_MEM;
 		update_res(info, res.start, res.end, res.flags, 0);
 	}
......
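The u16 values are promoted to 32-bit int before the shift, so a word at 0x8000 or above sets bit 31 and then sign-extends when stored into a 64-bit resource_size_t. A standalone demonstration of the difference:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t word2 = 0xfff0;
		uint64_t bad  = (word2 << 16) | 0xffff;           /* int math, sign-extends */
		uint64_t good = ((uint64_t)word2 << 16) | 0xffff; /* 64-bit math */
		printf("bad=%#llx good=%#llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		/* bad = 0xfffffffffff0ffff, good = 0xfff0ffff */
		return 0;
	}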
@@ -28,7 +28,8 @@
 
 static struct uv_systab uv_systab;
 
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+			  u64 a4, u64 a5)
 {
 	struct uv_systab *tab = &uv_systab;
 	s64 ret;
@@ -43,6 +44,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 			a1, a2, a3, a4, a5);
 	return ret;
 }
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+	s64 ret;
+
+	if (down_interruptible(&__efi_uv_runtime_lock))
+		return BIOS_STATUS_ABORT;
+
+	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+	up(&__efi_uv_runtime_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(uv_bios_call);
 
 s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -51,10 +65,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
 	unsigned long bios_flags;
 	s64 ret;
 
+	if (down_interruptible(&__efi_uv_runtime_lock))
+		return BIOS_STATUS_ABORT;
+
 	local_irq_save(bios_flags);
-	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
 	local_irq_restore(bios_flags);
 
+	up(&__efi_uv_runtime_lock);
+
 	return ret;
 }
......
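Worth calling out the ordering in the irqsave variant: the semaphore is taken before local_irq_save(), because down_interruptible() may sleep, and once interrupts are off the code must call the raw __uv_bios_call() rather than the locking wrapper, or it would try to take the lock twice. Condensed to the bare pattern (names as in the hunk above):

	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;	/* sleepable: acquire with irqs on */

	local_irq_save(bios_flags);
	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);	/* raw call: lock held */
	local_irq_restore(bios_flags);

	up(&__efi_uv_runtime_lock);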
@@ -875,7 +875,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
 	int ret = 0;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
+	if (irq < 0)
+		return irq;
+	if (!irq)
 		return -EINVAL;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
......
@@ -862,6 +862,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 		return;
 
 	mutex_lock(&gdp_mutex);
+	if (!kobject_has_children(glue_dir))
+		kobject_del(glue_dir);
 	kobject_put(glue_dir);
 	mutex_unlock(&gdp_mutex);
 }
......
@@ -632,14 +632,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
 		if (rv == SS_TWO_PRIMARIES) {
 			/* Maybe the peer is detected as dead very soon...
 			   retry at most once more in this case. */
-			int timeo;
-			rcu_read_lock();
-			nc = rcu_dereference(connection->net_conf);
-			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
-			rcu_read_unlock();
-			schedule_timeout_interruptible(timeo);
-			if (try < max_tries)
+			if (try < max_tries) {
+				int timeo;
 				try = max_tries - 1;
+				rcu_read_lock();
+				nc = rcu_dereference(connection->net_conf);
+				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+				rcu_read_unlock();
+				schedule_timeout_interruptible(timeo);
+			}
 			continue;
 		}
 
 		if (rv < SS_SUCCESS) {
......
@@ -3126,7 +3126,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
 	enum drbd_conns rv = C_MASK;
 	enum drbd_disk_state mydisk;
 	struct net_conf *nc;
-	int hg, rule_nr, rr_conflict, tentative;
+	int hg, rule_nr, rr_conflict, tentative, always_asbp;
 
 	mydisk = device->state.disk;
 	if (mydisk == D_NEGOTIATING)
@@ -3168,8 +3168,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
 
 	rcu_read_lock();
 	nc = rcu_dereference(peer_device->connection->net_conf);
+	always_asbp = nc->always_asbp;
+	rr_conflict = nc->rr_conflict;
+	tentative = nc->tentative;
+	rcu_read_unlock();
 
-	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
+	if (hg == 100 || (hg == -100 && always_asbp)) {
 		int pcount = (device->state.role == R_PRIMARY)
 			   + (peer_role == R_PRIMARY);
 		int forced = (hg == -100);
@@ -3208,9 +3212,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
 			     "Sync from %s node\n",
 			     (hg < 0) ? "peer" : "this");
 	}
-	rr_conflict = nc->rr_conflict;
-	tentative = nc->tentative;
-	rcu_read_unlock();
 
 	if (hg == -100) {
 		/* FIXME this log message is not correct if we end up here
@@ -3889,7 +3890,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
 	kfree(device->p_uuid);
 	device->p_uuid = p_uuid;
 
-	if (device->state.conn < C_CONNECTED &&
+	if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
 	    device->state.disk < D_INCONSISTENT &&
 	    device->state.role == R_PRIMARY &&
 	    (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
......
@@ -81,6 +81,7 @@
 #include <asm/uaccess.h>
 
 static DEFINE_IDR(loop_index_idr);
+static DEFINE_MUTEX(loop_index_mutex);
 static DEFINE_MUTEX(loop_ctl_mutex);
 
 static int max_part;
@@ -1570,11 +1571,9 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo;
-	int err;
+	int err = 0;
 
-	err = mutex_lock_killable(&loop_ctl_mutex);
-	if (err)
-		return err;
+	mutex_lock(&loop_index_mutex);
 	lo = bdev->bd_disk->private_data;
 	if (!lo) {
 		err = -ENXIO;
@@ -1583,20 +1582,18 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&lo->lo_refcnt);
 out:
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&loop_index_mutex);
 	return err;
 }
 
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-	struct loop_device *lo;
 	int err;
 
-	mutex_lock(&loop_ctl_mutex);
-	lo = disk->private_data;
 	if (atomic_dec_return(&lo->lo_refcnt))
-		goto out_unlock;
+		return;
 
+	mutex_lock(&loop_ctl_mutex);
 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
 		/*
 		 * In autoclear mode, stop the loop thread
@@ -1613,10 +1610,16 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 		loop_flush(lo);
 	}
 
-out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
 }
 
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+	mutex_lock(&loop_index_mutex);
+	__lo_release(disk->private_data);
+	mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
 	.owner =	THIS_MODULE,
 	.open =		lo_open,
@@ -1896,7 +1899,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
 	struct kobject *kobj;
 	int err;
 
-	mutex_lock(&loop_ctl_mutex);
+	mutex_lock(&loop_index_mutex);
 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
 	if (err < 0)
 		err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -1904,7 +1907,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
 		kobj = NULL;
 	else
 		kobj = get_disk(lo->lo_disk);
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&loop_index_mutex);
 
 	*part = 0;
 	return kobj;
@@ -1914,13 +1917,9 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 			       unsigned long parm)
 {
 	struct loop_device *lo;
-	int ret;
+	int ret = -ENOSYS;
 
-	ret = mutex_lock_killable(&loop_ctl_mutex);
-	if (ret)
-		return ret;
-
-	ret = -ENOSYS;
+	mutex_lock(&loop_index_mutex);
 	switch (cmd) {
 	case LOOP_CTL_ADD:
 		ret = loop_lookup(&lo, parm);
@@ -1934,15 +1933,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
+		mutex_lock(&loop_ctl_mutex);
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
+			mutex_unlock(&loop_ctl_mutex);
 			break;
 		}
 		if (atomic_read(&lo->lo_refcnt) > 0) {
 			ret = -EBUSY;
+			mutex_unlock(&loop_ctl_mutex);
 			break;
 		}
 		lo->lo_disk->private_data = NULL;
+		mutex_unlock(&loop_ctl_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
 		break;
@@ -1952,7 +1955,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 			break;
 		ret = loop_add(&lo, -1);
 	}
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&loop_index_mutex);
 
 	return ret;
 }
@@ -2035,10 +2038,10 @@ static int __init loop_init(void)
 				  THIS_MODULE, loop_probe, NULL, NULL);
 
 	/* pre-create number of devices given by config or max_loop */
-	mutex_lock(&loop_ctl_mutex);
+	mutex_lock(&loop_index_mutex);
 	for (i = 0; i < nr; i++)
 		loop_add(&lo, i);
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&loop_index_mutex);
 
 	printk(KERN_INFO "loop: module loaded\n");
 	return 0;
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define WAITING_FOR_GEN_CMD	0x04
 #define WAITING_FOR_ANY		-1
 
+#define	VDC_MAX_RETRIES		10
+
 static struct workqueue_struct *sunvdc_wq;
 
 struct vdc_req_entry {
@@ -419,6 +421,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 		.end_idx		= dr->prod,
 	};
 	int err, delay;
+	int retries = 0;
 
 	hdr.seq = dr->snd_nxt;
 	delay = 1;
@@ -431,6 +434,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 		udelay(delay);
 		if ((delay <<= 1) > 128)
 			delay = 128;
+		if (retries++ > VDC_MAX_RETRIES)
+			break;
 	} while (err == -EAGAIN);
 
 	if (err == -ENOTCONN)
@@ -1027,7 +1027,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
 	struct swim3 __iomem *sw = fs->swim3;
 
 	mutex_lock(&swim3_mutex);
-	if (fs->ref_count > 0 && --fs->ref_count == 0) {
+	if (fs->ref_count > 0)
+		--fs->ref_count;
+	else if (fs->ref_count == -1)
+		fs->ref_count = 0;
+	if (fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
 		out_8(&sw->control_bic, 0xff);
 		swim3_select(fs, RELAX);
@@ -882,6 +882,7 @@ static void __exit exit_gdrom(void)
 	platform_device_unregister(pd);
 	platform_driver_unregister(&gdrom_driver);
 	kfree(gd.toc);
+	kfree(gd.cd_info);
 }
 
 module_init(init_gdrom);
......
@@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
 				" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						" Invalid ipcnum %x\n", ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
 				" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			mutex_lock(&mwave_mutex);
 			if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
 				pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
......
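The pattern here is the standard Spectre-v1 mitigation: bounds-check first, then clamp the index with array_index_nospec() so the CPU cannot use a user-controlled index to speculate past the check. A minimal sketch of the idiom (the array and helper are hypothetical):

	#include <linux/nospec.h>

	#define NR_SLOTS 16
	static int slots[NR_SLOTS];

	/* idx comes from userspace and must not index out of bounds, even speculatively */
	static int read_slot(unsigned int idx, int *out)
	{
		if (idx >= NR_SLOTS)
			return -EINVAL;
		idx = array_index_nospec(idx, NR_SLOTS); /* forced into [0, NR_SLOTS) */
		*out = slots[idx];
		return 0;
	}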
@@ -17,6 +17,8 @@
 
 #include "clk.h"
 
+#define CCDR				0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK	(1 << 17)
 #define CCSR			0xc
 #define BM_CCSR_PLL1_SW_CLK_SEL	(1 << 2)
 #define CACRR			0x10
@@ -414,6 +416,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
 	clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
 	clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
 
+	/* Ensure the MMDC CH0 handshake is bypassed */
+	writel_relaxed(readl_relaxed(base + CCDR) |
+		       BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
 	imx_check_clocks(clks, ARRAY_SIZE(clks));
 
 	clk_data.clks = clks;
......
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
 {
 	int ret;
 	struct device_node *root = of_find_node_by_path("/");
+	const struct of_device_id *match_id;
 
 	if (!root)
 		return -ENODEV;
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
 	/*
 	 * Initialize the driver just for a compliant set of machines
 	 */
-	if (!of_match_node(compatible_machine_match, root))
+	match_id = of_match_node(compatible_machine_match, root);
+
+	of_node_put(root);
+
+	if (!match_id)
 		return -ENODEV;
 
 	if (!mcpm_is_available())
......
@@ -555,7 +555,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 		desc = dmaengine_prep_slave_sg(channel,
 				ctx->device->dma.sg_src,
 				ctx->device->dma.sg_src_len,
-				direction, DMA_CTRL_ACK);
+				DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 		break;
 
 	case DMA_FROM_DEVICE:
@@ -579,7 +579,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 		desc = dmaengine_prep_slave_sg(channel,
 				ctx->device->dma.sg_dst,
 				ctx->device->dma.sg_dst_len,
-				direction,
+				DMA_DEV_TO_MEM,
 				DMA_CTRL_ACK |
 				DMA_PREP_INTERRUPT);
......
@@ -181,7 +181,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 		__func__);
 	desc = dmaengine_prep_slave_sg(channel,
 			ctx->device->dma.sg, ctx->device->dma.sg_len,
-			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+			DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 	if (!desc) {
 		dev_err(ctx->device->dev,
 			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
......
@@ -619,7 +619,7 @@ static void imxdma_tasklet(unsigned long data)
 {
 	struct imxdma_channel *imxdmac = (void *)data;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	struct imxdma_desc *desc;
+	struct imxdma_desc *desc, *next_desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&imxdma->lock, flags);
@@ -649,10 +649,10 @@ static void imxdma_tasklet(unsigned long data)
 	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 
 	if (!list_empty(&imxdmac->ld_queue)) {
-		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-					node);
+		next_desc = list_first_entry(&imxdmac->ld_queue,
+					     struct imxdma_desc, node);
 		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-		if (imxdma_xfer_desc(desc) < 0)
+		if (imxdma_xfer_desc(next_desc) < 0)
 			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
 				 __func__, imxdmac->channel);
 	}
......
@@ -87,6 +87,13 @@ static DEFINE_SPINLOCK(efi_runtime_lock);
  * context through efi_pstore_write().
  */
 
+/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
 /*
  * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
  * the EFI specification requires that callers of the time related runtime
......
@@ -36,6 +36,8 @@
 #include <drm/drmP.h>
 #include "drm_legacy.h"
 
+#include <linux/nospec.h>
+
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 						  struct drm_local_map *map)
 {
@@ -1332,6 +1334,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
 				  idx, dma->buf_count - 1);
 			return -EINVAL;
 		}
+		idx = array_index_nospec(idx, dma->buf_count);
 		buf = dma->buflist[idx];
 		if (buf->file_priv != file_priv) {
 			DRM_ERROR("Process %d freeing buffer not owned\n",
......
@@ -722,7 +722,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
 	if (mode->hsync)
 		return mode->hsync;
 
-	if (mode->htotal < 0)
+	if (mode->htotal <= 0)
 		return 0;
 
 	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
......
@@ -594,13 +594,16 @@ out_fixup:
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&