Merge remote-tracking branch 'stable/linux-4.14.y' into rpi-4.14.y
popcornmix committed Jan 9, 2019
2 parents 4c5aec7 + 24737fa commit 4de3f63
Showing 111 changed files with 839 additions and 328 deletions.
3 changes: 3 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -1965,6 +1965,9 @@
off
Disables hypervisor mitigations and doesn't
emit any warnings.
It also drops the swap size and available
RAM limit restriction on both hypervisor and
bare metal.

Default is 'flush'.

6 changes: 5 additions & 1 deletion Documentation/admin-guide/l1tf.rst
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:

off Disables hypervisor mitigations and doesn't emit any
warnings.
It also drops the swap size and available RAM limit restrictions
on both hypervisor and bare metal.

============ =============================================================

The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@ Default mitigations
The kernel default mitigations for vulnerable processors are:

- PTE inversion to protect against malicious user space. This is done
unconditionally and cannot be controlled.
unconditionally and cannot be controlled. The swap storage is limited
to ~16TB.

- L1D conditional flushing on VMENTER when EPT is enabled for
a guest.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 91
SUBLEVEL = 92
EXTRAVERSION =
NAME = Petit Gorille

2 changes: 1 addition & 1 deletion arch/arm64/include/asm/kvm_arm.h
@@ -99,7 +99,7 @@
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)

/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_RES1 (1U << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
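
A note on the VTCR_EL2_RES1 change above: with a plain int literal, 1 << 31 shifts into the sign bit (undefined behaviour in C), and the resulting negative value sign-extends when folded into the 64-bit VTCR_EL2 value, which would presumably set bits 63:32 as well. The 1U literal avoids both problems. A minimal standalone sketch of the effect, not part of the patch; the cast below stands in for the signed shift so the demo itself stays well defined:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* With a plain int, 1 << 31 is a negative 32-bit value (and the
	 * shift into the sign bit is undefined behaviour); widening it to
	 * 64 bits sign-extends and sets bits 63:32 too.  The unsigned
	 * literal keeps only bit 31 set. */
	uint64_t signed_widen   = (int)(1u << 31);	/* typically 0xffffffff80000000 */
	uint64_t unsigned_widen = 1U << 31;		/* 0x0000000080000000 */

	printf("%016llx vs %016llx\n",
	       (unsigned long long)signed_widen,
	       (unsigned long long)unsigned_widen);
	return 0;
}
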
7 changes: 4 additions & 3 deletions arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "../../../../include/linux/sizes.h"

int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;

/*
* Align with 16 bytes: "greater than that used for any standard data
* types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
* Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
* which may be as large as 64KB depending on the kernel configuration.
*/

vmlinuz_load_addr += (16 - vmlinux_size % 16);
vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);

printf("0x%llx\n", vmlinuz_load_addr);

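
For reference, the new expression rounds the end of the decompressed image up to the next 64 KiB boundary; like the old 16-byte version, it still advances a full step when the size is already aligned. A standalone sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x00010000	/* from include/linux/sizes.h */

int main(void)
{
	/* Hypothetical example values, not taken from a real build. */
	uint64_t vmlinux_load_addr = 0xffffffff80100000ULL;
	uint64_t vmlinux_size = 0x8a2345;
	uint64_t vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;

	/* Round up to the next 64 KiB boundary, as the patch does (a full
	 * 64 KiB is still added when the size is already aligned). */
	vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);

	printf("0x%llx\n", (unsigned long long)vmlinuz_load_addr);	/* 0xffffffff809b0000 */
	return 0;
}
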
3 changes: 2 additions & 1 deletion arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
case 3:
return CVMX_HELPER_INTERFACE_MODE_LOOP;
case 4:
return CVMX_HELPER_INTERFACE_MODE_RGMII;
/* TODO: Implement support for AGL (RGMII). */
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
2 changes: 1 addition & 1 deletion arch/mips/include/asm/cpu-info.h
@@ -50,7 +50,7 @@ struct guest_info {
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */

struct cpuinfo_mips {
unsigned long asid_cache;
u64 asid_cache;
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
unsigned long asid_mask;
#endif
1 change: 1 addition & 0 deletions arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL

#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)

#define LEVELS_PER_SLICE 128

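
The new macro is the inverse of pa_to_nid() above: with NODE_ADDRSPACE_SHIFT at 44, each Loongson64 node owns a 16 TiB window of the physical address space. A standalone round-trip sketch (UL casts added, and an LP64 host assumed, so it compiles cleanly outside the kernel):

#include <stdio.h>

#define NODE_ADDRSPACE_SHIFT	44
#define pa_to_nid(addr)		(((addr) & 0xf00000000000UL) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid)	((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT)

int main(void)
{
	unsigned long base = nid_to_addrbase(3);

	/* Node 3's window starts at 0x300000000000; mapping any address in
	 * that window back through pa_to_nid() recovers node 3. */
	printf("base=0x%lx nid=%lu\n", base, pa_to_nid(base + 0x1000));
	return 0;
}
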
2 changes: 1 addition & 1 deletion arch/mips/include/asm/mmu.h
@@ -7,7 +7,7 @@
#include <linux/wait.h>

typedef struct {
unsigned long asid[NR_CPUS];
u64 asid[NR_CPUS];
void *vdso;
atomic_t fp_mode_switching;

10 changes: 4 additions & 6 deletions arch/mips/include/asm/mmu_context.h
@@ -75,14 +75,14 @@ extern unsigned long pgd_current[];
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
static unsigned long asid_version_mask(unsigned int cpu)
static inline u64 asid_version_mask(unsigned int cpu)
{
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

return ~(asid_mask | (asid_mask - 1));
return ~(u64)(asid_mask | (asid_mask - 1));
}

static unsigned long asid_first_version(unsigned int cpu)
static inline u64 asid_first_version(unsigned int cpu)
{
return ~asid_version_mask(cpu) + 1;
}
@@ -101,14 +101,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
unsigned long asid = asid_cache(cpu);
u64 asid = asid_cache(cpu);

if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = asid_first_version(cpu);
}

cpu_context(cpu, mm) = asid_cache(cpu) = asid;
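
With the ASID cache widened to 64 bits (matching the cpu-info.h and mmu.h changes above), the generation counter held in the bits above the hardware ASID can no longer realistically wrap back to zero, which is why the old "fix version if needed" fallback is dropped here. A standalone sketch of the version/ASID split, assuming an 8-bit hardware ASID:

#include <stdint.h>
#include <stdio.h>

#define ASID_MASK	0xffULL		/* assume an 8-bit hardware ASID */
#define ASID_INC	1ULL

static uint64_t asid_version_mask(void)
{
	/* Everything above the hardware ASID bits counts as the version. */
	return ~(uint64_t)(ASID_MASK | (ASID_MASK - 1));
}

static uint64_t asid_first_version(void)
{
	return ~asid_version_mask() + 1;	/* 0x100 for an 8-bit ASID */
}

int main(void)
{
	uint64_t asid = asid_first_version() + ASID_MASK;	/* last ASID of version 1 */

	asid += ASID_INC;
	if (!(asid & ASID_MASK))	/* hardware ASIDs exhausted */
		printf("new ASID cycle, version now 0x%llx\n",
		       (unsigned long long)(asid >> 8));

	/* With a 64-bit counter and an 8-bit ASID there are 2^56 versions,
	 * so the version field never wraps to 0 in practice. */
	return 0;
}
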
13 changes: 12 additions & 1 deletion arch/mips/include/asm/mmzone.h
@@ -7,7 +7,18 @@
#define _ASM_MMZONE_H_

#include <asm/page.h>
#include <mmzone.h>

#ifdef CONFIG_NEED_MULTIPLE_NODES
# include <mmzone.h>
#endif

#ifndef pa_to_nid
#define pa_to_nid(addr) 0
#endif

#ifndef nid_to_addrbase
#define nid_to_addrbase(nid) 0
#endif

#ifdef CONFIG_DISCONTIGMEM

5 changes: 5 additions & 0 deletions arch/mips/include/asm/pgtable-64.h
@@ -271,6 +271,11 @@ static inline int pmd_bad(pmd_t pmd)

static inline int pmd_present(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return pmd_val(pmd) & _PAGE_PRESENT;
#endif

return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

22 changes: 22 additions & 0 deletions arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
@@ -747,4 +748,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_node(long node) \
{ \
unsigned long start = CAC_BASE | nid_to_addrbase(node); \
unsigned long end = start + current_cpu_data.desc.waysize; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
cache##lsize##_unroll32(addr|ws, indexop); \
}

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */
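
For readability, this is roughly what the macro expands to for the 32-byte-line variant: an index-type writeback-invalidate sweep over one node's secondary cache, offset into that node's window via nid_to_addrbase(). Hand-expanded sketch, not part of the patch:

static inline void blast_scache32_node(long node)
{
	unsigned long start = CAC_BASE | nid_to_addrbase(node);
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
			       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	/* For each way, step through one way's worth of indices of the
	 * node-local L2, issuing 32 Index_Writeback_Inv_SD operations per
	 * iteration via cache32_unroll32(). */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 32 * 32)
			cache32_unroll32(addr | ws, Index_Writeback_Inv_SD);
}
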
4 changes: 2 additions & 2 deletions arch/mips/kernel/vdso.c
@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)

/* Map delay slot emulation page */
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
VM_READ|VM_WRITE|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
38 changes: 20 additions & 18 deletions arch/mips/math-emu/dsemul.c
@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
struct emuframe __user *fr;
int err, fr_idx;
unsigned long fr_uaddr;
struct emuframe fr;
int fr_idx, ret;

/* NOP is easy */
if (ir == 0)
@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
fr_idx = alloc_emuframe();
if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
fr = &dsemul_page()[fr_idx];

/* Retrieve the appropriately encoded break instruction */
break_math = BREAK_MATH(isa16);

/* Write the instructions to the frame */
if (isa16) {
err = __put_user(ir >> 16,
(u16 __user *)(&fr->emul));
err |= __put_user(ir & 0xffff,
(u16 __user *)((long)(&fr->emul) + 2));
err |= __put_user(break_math >> 16,
(u16 __user *)(&fr->badinst));
err |= __put_user(break_math & 0xffff,
(u16 __user *)((long)(&fr->badinst) + 2));
union mips_instruction _emul = {
.halfword = { ir >> 16, ir }
};
union mips_instruction _badinst = {
.halfword = { break_math >> 16, break_math }
};

fr.emul = _emul.word;
fr.badinst = _badinst.word;
} else {
err = __put_user(ir, &fr->emul);
err |= __put_user(break_math, &fr->badinst);
fr.emul = ir;
fr.badinst = break_math;
}

if (unlikely(err)) {
/* Write the frame to user memory */
fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
FOLL_FORCE | FOLL_WRITE);
if (unlikely(ret != sizeof(fr))) {
MIPS_FPU_EMU_INC_STATS(errors);
free_emuframe(fr_idx, current->mm);
return SIGBUS;
@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
atomic_set(&current->thread.bd_emu_frame, fr_idx);

/* Change user register context to execute the frame */
regs->cp0_epc = (unsigned long)&fr->emul | isa16;

/* Ensure the icache observes our newly written frame */
flush_cache_sigtramp((unsigned long)&fr->emul);
regs->cp0_epc = fr_uaddr | isa16;

return 0;
}
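
The reason for this rework is visible in the vdso.c hunk above: the delay-slot emulation page is now mapped without VM_WRITE, so the frame can no longer be filled in with __put_user(). Instead it is assembled on the kernel stack and copied out through access_process_vm() with FOLL_FORCE | FOLL_WRITE, which writes despite the read-only mapping and should also cover the cache maintenance that the explicit flush_cache_sigtramp() call used to provide. Condensed, the new write path looks like this (kernel-context sketch, word-sized case only, not standalone code):

	struct emuframe fr = {
		.emul    = ir,		/* the instruction being emulated */
		.badinst = break_math,	/* trapping BREAK placed after it */
	};
	unsigned long fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];

	/* Write the whole frame through the remote-access path; FOLL_FORCE
	 * lets the write succeed even though the VMA is read-only. */
	if (access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
			      FOLL_FORCE | FOLL_WRITE) != sizeof(fr))
		return SIGBUS;
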
2 changes: 1 addition & 1 deletion arch/mips/mm/c-r3k.c
@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
pmd_t *pmdp;
pte_t *ptep;

pr_debug("cpage[%08lx,%08lx]\n",
pr_debug("cpage[%08llx,%08lx]\n",
cpu_context(smp_processor_id(), mm), addr);

/* No ASID => no such page in the cache. */
44 changes: 37 additions & 7 deletions arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
r4k_blast_scache = blast_scache128;
}

static void (*r4k_blast_scache_node)(long node);

static void r4k_blast_scache_node_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();

if (current_cpu_type() != CPU_LOONGSON3)
r4k_blast_scache_node = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_node = blast_scache16_node;
else if (sc_lsize == 32)
r4k_blast_scache_node = blast_scache32_node;
else if (sc_lsize == 64)
r4k_blast_scache_node = blast_scache64_node;
else if (sc_lsize == 128)
r4k_blast_scache_node = blast_scache128_node;
}

static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;

case CPU_LOONGSON3:
/* Use get_ebase_cpunum() for both NUMA=y/n */
r4k_blast_scache_node(get_ebase_cpunum() >> 2);
break;

case CPU_BMIPS5000:
r4k_blast_scache();
__sync();
@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)

preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
else
if (size >= scache_size) {
if (current_cpu_type() != CPU_LOONGSON3)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
} else {
blast_scache_range(addr, addr + size);
}
preempt_enable();
__sync();
return;
@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)

preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
else {
if (size >= scache_size) {
if (current_cpu_type() != CPU_LOONGSON3)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
} else {
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -1910,6 +1939,7 @@ void r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
r4k_blast_dcache_user_page_setup();
r4k_blast_icache_user_page_setup();
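
Two node selections appear above, and both follow from Loongson-3's layout: each node packs four cores, so the local node for a full flush is get_ebase_cpunum() >> 2, while for a DMA range the owning node comes from bits 47:44 of the physical address via pa_to_nid(). An annotated restatement of the two call sites, assuming that four-cores-per-node layout:

	/* Full flush: blast the L2 of the node this CPU sits on. */
	r4k_blast_scache_node(get_ebase_cpunum() >> 2);	/* cpu / 4 -> node */

	/* DMA wback/inv covering the whole L2: blast the node that owns
	 * the buffer's physical address instead. */
	r4k_blast_scache_node(pa_to_nid(addr));
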
2 changes: 1 addition & 1 deletion arch/s390/pci/pci_clp.c
@@ -437,7 +437,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
int rc;

rrb = clp_alloc_block(GFP_KERNEL);
rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;

2 changes: 1 addition & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -1355,7 +1355,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
"call kvm_spurious_fault \n\t" \
"jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)

