Automatic merge of 'next' into merge (2024-09-06 20:41)
mpe committed Sep 6, 2024
2 parents 33b38e8 + 65948b0 commit 56e2adc
Showing 23 changed files with 219 additions and 172 deletions.
31 changes: 25 additions & 6 deletions arch/powerpc/Kconfig
@@ -853,8 +853,8 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
PPC_85xx
depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \
PPC_BOOK3S_32 || PPC_85xx
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -870,9 +870,9 @@ config DATA_SHIFT
range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \
(PIN_TLB_DATA || PIN_TLB_TEXT)
default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 24 if STRICT_KERNEL_RWX && PPC_85xx
default PAGE_SHIFT
help
@@ -1269,8 +1269,27 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
default "0xb0000000" if PPC_BOOK3S_32
default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM
default "0xc0000000"

config MODULES_SIZE_BOOL
bool "Set custom size for modules/execmem area"
depends on EXECMEM && ADVANCED_OPTIONS
help
This option allows you to set the size of the kernel virtual address
space dedicated to modules/execmem.
For the time being it is only for 8xx and book3s/32. Other
platforms share it with vmalloc space.

Say N here unless you know what you are doing.

config MODULES_SIZE
int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL
range 1 256 if EXECMEM
default 64 if EXECMEM && PPC_BOOK3S_32
default 32 if EXECMEM && PPC_8xx
default 0

endmenu

if PPC64
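
A note on the DATA_SHIFT defaults consolidated above: DATA_SHIFT is the log2 of the mapping block size, so the 8xx defaults of 23 (with pinned TLBs) and 19 correspond to the 8M and 512K page sizes that head_8xx.S further down names PAGE_SHIFT_8M and PAGE_SHIFT_512K. A minimal standalone sketch of that arithmetic (plain C, illustrative, not part of the commit):

#include <stdio.h>

/* Block sizes implied by the DATA_SHIFT defaults in the hunk above:
 * 19 = 512K, 23 = 8M (the 8xx large page), 24 = 16M (85xx).
 */
int main(void)
{
	int shifts[] = { 19, 23, 24 };

	for (int i = 0; i < 3; i++)
		printf("DATA_SHIFT=%d -> %lu KB blocks\n",
		       shifts[i], (1UL << shifts[i]) / 1024);
	return 0;
}
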
1 change: 1 addition & 0 deletions arch/powerpc/configs/ppc64_defconfig
@@ -93,6 +93,7 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_ZONE_DEVICE=y
CONFIG_NET=y
CONFIG_PACKET=y
3 changes: 2 additions & 1 deletion arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va);
#endif

#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)

#ifndef __ASSEMBLY__
#include <linux/sched.h>
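
The effect of the new MODULES_SIZE macro above is easiest to see with concrete numbers. A standalone sketch, assuming the book3s/32 default of CONFIG_MODULES_SIZE=64 and a PAGE_OFFSET of 0xc0000000 (both are assumptions for illustration, not values from the hunk):

#include <stdio.h>

/* Mirrors the three macros in book3s/32/pgtable.h above; ALIGN_DOWN is
 * simplified for a power-of-two alignment.
 */
#define SZ_1M			0x00100000UL
#define SZ_256M			0x10000000UL
#define PAGE_OFFSET		0xc0000000UL	/* assumed layout */
#define CONFIG_MODULES_SIZE	64		/* assumed book3s/32 default */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_SIZE	(CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR	(MODULES_END - MODULES_SIZE)

int main(void)
{
	/* Prints: modules: 0xbc000000 - 0xc0000000 (64 MB) */
	printf("modules: 0x%lx - 0x%lx (%lu MB)\n",
	       MODULES_VADDR, MODULES_END,
	       (MODULES_END - MODULES_VADDR) / SZ_1M);
	return 0;
}

For comparison, the replaced fixed-size area spanned SZ_256M, i.e. 0xb0000000 - 0xc0000000 under the same PAGE_OFFSET assumption.
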
20 changes: 20 additions & 0 deletions arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -74,6 +74,26 @@
#define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))

/*
* With 4K page size the real_pte machinery is all nops.
*/
#define __real_pte(e, p, o) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \
index = 0; \
shift = mmu_psize_defs[psize].shift; \

#define pte_iterate_hashed_end() } while(0)

/*
* We expect this to be called only for user addresses or kernel virtual
* addresses other than the linear mapping.
*/
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K

/*
* 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
* a matter of returning the PTE bits that need to be modified. On 64K PTE,
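
With the 4K variants of the real_pte machinery now local to this header, a caller-side sketch shows why they are "all nops": the open-coded do-while runs exactly once, with index forced to 0. The caller below is hypothetical, written against the macros above:

/* Hypothetical caller: on 4K the "loop" body executes once and shift is
 * simply the base shift of the requested page size; on 64K with subpages
 * the same construct iterates per valid subpage.
 */
static void visit_subpages(real_pte_t rpte, int psize, unsigned long vpn)
{
	unsigned long index, shift;

	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
		/* use index/shift to compute the hash slot for this subpage */
	} pte_iterate_hashed_end();
}
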
26 changes: 0 additions & 26 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud)

#ifndef __ASSEMBLY__

/*
* This is the default implementation of various PTE accessors, it's
* used in all cases except Book3S with 64K pages where we have a
* concept of sub-pages
*/
#ifndef __real_pte

#define __real_pte(e, p, o) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \
index = 0; \
shift = mmu_psize_defs[psize].shift; \

#define pte_iterate_hashed_end() } while(0)

/*
* We expect this to be called only for user addresses or kernel virtual
* addresses other than the linear mapping.
*/
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K

#endif /* __real_pte */

static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long clr,
unsigned long set, int huge)
3 changes: 0 additions & 3 deletions arch/powerpc/include/asm/mmu_context.h
@@ -116,9 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif

extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
3 changes: 2 additions & 1 deletion arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -170,8 +170,9 @@

#define mmu_linear_psize MMU_PAGE_8M

#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
#define MODULES_END PAGE_OFFSET
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)

#ifndef __ASSEMBLY__

8 changes: 7 additions & 1 deletion arch/powerpc/include/asm/nohash/pgalloc.h
@@ -19,8 +19,14 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));

#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
(MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
#endif
return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
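
The memcpy added to pgd_alloc() above is what lets the kernel-boundary checks disappear from the 8xx TLB-miss handlers below: every new task PGD is pre-seeded with the kernel half of swapper_pg_dir, so a miss on a kernel address resolves through the current PGD without switching tables. A toy model of the seeding, with illustrative sizes (a PGDIR_SHIFT of 22 and a 0xc0000000 TASK_SIZE are assumptions):

#include <string.h>

#define MAX_PTRS_PER_PGD	1024	/* assumed: 4 GB / 4 MB per entry */
#define USER_PTRS_PER_PGD	768	/* assumed: 0xc0000000 >> 22 */

typedef unsigned long pgd_t;

static pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD];	/* stand-in for the init PGD */

/* Toy model of the new allocation path: copy the kernel entries
 * [USER_PTRS_PER_PGD, MAX_PTRS_PER_PGD) from the init PGD into a freshly
 * allocated task PGD, as the hunk above does for 8xx and book3s/603.
 */
static void seed_kernel_half(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
	       (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
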
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/thread_info.h
@@ -226,6 +226,10 @@ static inline int arch_within_stack_frames(const void * const stack,
return BAD_STACK;
}

#ifdef CONFIG_PPC32
extern void *emergency_ctx[];
#endif

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
78 changes: 43 additions & 35 deletions arch/powerpc/kernel/head_8xx.S
@@ -40,16 +40,6 @@

#include "head_32.h"

.macro compare_to_kernel_boundary scratch, addr
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
not. \scratch, \addr
#else
rlwinm \scratch, \addr, 16, 0xfff8
cmpli cr0, \scratch, PAGE_OFFSET@h
#endif
.endm

#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23

@@ -199,18 +189,7 @@ instruction_counter:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
mtspr SPRN_MD_EPN, r10
#ifdef CONFIG_EXECMEM
mfcr r11
compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
#ifdef CONFIG_EXECMEM
blt+ 3f
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
mtcr r11
#endif
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MD_TWC, r11
mfspr r10, SPRN_MD_TWC
@@ -248,19 +227,12 @@ instruction_counter:
START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
mfcr r11

/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
compare_to_kernel_boundary r10, r10
mfspr r10, SPRN_M_TWB /* Get level 1 table */
blt+ 3f
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
mtcr r11
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */

mtspr SPRN_MD_TWC, r11
@@ -332,15 +304,19 @@ instruction_counter:
cmpwi cr1, r11, RPN_PATTERN
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
mfspr r11, SPRN_DSISR
rlwinm r11, r11, 0, DSISR_NOHPTE
cmpwi cr1, r11, 0
beq+ cr1, .Ldtlbie
mfspr r11, SPRN_DAR
tlbie r11
rlwinm r11, r11, 16, 0xffff
cmplwi cr1, r11, TASK_SIZE@h
bge- cr1, FixupPGD
.Ldtlbie:
EXCEPTION_PROLOG_1
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
lwz r4, _DAR(r11)
lwz r5, _DSISR(r11)
andis. r10,r5,DSISR_NOHPTE@h
beq+ .Ldtlbie
tlbie r4
.Ldtlbie:
prepare_transfer_to_handler
bl do_page_fault
b interrupt_return
@@ -394,6 +370,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */
__HEAD
. = 0x2000

FixupPGD:
mtspr SPRN_M_TW, r10
mfspr r10, SPRN_DAR
mtspr SPRN_MD_EPN, r10
mfspr r11, SPRN_M_TWB /* Get level 1 table */
lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
cmpwi cr1, r10, 0
bne cr1, 1f

rlwinm r10, r11, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r10) /* Get the level 1 entry */
cmpwi cr1, r10, 0
beq cr1, 1f
stw r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Set the level 1 entry */
mfspr r10, SPRN_M_TW
mtcr r10
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
rfi
1:
mfspr r10, SPRN_M_TW
b .Ldtlbie

/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
* DAR is set to the calculated address.
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r10, SPRN_SRR0
mtspr SPRN_MD_EPN, r10
rlwinm r11, r10, 16, 0xfff8
cmpli cr1, r11, PAGE_OFFSET@h
cmpli cr1, r11, TASK_SIZE@h
mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ cr1, 3f

@@ -587,6 +587,10 @@ start_here:
lis r0, (MD_TWAM | MD_RSV4I)@h
mtspr SPRN_MD_CTR, r0
#endif
#ifndef CONFIG_PIN_TLB_TEXT
li r0, 0
mtspr SPRN_MI_CTR, r0
#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
lis r0, MD_TWAM@h
mtspr SPRN_MD_CTR, r0
@@ -683,6 +687,7 @@ SYM_FUNC_START_LOCAL(initial_mmu)
blr
SYM_FUNC_END(initial_mmu)

#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h
ori r9, r9, (1f - PAGE_OFFSET)@l
@@ -704,6 +709,7 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_MD_CTR, r6
tlbia

#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
@@ -724,6 +730,7 @@ _GLOBAL(mmu_pin_tlb)
bdnzt lt, 2b
lis r0, MI_RSV4I@h
mtspr SPRN_MI_CTR, r0
#endif

LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
@@ -783,3 +790,4 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
#endif
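
Seeding at allocation time cannot cover kernel page-table entries created after a task's PGD was allocated (e.g. by vmalloc), which is what the new FixupPGD path above handles: on a miss it lazily re-copies the faulting level-1 entry from swapper_pg_dir and retries. A rough C rendering of its control flow (names are illustrative, not the kernel's):

#define PGDIR_SHIFT	22		/* assumed: 4 MB per level-1 entry */

typedef unsigned long pgd_t;
extern pgd_t swapper_pg_dir[];		/* the kernel's init PGD */

/* Illustrative model of FixupPGD: return 1 if the miss was repaired by
 * copying the entry from the init PGD (the asm then retries via rfi),
 * 0 if it is a genuine fault to be handled by do_page_fault().
 */
static int fixup_pgd(pgd_t *task_pgd, unsigned long faulting_addr)
{
	unsigned int idx = faulting_addr >> PGDIR_SHIFT;

	if (task_pgd[idx])			/* entry present: real fault */
		return 0;
	if (!swapper_pg_dir[idx])		/* kernel has no mapping either */
		return 0;
	task_pgd[idx] = swapper_pg_dir[idx];	/* lazy sync, then retry */
	return 1;
}
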
(13 more changed files not shown)
