Initial commit
commit 169c65d57e
51358 changed files with 23120455 additions and 0 deletions
arch/frv/mm/Makefile (new file, 9 additions)
@@ -0,0 +1,9 @@
#
|
||||
# Makefile for the arch-specific parts of the memory manager.
|
||||
#
|
||||
|
||||
obj-y := init.o kmap.o
|
||||
|
||||
obj-$(CONFIG_MMU) += \
|
||||
pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
|
||||
mmu-context.o dma-alloc.o elf-fdpic.o
|
arch/frv/mm/cache-page.c (new file, 71 additions)
@@ -0,0 +1,71 @@
/* cache-page.c: whole-page cache wrangling functions for MMU linux
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* DCF takes a virtual address and the page may not currently have one
|
||||
* - temporarily hijack a kmap_atomic() slot and attach the page to it
|
||||
*/
|
||||
void flush_dcache_page(struct page *page)
|
||||
{
|
||||
unsigned long dampr2;
|
||||
void *vaddr;
|
||||
|
||||
dampr2 = __get_DAMPR(2);
|
||||
|
||||
vaddr = kmap_atomic_primary(page);
|
||||
|
||||
frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
|
||||
|
||||
kunmap_atomic_primary(vaddr);
|
||||
|
||||
if (dampr2) {
|
||||
__set_DAMPR(2, dampr2);
|
||||
__set_IAMPR(2, dampr2);
|
||||
}
|
||||
|
||||
} /* end flush_dcache_page() */
|
||||
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* ICI takes a virtual address and the page may not currently have one
|
||||
* - so we temporarily attach the page to a bit of virtual space so that it can be flushed
|
||||
*/
|
||||
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long start, unsigned long len)
|
||||
{
|
||||
unsigned long dampr2;
|
||||
void *vaddr;
|
||||
|
||||
dampr2 = __get_DAMPR(2);
|
||||
|
||||
vaddr = kmap_atomic_primary(page);
|
||||
|
||||
start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
|
||||
frv_cache_wback_inv(start, start + len);
|
||||
|
||||
kunmap_atomic_primary(vaddr);
|
||||
|
||||
if (dampr2) {
|
||||
__set_DAMPR(2, dampr2);
|
||||
__set_IAMPR(2, dampr2);
|
||||
}
|
||||
|
||||
} /* end flush_icache_user_range() */
|
||||
|
||||
EXPORT_SYMBOL(flush_icache_user_range);
|
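
A minimal sketch of how the two exported helpers above are typically used after writing to a page through a kernel mapping; the helper write_to_page() is an illustrative assumption, not something defined in this commit:

/* illustrative sketch only -- not part of this file */
#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* hypothetical helper: copy data into a page, then keep the caches coherent */
static void write_to_page(struct page *page, const void *buf, size_t len)
{
	void *kaddr = kmap(page);

	memcpy(kaddr, buf, len);
	kunmap(page);

	/* write the dirty data cache lines back so other mappings see the data */
	flush_dcache_page(page);
}
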
arch/frv/mm/dma-alloc.c (new file, 183 additions)
@@ -0,0 +1,183 @@
/* dma-alloc.c: consistent DMA memory allocation
|
||||
*
|
||||
* Derived from arch/ppc/mm/cachemap.c
|
||||
*
|
||||
* PowerPC version derived from arch/arm/mm/consistent.c
|
||||
* Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
|
||||
*
|
||||
* linux/arch/arm/mm/consistent.c
|
||||
*
|
||||
* Copyright (C) 2000 Russell King
|
||||
*
|
||||
* Consistent memory allocators. Used for DMA devices that want to
|
||||
* share uncached memory with the processor core. The function's return value
|
||||
* is the virtual address and 'dma_handle' is the physical address.
|
||||
* Mostly stolen from the ARM port, with some changes for PowerPC.
|
||||
* -- Dan
|
||||
* Modified for 36-bit support. -Matt
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
|
||||
{
|
||||
pgd_t *pge;
|
||||
pud_t *pue;
|
||||
pmd_t *pme;
|
||||
pte_t *pte;
|
||||
int err = -ENOMEM;
|
||||
|
||||
/* Use upper 10 bits of VA to index the first level map */
|
||||
pge = pgd_offset_k(va);
|
||||
pue = pud_offset(pge, va);
|
||||
pme = pmd_offset(pue, va);
|
||||
|
||||
/* Use middle 10 bits of VA to index the second-level map */
|
||||
pte = pte_alloc_kernel(pme, va);
|
||||
if (pte != 0) {
|
||||
err = 0;
|
||||
set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function will allocate the requested contiguous pages and
|
||||
* map them into the kernel's vmalloc() space. This is done so we
|
||||
* get unique mapping for these pages, outside of the kernel's 1:1
|
||||
* virtual:physical mapping. This is necessary so we can cover large
|
||||
* portions of the kernel with single large page TLB entries, and
|
||||
* still get unique uncached pages for consistent DMA.
|
||||
*/
|
||||
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
|
||||
{
|
||||
struct vm_struct *area;
|
||||
unsigned long page, va, pa;
|
||||
void *ret;
|
||||
int order, err, i;
|
||||
|
||||
if (in_interrupt())
|
||||
BUG();
|
||||
|
||||
/* only allocate page size areas */
|
||||
size = PAGE_ALIGN(size);
|
||||
order = get_order(size);
|
||||
|
||||
page = __get_free_pages(gfp, order);
|
||||
if (!page) {
|
||||
BUG();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* allocate some common virtual space to map the new pages */
|
||||
area = get_vm_area(size, VM_ALLOC);
|
||||
if (area == 0) {
|
||||
free_pages(page, order);
|
||||
return NULL;
|
||||
}
|
||||
va = VMALLOC_VMADDR(area->addr);
|
||||
ret = (void *) va;
|
||||
|
||||
/* this gives us the real physical address of the first page */
|
||||
*dma_handle = pa = virt_to_bus((void *) page);
|
||||
|
||||
/* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
|
||||
* all pages that were allocated.
|
||||
*/
|
||||
if (order > 0) {
|
||||
struct page *rpage = virt_to_page(page);
|
||||
split_page(rpage, order);
|
||||
}
|
||||
|
||||
err = 0;
|
||||
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
|
||||
err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);
|
||||
|
||||
if (err) {
|
||||
vfree((void *) va);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* we need to ensure that there are no cachelines in use, or, worse, dirty in this area
|
||||
* - can't do until after virtual address mappings are created
|
||||
*/
|
||||
frv_cache_invalidate(va, va + size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* free page(s) as defined by the above mapping.
|
||||
*/
|
||||
void consistent_free(void *vaddr)
|
||||
{
|
||||
if (in_interrupt())
|
||||
BUG();
|
||||
vfree(vaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
* make an area consistent.
|
||||
*/
|
||||
void consistent_sync(void *vaddr, size_t size, int direction)
|
||||
{
|
||||
unsigned long start = (unsigned long) vaddr;
|
||||
unsigned long end = start + size;
|
||||
|
||||
switch (direction) {
|
||||
case PCI_DMA_NONE:
|
||||
BUG();
|
||||
case PCI_DMA_FROMDEVICE: /* invalidate only */
|
||||
frv_cache_invalidate(start, end);
|
||||
break;
|
||||
case PCI_DMA_TODEVICE: /* writeback only */
|
||||
frv_dcache_writeback(start, end);
|
||||
break;
|
||||
case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
|
||||
frv_dcache_writeback(start, end);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* consistent_sync_page() makes a page consistent. It is identical
|
||||
* to consistent_sync, but takes a struct page instead of a virtual address
|
||||
*/
|
||||
|
||||
void consistent_sync_page(struct page *page, unsigned long offset,
|
||||
size_t size, int direction)
|
||||
{
|
||||
void *start;
|
||||
|
||||
start = page_address(page) + offset;
|
||||
consistent_sync(start, size, direction);
|
||||
}
|
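
A brief sketch of the calling convention for the allocator above, as a driver might use it; the 4096-byte size and the device-programming step are illustrative assumptions:

/* illustrative sketch only -- not part of this file */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

static void *example_buf;
static dma_addr_t example_bus_addr;

static int example_dma_setup(void)
{
	/* uncached, contiguous memory; the bus address comes back via the handle */
	example_buf = consistent_alloc(GFP_KERNEL, 4096, &example_bus_addr);
	if (!example_buf)
		return -ENOMEM;

	/* ... hand example_bus_addr to the device here ... */
	return 0;
}

static void example_dma_teardown(void)
{
	consistent_free(example_buf);
}
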
arch/frv/mm/elf-fdpic.c (new file, 113 additions)
@@ -0,0 +1,113 @@
/* elf-fdpic.c: ELF FDPIC memory layout management
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/elf-fdpic.h>
|
||||
#include <asm/mman.h>
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* lay out the userspace VM according to our grand design
|
||||
*/
|
||||
#ifdef CONFIG_MMU
|
||||
void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params,
|
||||
struct elf_fdpic_params *interp_params,
|
||||
unsigned long *start_stack,
|
||||
unsigned long *start_brk)
|
||||
{
|
||||
*start_stack = 0x02200000UL;
|
||||
|
||||
/* if the only executable is a shared object, assume that it is an interpreter rather than
|
||||
* a true executable, and map it such that "ld.so --list" comes out right
|
||||
*/
|
||||
if (!(interp_params->flags & ELF_FDPIC_FLAG_PRESENT) &&
|
||||
exec_params->hdr.e_type != ET_EXEC
|
||||
) {
|
||||
exec_params->load_addr = PAGE_SIZE;
|
||||
|
||||
*start_brk = 0x80000000UL;
|
||||
}
|
||||
else {
|
||||
exec_params->load_addr = 0x02200000UL;
|
||||
|
||||
if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) ==
|
||||
ELF_FDPIC_FLAG_INDEPENDENT
|
||||
) {
|
||||
exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT;
|
||||
exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP;
|
||||
}
|
||||
}
|
||||
|
||||
} /* end elf_fdpic_arch_lay_out_mm() */
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* place non-fixed mmaps first in the bottom part of memory, working up, and then in the top part
|
||||
* of memory, working down
|
||||
*/
|
||||
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
|
||||
unsigned long pgoff, unsigned long flags)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct vm_unmapped_area_info info;
|
||||
|
||||
if (len > TASK_SIZE)
|
||||
return -ENOMEM;
|
||||
|
||||
/* handle MAP_FIXED */
|
||||
if (flags & MAP_FIXED)
|
||||
return addr;
|
||||
|
||||
/* only honour a hint if we're not going to clobber something doing so */
|
||||
if (addr) {
|
||||
addr = PAGE_ALIGN(addr);
|
||||
vma = find_vma(current->mm, addr);
|
||||
if (TASK_SIZE - len >= addr &&
|
||||
(!vma || addr + len <= vma->vm_start))
|
||||
goto success;
|
||||
}
|
||||
|
||||
/* search between the bottom of user VM and the stack grow area */
|
||||
info.flags = 0;
|
||||
info.length = len;
|
||||
info.low_limit = PAGE_SIZE;
|
||||
info.high_limit = (current->mm->start_stack - 0x00200000);
|
||||
info.align_mask = 0;
|
||||
info.align_offset = 0;
|
||||
addr = vm_unmapped_area(&info);
|
||||
if (!(addr & ~PAGE_MASK))
|
||||
goto success;
|
||||
VM_BUG_ON(addr != -ENOMEM);
|
||||
|
||||
/* search from just above the WorkRAM area to the top of memory */
|
||||
info.low_limit = PAGE_ALIGN(0x80000000);
|
||||
info.high_limit = TASK_SIZE;
|
||||
addr = vm_unmapped_area(&info);
|
||||
if (!(addr & ~PAGE_MASK))
|
||||
goto success;
|
||||
VM_BUG_ON(addr != -ENOMEM);
|
||||
|
||||
#if 0
|
||||
printk("[area] l=%lx (ENOMEM) f='%s'\n",
|
||||
len, filp ? filp->f_path.dentry->d_name.name : "");
|
||||
#endif
|
||||
return -ENOMEM;
|
||||
|
||||
success:
|
||||
#if 0
|
||||
printk("[area] l=%lx ad=%lx f='%s'\n",
|
||||
len, addr, filp ? filp->f_path.dentry->d_name.name : "");
|
||||
#endif
|
||||
return addr;
|
||||
} /* end arch_get_unmapped_area() */
|
arch/frv/mm/extable.c (new file, 74 additions)
@@ -0,0 +1,74 @@
/*
|
||||
* linux/arch/frv/mm/extable.c
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
extern const struct exception_table_entry __attribute__((aligned(8))) __start___ex_table[];
|
||||
extern const struct exception_table_entry __attribute__((aligned(8))) __stop___ex_table[];
|
||||
extern const void __memset_end, __memset_user_error_lr, __memset_user_error_handler;
|
||||
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
|
||||
extern spinlock_t modlist_lock;
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
*
|
||||
*/
|
||||
static inline unsigned long search_one_table(const struct exception_table_entry *first,
|
||||
const struct exception_table_entry *last,
|
||||
unsigned long value)
|
||||
{
|
||||
while (first <= last) {
|
||||
const struct exception_table_entry __attribute__((aligned(8))) *mid;
|
||||
long diff;
|
||||
|
||||
mid = (last - first) / 2 + first;
|
||||
diff = mid->insn - value;
|
||||
if (diff == 0)
|
||||
return mid->fixup;
|
||||
else if (diff < 0)
|
||||
first = mid + 1;
|
||||
else
|
||||
last = mid - 1;
|
||||
}
|
||||
return 0;
|
||||
} /* end search_one_table() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* see if there's a fixup handler available to deal with a kernel fault
|
||||
*/
|
||||
unsigned long search_exception_table(unsigned long pc)
|
||||
{
|
||||
const struct exception_table_entry *extab;
|
||||
|
||||
/* determine if the fault lay during a memcpy_user or a memset_user */
|
||||
if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
|
||||
(unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
|
||||
) {
|
||||
/* the fault occurred in a protected memset
|
||||
* - we search for the return address (in LR) instead of the program counter
|
||||
* - it was probably during a clear_user()
|
||||
*/
|
||||
return (unsigned long) &__memset_user_error_handler;
|
||||
}
|
||||
|
||||
if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
|
||||
(unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
|
||||
) {
|
||||
/* the fault occurred in a protected memcpy
|
||||
* - we search for the return address (in LR) instead of the program counter
|
||||
* - it was probably during a copy_to/from_user()
|
||||
*/
|
||||
return (unsigned long) &__memcpy_user_error_handler;
|
||||
}
|
||||
|
||||
extab = search_exception_tables(pc);
|
||||
if (extab)
|
||||
return extab->fixup;
|
||||
|
||||
return 0;
|
||||
|
||||
} /* end search_exception_table() */
|
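
For reference, search_one_table() above assumes each table entry pairs the address of an instruction that may fault with the address of its fixup handler, and that the table is sorted by instruction address; the sketch below merely restates that layout and is illustrative rather than a copy of the arch header:

/* illustrative sketch only -- not part of this file */
struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to resume at if it does fault */
};
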
arch/frv/mm/fault.c (new file, 327 additions)
@@ -0,0 +1,327 @@
/*
|
||||
* linux/arch/frv/mm/fault.c
|
||||
*
|
||||
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
|
||||
* - Written by David Howells (dhowells@redhat.com)
|
||||
* - Derived from arch/m68knommu/mm/fault.c
|
||||
* - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
|
||||
* - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
|
||||
*
|
||||
* Based on:
|
||||
*
|
||||
* linux/arch/m68k/mm/fault.c
|
||||
*
|
||||
* Copyright (C) 1995 Hamish Macdonald
|
||||
*/
|
||||
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/hardirq.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/gdb-stub.h>
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* This routine handles page faults. It determines the problem, and
|
||||
* then passes it off to one of the appropriate routines.
|
||||
*/
|
||||
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *mm;
|
||||
unsigned long _pme, lrai, lrad, fixup;
|
||||
unsigned long flags = 0;
|
||||
siginfo_t info;
|
||||
pgd_t *pge;
|
||||
pud_t *pue;
|
||||
pte_t *pte;
|
||||
int fault;
|
||||
|
||||
#if 0
|
||||
const char *atxc[16] = {
|
||||
[0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
|
||||
[0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
|
||||
};
|
||||
|
||||
printk("do_page_fault(%d,%lx [%s],%lx)\n",
|
||||
datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
|
||||
#endif
|
||||
|
||||
mm = current->mm;
|
||||
|
||||
/*
|
||||
* We fault-in kernel-space virtual memory on-demand. The
|
||||
* 'reference' page table is init_mm.pgd.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may
|
||||
* be in an interrupt or a critical region, and should
|
||||
* only copy the information from the master page table,
|
||||
* nothing more.
|
||||
*
|
||||
* This verifies that the fault happens in kernel space
|
||||
* and that the fault was a page not present (invalid) error
|
||||
*/
|
||||
if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
|
||||
if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
|
||||
goto kernel_pte_fault;
|
||||
if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
|
||||
goto kernel_pte_fault;
|
||||
}
|
||||
|
||||
info.si_code = SEGV_MAPERR;
|
||||
|
||||
/*
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
if (in_atomic() || !mm)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(__frame))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
vma = find_vma(mm, ear0);
|
||||
if (!vma)
|
||||
goto bad_area;
|
||||
if (vma->vm_start <= ear0)
|
||||
goto good_area;
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
goto bad_area;
|
||||
|
||||
if (user_mode(__frame)) {
|
||||
/*
|
||||
* accessing the stack below %esp is always a bug.
|
||||
* The "+ 32" is there due to some instructions (like
|
||||
* pusha) doing post-decrement on the stack and that
|
||||
* doesn't show up until later..
|
||||
*/
|
||||
if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
|
||||
#if 0
|
||||
printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
|
||||
current->pid, ear0, __frame->sp);
|
||||
show_registers(__frame);
|
||||
printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
|
||||
current->pid,
|
||||
__frame->pc,
|
||||
((u8*)__frame->pc)[0],
|
||||
((u8*)__frame->pc)[1],
|
||||
((u8*)__frame->pc)[2],
|
||||
((u8*)__frame->pc)[3],
|
||||
((u8*)__frame->pc)[4],
|
||||
((u8*)__frame->pc)[5],
|
||||
((u8*)__frame->pc)[6],
|
||||
((u8*)__frame->pc)[7]
|
||||
);
|
||||
#endif
|
||||
goto bad_area;
|
||||
}
|
||||
}
|
||||
|
||||
if (expand_stack(vma, ear0))
|
||||
goto bad_area;
|
||||
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so
|
||||
* we can handle it..
|
||||
*/
|
||||
good_area:
|
||||
info.si_code = SEGV_ACCERR;
|
||||
switch (esr0 & ESR0_ATXC) {
|
||||
default:
|
||||
/* handle write to write protected page */
|
||||
case ESR0_ATXC_WP_EXCEP:
|
||||
#ifdef TEST_VERIFY_AREA
|
||||
if (!(user_mode(__frame)))
|
||||
printk("WP fault at %08lx\n", __frame->pc);
|
||||
#endif
|
||||
if (!(vma->vm_flags & VM_WRITE))
|
||||
goto bad_area;
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
break;
|
||||
|
||||
/* handle read from protected page */
|
||||
case ESR0_ATXC_PRIV_EXCEP:
|
||||
goto bad_area;
|
||||
|
||||
/* handle read, write or exec on absent page
|
||||
* - can't support write without permitting read
|
||||
* - don't support execute without permitting read and vice-versa
|
||||
*/
|
||||
case ESR0_ATXC_AMRTLB_MISS:
|
||||
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
|
||||
goto bad_area;
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If for any reason at all we couldn't handle the fault,
|
||||
* make sure we exit gracefully rather than endlessly redo
|
||||
* the fault.
|
||||
*/
|
||||
fault = handle_mm_fault(mm, vma, ear0, flags);
|
||||
if (unlikely(fault & VM_FAULT_ERROR)) {
|
||||
if (fault & VM_FAULT_OOM)
|
||||
goto out_of_memory;
|
||||
else if (fault & VM_FAULT_SIGBUS)
|
||||
goto do_sigbus;
|
||||
BUG();
|
||||
}
|
||||
if (fault & VM_FAULT_MAJOR)
|
||||
current->maj_flt++;
|
||||
else
|
||||
current->min_flt++;
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
return;
|
||||
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory map..
|
||||
* Fix it, but check if it's kernel or user first..
|
||||
*/
|
||||
bad_area:
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/* User mode accesses just cause a SIGSEGV */
|
||||
if (user_mode(__frame)) {
|
||||
info.si_signo = SIGSEGV;
|
||||
info.si_errno = 0;
|
||||
/* info.si_code has been set above */
|
||||
info.si_addr = (void *) ear0;
|
||||
force_sig_info(SIGSEGV, &info, current);
|
||||
return;
|
||||
}
|
||||
|
||||
no_context:
|
||||
/* are we prepared to handle this kernel fault? */
|
||||
if ((fixup = search_exception_table(__frame->pc)) != 0) {
|
||||
__frame->pc = fixup;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Oops. The kernel tried to access some bad page. We'll have to
|
||||
* terminate things with extreme prejudice.
|
||||
*/
|
||||
|
||||
bust_spinlocks(1);
|
||||
|
||||
if (ear0 < PAGE_SIZE)
|
||||
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
|
||||
else
|
||||
printk(KERN_ALERT "Unable to handle kernel paging request");
|
||||
printk(" at virtual addr %08lx\n", ear0);
|
||||
printk(" PC : %08lx\n", __frame->pc);
|
||||
printk(" EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);
|
||||
|
||||
asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
|
||||
asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));
|
||||
|
||||
printk(KERN_ALERT " LRAI: %08lx\n", lrai);
|
||||
printk(KERN_ALERT " LRAD: %08lx\n", lrad);
|
||||
|
||||
__break_hijack_kernel_event();
|
||||
|
||||
pge = pgd_offset(current->mm, ear0);
|
||||
pue = pud_offset(pge, ear0);
|
||||
_pme = pue->pue[0].ste[0];
|
||||
|
||||
printk(KERN_ALERT " PGE : %8p { PME %08lx }\n", pge, _pme);
|
||||
|
||||
if (_pme & xAMPRx_V) {
|
||||
unsigned long dampr, damlr, val;
|
||||
|
||||
asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
|
||||
: "=&r"(dampr), "=r"(damlr)
|
||||
: "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
|
||||
);
|
||||
|
||||
pte = (pte_t *) damlr + __pte_index(ear0);
|
||||
val = pte_val(*pte);
|
||||
|
||||
asm volatile("movgs %0,dampr2" :: "r" (dampr));
|
||||
|
||||
printk(KERN_ALERT " PTE : %8p { %08lx }\n", pte, val);
|
||||
}
|
||||
|
||||
die_if_kernel("Oops\n");
|
||||
do_exit(SIGKILL);
|
||||
|
||||
/*
|
||||
* We ran out of memory, or some other thing happened to us that made
|
||||
* us unable to handle the page fault gracefully.
|
||||
*/
|
||||
out_of_memory:
|
||||
up_read(&mm->mmap_sem);
|
||||
if (!user_mode(__frame))
|
||||
goto no_context;
|
||||
pagefault_out_of_memory();
|
||||
return;
|
||||
|
||||
do_sigbus:
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/*
|
||||
* Send a sigbus, regardless of whether we were in kernel
|
||||
* or user mode.
|
||||
*/
|
||||
info.si_signo = SIGBUS;
|
||||
info.si_errno = 0;
|
||||
info.si_code = BUS_ADRERR;
|
||||
info.si_addr = (void *) ear0;
|
||||
force_sig_info(SIGBUS, &info, current);
|
||||
|
||||
/* Kernel mode? Handle exceptions or die */
|
||||
if (!user_mode(__frame))
|
||||
goto no_context;
|
||||
return;
|
||||
|
||||
/*
|
||||
* The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
|
||||
*/
|
||||
kernel_pte_fault:
|
||||
{
|
||||
/*
|
||||
* Synchronize this task's top level page-table
|
||||
* with the 'reference' page table.
|
||||
*
|
||||
* Do _not_ use "tsk" here. We might be inside
|
||||
* an interrupt in the middle of a task switch..
|
||||
*/
|
||||
int index = pgd_index(ear0);
|
||||
pgd_t *pgd, *pgd_k;
|
||||
pud_t *pud, *pud_k;
|
||||
pmd_t *pmd, *pmd_k;
|
||||
pte_t *pte_k;
|
||||
|
||||
pgd = (pgd_t *) __get_TTBR();
|
||||
pgd = (pgd_t *)__va(pgd) + index;
|
||||
pgd_k = ((pgd_t *)(init_mm.pgd)) + index;
|
||||
|
||||
if (!pgd_present(*pgd_k))
|
||||
goto no_context;
|
||||
//set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line
|
||||
|
||||
pud_k = pud_offset(pgd_k, ear0);
|
||||
if (!pud_present(*pud_k))
|
||||
goto no_context;
|
||||
|
||||
pmd_k = pmd_offset(pud_k, ear0);
|
||||
if (!pmd_present(*pmd_k))
|
||||
goto no_context;
|
||||
|
||||
pud = pud_offset(pgd, ear0);
|
||||
pmd = pmd_offset(pud, ear0);
|
||||
set_pmd(pmd, *pmd_k);
|
||||
|
||||
pte_k = pte_offset_kernel(pmd_k, ear0);
|
||||
if (!pte_present(*pte_k))
|
||||
goto no_context;
|
||||
return;
|
||||
}
|
||||
} /* end do_page_fault() */
|
arch/frv/mm/highmem.c (new file, 89 additions)
@@ -0,0 +1,89 @@
/* highmem.c: arch-specific highmem stuff
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
void *kmap(struct page *page)
|
||||
{
|
||||
might_sleep();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
return kmap_high(page);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(kmap);
|
||||
|
||||
void kunmap(struct page *page)
|
||||
{
|
||||
if (in_interrupt())
|
||||
BUG();
|
||||
if (!PageHighMem(page))
|
||||
return;
|
||||
kunmap_high(page);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(kunmap);
|
||||
|
||||
struct page *kmap_atomic_to_page(void *ptr)
|
||||
{
|
||||
return virt_to_page(ptr);
|
||||
}
|
||||
|
||||
void *kmap_atomic(struct page *page)
|
||||
{
|
||||
unsigned long paddr;
|
||||
int type;
|
||||
|
||||
pagefault_disable();
|
||||
type = kmap_atomic_idx_push();
|
||||
paddr = page_to_phys(page);
|
||||
|
||||
switch (type) {
|
||||
/*
|
||||
* The first 4 primary maps are reserved for architecture code
|
||||
*/
|
||||
case 0: return __kmap_atomic_primary(0, paddr, 6);
|
||||
case 1: return __kmap_atomic_primary(0, paddr, 7);
|
||||
case 2: return __kmap_atomic_primary(0, paddr, 8);
|
||||
case 3: return __kmap_atomic_primary(0, paddr, 9);
|
||||
case 4: return __kmap_atomic_primary(0, paddr, 10);
|
||||
|
||||
case 5 ... 5 + NR_TLB_LINES - 1:
|
||||
return __kmap_atomic_secondary(type - 5, paddr);
|
||||
|
||||
default:
|
||||
BUG();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(kmap_atomic);
|
||||
|
||||
void __kunmap_atomic(void *kvaddr)
|
||||
{
|
||||
int type = kmap_atomic_idx();
|
||||
switch (type) {
|
||||
case 0: __kunmap_atomic_primary(0, 6); break;
|
||||
case 1: __kunmap_atomic_primary(0, 7); break;
|
||||
case 2: __kunmap_atomic_primary(0, 8); break;
|
||||
case 3: __kunmap_atomic_primary(0, 9); break;
|
||||
case 4: __kunmap_atomic_primary(0, 10); break;
|
||||
|
||||
case 5 ... 5 + NR_TLB_LINES - 1:
|
||||
__kunmap_atomic_secondary(type - 5, kvaddr);
|
||||
break;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
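
A short sketch of the atomic-mapping pattern these routines provide for brief, non-sleeping access to a (possibly highmem) page; the checksum loop is an illustrative assumption:

/* illustrative sketch only -- not part of this file */
#include <linux/highmem.h>

/* sum the bytes of a page without sleeping */
static unsigned long example_sum_page(struct page *page)
{
	unsigned char *p = kmap_atomic(page);	/* claims a primary/secondary slot */
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += p[i];

	kunmap_atomic(p);			/* releases the slot, re-enables pagefaults */
	return sum;
}
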
arch/frv/mm/init.c (new file, 178 additions)
@@ -0,0 +1,178 @@
/* init.c: memory initialisation for FRV
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Derived from:
|
||||
* - linux/arch/m68knommu/mm/init.c
|
||||
* - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>,
|
||||
* - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
|
||||
* - linux/arch/m68k/mm/init.c
|
||||
* - Copyright (C) 1995 Hamish Macdonald
|
||||
*/
|
||||
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/virtconvert.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
#undef DEBUG
|
||||
|
||||
/*
|
||||
* BAD_PAGE is the page that is used for page faults when linux
|
||||
* is out-of-memory. Older versions of linux just did a
|
||||
* do_exit(), but using this instead means there is less risk
|
||||
* for a process dying in kernel mode, possibly leaving a inode
|
||||
* unused etc..
|
||||
*
|
||||
* BAD_PAGETABLE is the accompanying page-table: it is initialized
|
||||
* to point to BAD_PAGE entries.
|
||||
*
|
||||
* ZERO_PAGE is a special page that is used for zero-initialized
|
||||
* data and COW.
|
||||
*/
|
||||
static unsigned long empty_bad_page_table;
|
||||
static unsigned long empty_bad_page;
|
||||
|
||||
unsigned long empty_zero_page;
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* paging_init() continues the virtual memory environment setup which
|
||||
* was begun by the code in arch/head.S.
|
||||
* The parameters are pointers to where to stick the starting and ending
|
||||
* addresses of available kernel virtual memory.
|
||||
*/
|
||||
void __init paging_init(void)
|
||||
{
|
||||
unsigned long zones_size[MAX_NR_ZONES] = {0, };
|
||||
|
||||
/* allocate some pages for kernel housekeeping tasks */
|
||||
empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
|
||||
empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
|
||||
empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
|
||||
|
||||
memset((void *) empty_zero_page, 0, PAGE_SIZE);
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (num_physpages - num_mappedpages) {
|
||||
pgd_t *pge;
|
||||
pud_t *pue;
|
||||
pmd_t *pme;
|
||||
|
||||
pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);
|
||||
|
||||
pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
|
||||
pue = pud_offset(pge, PKMAP_BASE);
|
||||
pme = pmd_offset(pue, PKMAP_BASE);
|
||||
__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* distribute the allocatable pages across the various zones and pass them to the allocator
|
||||
*/
|
||||
zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
|
||||
#endif
|
||||
|
||||
free_area_init(zones_size);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/* initialise init's MMU context */
|
||||
init_new_context(&init_task, &init_mm);
|
||||
#endif
|
||||
|
||||
} /* end paging_init() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
*
|
||||
*/
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
|
||||
unsigned long tmp;
|
||||
#ifdef CONFIG_MMU
|
||||
unsigned long loop, pfn;
|
||||
int datapages = 0;
|
||||
#endif
|
||||
int codek = 0, datak = 0;
|
||||
|
||||
/* this will put all low memory onto the freelists */
|
||||
totalram_pages = free_all_bootmem();
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
for (loop = 0 ; loop < npages ; loop++)
|
||||
if (PageReserved(&mem_map[loop]))
|
||||
datapages++;
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--)
|
||||
free_highmem_page(&mem_map[pfn]);
|
||||
#endif
|
||||
|
||||
codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
|
||||
datak = datapages << (PAGE_SHIFT - 10);
|
||||
|
||||
#else
|
||||
codek = (_etext - _stext) >> 10;
|
||||
datak = 0; //(__bss_stop - _sdata) >> 10;
|
||||
#endif
|
||||
|
||||
tmp = nr_free_pages() << PAGE_SHIFT;
|
||||
printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
|
||||
tmp >> 10,
|
||||
npages << (PAGE_SHIFT - 10),
|
||||
(rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
|
||||
rom_length >> 10,
|
||||
codek,
|
||||
datak
|
||||
);
|
||||
|
||||
} /* end mem_init() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* free the memory that was only required for initialisation
|
||||
*/
|
||||
void free_initmem(void)
|
||||
{
|
||||
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
|
||||
free_initmem_default(0);
|
||||
#endif
|
||||
} /* end free_initmem() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* free the initial ramdisk memory
|
||||
*/
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
} /* end free_initrd_mem() */
|
||||
#endif
|
arch/frv/mm/kmap.c (new file, 51 additions)
@@ -0,0 +1,51 @@
/* kmap.c: ioremapping handlers
|
||||
*
|
||||
* Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
* - Derived from arch/m68k/mm/kmap.c
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#undef DEBUG
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* Map some physical address range into the kernel address space.
|
||||
*/
|
||||
|
||||
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
|
||||
{
|
||||
return (void __iomem *)physaddr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unmap an ioremap()ed region again
|
||||
*/
|
||||
void iounmap(void volatile __iomem *addr)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Set new cache mode for some kernel address space.
|
||||
* The caller must push data for that range itself, if such data may already
|
||||
* be in the cache.
|
||||
*/
|
||||
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
|
||||
{
|
||||
}
|
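
Since __ioremap() above simply returns the physical address unchanged and iounmap() is a no-op on this port, a driver's usual ioremap()/readl() sequence still works; a hedged sketch, with a hypothetical device base and register offset:

/* illustrative sketch only -- not part of this file */
#include <linux/io.h>

#define EXAMPLE_MMIO_BASE	0xfe000000UL	/* hypothetical device base address */
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register offset */

static unsigned int example_read_status(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, 0x100);
	unsigned int status;

	if (!regs)
		return 0;

	status = readl(regs + EXAMPLE_STATUS_REG);
	iounmap(regs);
	return status;
}
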
arch/frv/mm/mmu-context.c (new file, 208 additions)
@@ -0,0 +1,208 @@
/* mmu-context.c: MMU context allocation and management
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#define NR_CXN 4096
|
||||
|
||||
static unsigned long cxn_bitmap[NR_CXN / (sizeof(unsigned long) * 8)];
|
||||
static LIST_HEAD(cxn_owners_lru);
|
||||
static DEFINE_SPINLOCK(cxn_owners_lock);
|
||||
|
||||
int __nongpreldata cxn_pinned = -1;
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* initialise a new context
|
||||
*/
|
||||
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
memset(&mm->context, 0, sizeof(mm->context));
|
||||
INIT_LIST_HEAD(&mm->context.id_link);
|
||||
mm->context.itlb_cached_pge = 0xffffffffUL;
|
||||
mm->context.dtlb_cached_pge = 0xffffffffUL;
|
||||
|
||||
return 0;
|
||||
} /* end init_new_context() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* make sure a kernel MMU context has a CPU context number
|
||||
* - call with cxn_owners_lock held
|
||||
*/
|
||||
static unsigned get_cxn(mm_context_t *ctx)
|
||||
{
|
||||
struct list_head *_p;
|
||||
mm_context_t *p;
|
||||
unsigned cxn;
|
||||
|
||||
if (!list_empty(&ctx->id_link)) {
|
||||
list_move_tail(&ctx->id_link, &cxn_owners_lru);
|
||||
}
|
||||
else {
|
||||
/* find the first unallocated context number
|
||||
* - 0 is reserved for the kernel
|
||||
*/
|
||||
cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
|
||||
if (cxn < NR_CXN) {
|
||||
set_bit(cxn, cxn_bitmap);
|
||||
}
|
||||
else {
|
||||
/* none remaining - need to steal someone else's cxn */
|
||||
p = NULL;
|
||||
list_for_each(_p, &cxn_owners_lru) {
|
||||
p = list_entry(_p, mm_context_t, id_link);
|
||||
if (!p->id_busy && p->id != cxn_pinned)
|
||||
break;
|
||||
}
|
||||
|
||||
BUG_ON(_p == &cxn_owners_lru);
|
||||
|
||||
cxn = p->id;
|
||||
p->id = 0;
|
||||
list_del_init(&p->id_link);
|
||||
__flush_tlb_mm(cxn);
|
||||
}
|
||||
|
||||
ctx->id = cxn;
|
||||
list_add_tail(&ctx->id_link, &cxn_owners_lru);
|
||||
}
|
||||
|
||||
return ctx->id;
|
||||
} /* end get_cxn() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* restore the current TLB miss handler mapped page tables into the MMU context and set up a
|
||||
* mapping for the page directory
|
||||
*/
|
||||
void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd)
|
||||
{
|
||||
unsigned long _pgd;
|
||||
|
||||
_pgd = virt_to_phys(pgd);
|
||||
|
||||
/* save the state of the outgoing MMU context */
|
||||
old->id_busy = 0;
|
||||
|
||||
asm volatile("movsg scr0,%0" : "=r"(old->itlb_cached_pge));
|
||||
asm volatile("movsg dampr4,%0" : "=r"(old->itlb_ptd_mapping));
|
||||
asm volatile("movsg scr1,%0" : "=r"(old->dtlb_cached_pge));
|
||||
asm volatile("movsg dampr5,%0" : "=r"(old->dtlb_ptd_mapping));
|
||||
|
||||
/* select an MMU context number */
|
||||
spin_lock(&cxn_owners_lock);
|
||||
get_cxn(ctx);
|
||||
ctx->id_busy = 1;
|
||||
spin_unlock(&cxn_owners_lock);
|
||||
|
||||
asm volatile("movgs %0,cxnr" : : "r"(ctx->id));
|
||||
|
||||
/* restore the state of the incoming MMU context */
|
||||
asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge));
|
||||
asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping));
|
||||
asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge));
|
||||
asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping));
|
||||
|
||||
/* map the PGD into uncached virtual memory */
|
||||
asm volatile("movgs %0,ttbr" : : "r"(_pgd));
|
||||
asm volatile("movgs %0,dampr3"
|
||||
:: "r"(_pgd | xAMPRx_L | xAMPRx_M | xAMPRx_SS_16Kb |
|
||||
xAMPRx_S | xAMPRx_C | xAMPRx_V));
|
||||
|
||||
} /* end change_mm_context() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* finished with an MMU context number
|
||||
*/
|
||||
void destroy_context(struct mm_struct *mm)
|
||||
{
|
||||
mm_context_t *ctx = &mm->context;
|
||||
|
||||
spin_lock(&cxn_owners_lock);
|
||||
|
||||
if (!list_empty(&ctx->id_link)) {
|
||||
if (ctx->id == cxn_pinned)
|
||||
cxn_pinned = -1;
|
||||
|
||||
list_del_init(&ctx->id_link);
|
||||
clear_bit(ctx->id, cxn_bitmap);
|
||||
__flush_tlb_mm(ctx->id);
|
||||
ctx->id = 0;
|
||||
}
|
||||
|
||||
spin_unlock(&cxn_owners_lock);
|
||||
} /* end destroy_context() */
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* display the MMU context a process is currently using
|
||||
*/
|
||||
#ifdef CONFIG_PROC_FS
|
||||
char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer)
|
||||
{
|
||||
spin_lock(&cxn_owners_lock);
|
||||
buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id);
|
||||
spin_unlock(&cxn_owners_lock);
|
||||
|
||||
return buffer;
|
||||
} /* end proc_pid_status_frv_cxnr() */
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* (un)pin a process's mm_struct's MMU context ID
|
||||
*/
|
||||
int cxn_pin_by_pid(pid_t pid)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm = NULL;
|
||||
int ret;
|
||||
|
||||
/* unpin if pid is zero */
|
||||
if (pid == 0) {
|
||||
cxn_pinned = -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = -ESRCH;
|
||||
|
||||
/* get a handle on the mm_struct */
|
||||
read_lock(&tasklist_lock);
|
||||
tsk = find_task_by_vpid(pid);
|
||||
if (tsk) {
|
||||
ret = -EINVAL;
|
||||
|
||||
task_lock(tsk);
|
||||
if (tsk->mm) {
|
||||
mm = tsk->mm;
|
||||
atomic_inc(&mm->mm_users);
|
||||
ret = 0;
|
||||
}
|
||||
task_unlock(tsk);
|
||||
}
|
||||
read_unlock(&tasklist_lock);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* make sure it has a CXN and pin it */
|
||||
spin_lock(&cxn_owners_lock);
|
||||
cxn_pinned = get_cxn(&mm->context);
|
||||
spin_unlock(&cxn_owners_lock);
|
||||
|
||||
mmput(mm);
|
||||
return 0;
|
||||
} /* end cxn_pin_by_pid() */
|
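
A minimal sketch of the pin/unpin convention implemented above (a pid of zero releases the pin); the wrapper function and the choice of pid are illustrative assumptions:

/* illustrative sketch only -- not part of this file */
#include <linux/types.h>

static int example_pin_context(pid_t pid)
{
	int err;

	/* keep this process's context number resident; get_cxn() will not steal it */
	err = cxn_pin_by_pid(pid);
	if (err)
		return err;

	/* ... latency-sensitive phase runs here ... */

	/* passing pid 0 drops the pin again */
	return cxn_pin_by_pid(0);
}
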
arch/frv/mm/pgalloc.c (new file, 153 additions)
@@ -0,0 +1,153 @@
/* pgalloc.c: page directory & page table allocation
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/quicklist.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
|
||||
|
||||
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
|
||||
{
|
||||
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
|
||||
if (pte)
|
||||
clear_page(pte);
|
||||
return pte;
|
||||
}
|
||||
|
||||
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
#ifdef CONFIG_HIGHPTE
|
||||
page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
|
||||
#else
|
||||
page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
|
||||
#endif
|
||||
if (page) {
|
||||
clear_highpage(page);
|
||||
pgtable_page_ctor(page);
|
||||
flush_dcache_page(page);
|
||||
}
|
||||
return page;
|
||||
}
|
||||
|
||||
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
|
||||
{
|
||||
unsigned long *__ste_p = pmdptr->ste;
|
||||
int loop;
|
||||
|
||||
if (!pmd) {
|
||||
memset(__ste_p, 0, PME_SIZE);
|
||||
}
|
||||
else {
|
||||
BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));
|
||||
|
||||
for (loop = PME_SIZE; loop > 0; loop -= 4) {
|
||||
*__ste_p++ = pmd;
|
||||
pmd += __frv_PT_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
|
||||
}
|
||||
|
||||
/*
|
||||
* List of all pgd's needed for non-PAE so it can invalidate entries
|
||||
* in both cached and uncached pgd's; not needed for PAE since the
|
||||
* kernel pmd is shared. If PAE were not to share the pmd a similar
|
||||
* tactic would be needed. This is essentially codepath-based locking
|
||||
* against pageattr.c; it is the unique case in which a valid change
|
||||
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
|
||||
* vmalloc faults work because attached pagetables are never freed.
|
||||
* If the locking proves to be non-performant, a ticketing scheme with
|
||||
* checks at dup_mmap(), exec(), and other mmlist addition points
|
||||
* could be used. The locking scheme was chosen on the basis of
|
||||
* manfred's recommendations and having no core impact whatsoever.
|
||||
* -- nyc
|
||||
*/
|
||||
DEFINE_SPINLOCK(pgd_lock);
|
||||
struct page *pgd_list;
|
||||
|
||||
static inline void pgd_list_add(pgd_t *pgd)
|
||||
{
|
||||
struct page *page = virt_to_page(pgd);
|
||||
page->index = (unsigned long) pgd_list;
|
||||
if (pgd_list)
|
||||
set_page_private(pgd_list, (unsigned long) &page->index);
|
||||
pgd_list = page;
|
||||
set_page_private(page, (unsigned long)&pgd_list);
|
||||
}
|
||||
|
||||
static inline void pgd_list_del(pgd_t *pgd)
|
||||
{
|
||||
struct page *next, **pprev, *page = virt_to_page(pgd);
|
||||
next = (struct page *) page->index;
|
||||
pprev = (struct page **) page_private(page);
|
||||
*pprev = next;
|
||||
if (next)
|
||||
set_page_private(next, (unsigned long) pprev);
|
||||
}
|
||||
|
||||
void pgd_ctor(void *pgd)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (PTRS_PER_PMD == 1)
|
||||
spin_lock_irqsave(&pgd_lock, flags);
|
||||
|
||||
memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
|
||||
swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
|
||||
(PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));
|
||||
|
||||
if (PTRS_PER_PMD > 1)
|
||||
return;
|
||||
|
||||
pgd_list_add(pgd);
|
||||
spin_unlock_irqrestore(&pgd_lock, flags);
|
||||
memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
|
||||
}
|
||||
|
||||
/* never called when PTRS_PER_PMD > 1 */
|
||||
void pgd_dtor(void *pgd)
|
||||
{
|
||||
unsigned long flags; /* can be called from interrupt context */
|
||||
|
||||
spin_lock_irqsave(&pgd_lock, flags);
|
||||
pgd_list_del(pgd);
|
||||
spin_unlock_irqrestore(&pgd_lock, flags);
|
||||
}
|
||||
|
||||
pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||
{
|
||||
return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
|
||||
}
|
||||
|
||||
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||
{
|
||||
/* in the non-PAE case, clear_page_tables() clears user pgd entries */
|
||||
quicklist_free(0, pgd_dtor, pgd);
|
||||
}
|
||||
|
||||
void __init pgtable_cache_init(void)
|
||||
{
|
||||
}
|
||||
|
||||
void check_pgt_cache(void)
|
||||
{
|
||||
quicklist_trim(0, pgd_dtor, 25, 16);
|
||||
}
|
||||
|
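
pgd_list above is an intrusive singly linked list threaded through page->index (the next pointer) and page_private() (a pointer back to the previous element's next field). A hedged sketch of a walker over that list, assuming the caller holds pgd_lock:

/* illustrative sketch only -- not part of this file */
#include <linux/mm.h>
#include <asm/pgalloc.h>

/* visit every pgd registered via pgd_ctor(); caller must hold pgd_lock */
static void example_for_each_pgd(void (*fn)(pgd_t *pgd))
{
	struct page *page;

	for (page = pgd_list; page; page = (struct page *) page->index)
		fn((pgd_t *) page_address(page));
}
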
arch/frv/mm/tlb-flush.S (new file, 184 additions)
@@ -0,0 +1,184 @@
/* tlb-flush.S: TLB flushing routines
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sys.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/spr-regs.h>
|
||||
|
||||
.macro DEBUG ch
|
||||
# sethi.p %hi(0xfeff9c00),gr4
|
||||
# setlo %lo(0xfeff9c00),gr4
|
||||
# setlos #\ch,gr5
|
||||
# stbi gr5,@(gr4,#0)
|
||||
# membar
|
||||
.endm
|
||||
|
||||
.section .rodata
|
||||
|
||||
# sizes corresponding to TPXR.LMAX
|
||||
.balign 1
|
||||
__tlb_lmax_sizes:
|
||||
.byte 0, 64, 0, 0
|
||||
.byte 0, 0, 0, 0
|
||||
.byte 0, 0, 0, 0
|
||||
.byte 0, 0, 0, 0
|
||||
|
||||
.section .text
|
||||
.balign 4
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# flush everything
|
||||
# - void __flush_tlb_all(void)
|
||||
#
|
||||
###############################################################################
|
||||
.globl __flush_tlb_all
|
||||
.type __flush_tlb_all,@function
|
||||
__flush_tlb_all:
|
||||
DEBUG 'A'
|
||||
|
||||
# kill cached PGE value
|
||||
setlos #0xffffffff,gr4
|
||||
movgs gr4,scr0
|
||||
movgs gr4,scr1
|
||||
|
||||
# kill AMPR-cached TLB values
|
||||
movgs gr0,iamlr1
|
||||
movgs gr0,iampr1
|
||||
movgs gr0,damlr1
|
||||
movgs gr0,dampr1
|
||||
|
||||
# find out how many lines there are
|
||||
movsg tpxr,gr5
|
||||
sethi.p %hi(__tlb_lmax_sizes),gr4
|
||||
srli gr5,#TPXR_LMAX_SHIFT,gr5
|
||||
setlo.p %lo(__tlb_lmax_sizes),gr4
|
||||
andi gr5,#TPXR_LMAX_SMASK,gr5
|
||||
ldub @(gr4,gr5),gr4
|
||||
|
||||
# now, we assume that the TLB line step is one page in size
|
||||
setlos.p #PAGE_SIZE,gr5
|
||||
setlos #0,gr6
|
||||
1:
|
||||
tlbpr gr6,gr0,#6,#0
|
||||
subicc.p gr4,#1,gr4,icc0
|
||||
add gr6,gr5,gr6
|
||||
bne icc0,#2,1b
|
||||
|
||||
DEBUG 'B'
|
||||
bralr
|
||||
|
||||
.size __flush_tlb_all, .-__flush_tlb_all
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# flush everything to do with one context
|
||||
# - void __flush_tlb_mm(unsigned long contextid [GR8])
|
||||
#
|
||||
###############################################################################
|
||||
.globl __flush_tlb_mm
|
||||
.type __flush_tlb_mm,@function
|
||||
__flush_tlb_mm:
|
||||
DEBUG 'M'
|
||||
|
||||
# kill cached PGE value
|
||||
setlos #0xffffffff,gr4
|
||||
movgs gr4,scr0
|
||||
movgs gr4,scr1
|
||||
|
||||
# specify the context we want to flush
|
||||
movgs gr8,tplr
|
||||
|
||||
# find out how many lines there are
|
||||
movsg tpxr,gr5
|
||||
sethi.p %hi(__tlb_lmax_sizes),gr4
|
||||
srli gr5,#TPXR_LMAX_SHIFT,gr5
|
||||
setlo.p %lo(__tlb_lmax_sizes),gr4
|
||||
andi gr5,#TPXR_LMAX_SMASK,gr5
|
||||
ldub @(gr4,gr5),gr4
|
||||
|
||||
# now, we assume that the TLB line step is one page in size
|
||||
setlos.p #PAGE_SIZE,gr5
|
||||
setlos #0,gr6
|
||||
0:
|
||||
tlbpr gr6,gr0,#5,#0
|
||||
subicc.p gr4,#1,gr4,icc0
|
||||
add gr6,gr5,gr6
|
||||
bne icc0,#2,0b
|
||||
|
||||
DEBUG 'N'
|
||||
bralr
|
||||
|
||||
.size __flush_tlb_mm, .-__flush_tlb_mm
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# flush a range of addresses from the TLB
|
||||
# - void __flush_tlb_page(unsigned long contextid [GR8],
|
||||
# unsigned long start [GR9])
|
||||
#
|
||||
###############################################################################
|
||||
.globl __flush_tlb_page
|
||||
.type __flush_tlb_page,@function
|
||||
__flush_tlb_page:
|
||||
# kill cached PGE value
|
||||
setlos #0xffffffff,gr4
|
||||
movgs gr4,scr0
|
||||
movgs gr4,scr1
|
||||
|
||||
# specify the context we want to flush
|
||||
movgs gr8,tplr
|
||||
|
||||
# zap the matching TLB line and AMR values
|
||||
setlos #~(PAGE_SIZE-1),gr5
|
||||
and gr9,gr5,gr9
|
||||
tlbpr gr9,gr0,#5,#0
|
||||
|
||||
bralr
|
||||
|
||||
.size __flush_tlb_page, .-__flush_tlb_page
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# flush a range of addresses from the TLB
|
||||
# - void __flush_tlb_range(unsigned long contextid [GR8],
|
||||
# unsigned long start [GR9],
|
||||
# unsigned long end [GR10])
|
||||
#
|
||||
###############################################################################
|
||||
.globl __flush_tlb_range
|
||||
.type __flush_tlb_range,@function
|
||||
__flush_tlb_range:
|
||||
# kill cached PGE value
|
||||
setlos #0xffffffff,gr4
|
||||
movgs gr4,scr0
|
||||
movgs gr4,scr1
|
||||
|
||||
# specify the context we want to flush
|
||||
movgs gr8,tplr
|
||||
|
||||
# round the start down to beginning of TLB line and end up to beginning of next TLB line
|
||||
setlos.p #~(PAGE_SIZE-1),gr5
|
||||
setlos #PAGE_SIZE,gr6
|
||||
subi.p gr10,#1,gr10
|
||||
and gr9,gr5,gr9
|
||||
and gr10,gr5,gr10
|
||||
2:
|
||||
tlbpr gr9,gr0,#5,#0
|
||||
subcc.p gr9,gr10,gr0,icc0
|
||||
add gr9,gr6,gr9
|
||||
bne icc0,#0,2b ; most likely a 1-page flush
|
||||
|
||||
bralr
|
||||
|
||||
.size __flush_tlb_range, .-__flush_tlb_range
|
arch/frv/mm/tlb-miss.S (new file, 629 additions)
@@ -0,0 +1,629 @@
/* tlb-miss.S: TLB miss handlers
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sys.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/spr-regs.h>
|
||||
|
||||
.section .text..tlbmiss
|
||||
.balign 4
|
||||
|
||||
.globl __entry_insn_mmu_miss
|
||||
__entry_insn_mmu_miss:
|
||||
break
|
||||
nop
|
||||
|
||||
.globl __entry_insn_mmu_exception
|
||||
__entry_insn_mmu_exception:
|
||||
break
|
||||
nop
|
||||
|
||||
.globl __entry_data_mmu_miss
|
||||
__entry_data_mmu_miss:
|
||||
break
|
||||
nop
|
||||
|
||||
.globl __entry_data_mmu_exception
|
||||
__entry_data_mmu_exception:
|
||||
break
|
||||
nop
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# handle a lookup failure of one sort or another in a kernel TLB handler
|
||||
# On entry:
|
||||
# GR29 - faulting address
|
||||
# SCR2 - saved CCR
|
||||
#
|
||||
###############################################################################
|
||||
.type __tlb_kernel_fault,@function
|
||||
__tlb_kernel_fault:
|
||||
# see if we're supposed to re-enable single-step mode upon return
|
||||
sethi.p %hi(__break_tlb_miss_return_break),gr30
|
||||
setlo %lo(__break_tlb_miss_return_break),gr30
|
||||
movsg pcsr,gr31
|
||||
|
||||
subcc gr31,gr30,gr0,icc0
|
||||
beq icc0,#0,__tlb_kernel_fault_sstep
|
||||
|
||||
movsg scr2,gr30
|
||||
movgs gr30,ccr
|
||||
movgs gr29,scr2 /* save EAR0 value */
|
||||
sethi.p %hi(__kernel_current_task),gr29
|
||||
setlo %lo(__kernel_current_task),gr29
|
||||
ldi.p @(gr29,#0),gr29 /* restore GR29 */
|
||||
|
||||
bra __entry_kernel_handle_mmu_fault
|
||||
|
||||
# we've got to re-enable single-stepping
|
||||
__tlb_kernel_fault_sstep:
|
||||
sethi.p %hi(__break_tlb_miss_real_return_info),gr30
|
||||
setlo %lo(__break_tlb_miss_real_return_info),gr30
|
||||
lddi @(gr30,0),gr30
|
||||
movgs gr30,pcsr
|
||||
movgs gr31,psr
|
||||
|
||||
movsg scr2,gr30
|
||||
movgs gr30,ccr
|
||||
movgs gr29,scr2 /* save EAR0 value */
|
||||
sethi.p %hi(__kernel_current_task),gr29
|
||||
setlo %lo(__kernel_current_task),gr29
|
||||
ldi.p @(gr29,#0),gr29 /* restore GR29 */
|
||||
bra __entry_kernel_handle_mmu_fault_sstep
|
||||
|
||||
.size __tlb_kernel_fault, .-__tlb_kernel_fault
|
||||
|
||||
###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
        .type __tlb_user_fault,@function
__tlb_user_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p %hi(__break_tlb_miss_return_break),gr30
        setlo %lo(__break_tlb_miss_return_break),gr30
        movsg pcsr,gr31
        subcc gr31,gr30,gr0,icc0
        beq icc0,#0,__tlb_user_fault_sstep

        movsg scr2,gr30
        movgs gr30,ccr
        bra __entry_uspace_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
        sethi.p %hi(__break_tlb_miss_real_return_info),gr30
        setlo %lo(__break_tlb_miss_real_return_info),gr30
        lddi @(gr30,0),gr30
        movgs gr30,pcsr
        movgs gr31,psr
        movsg scr2,gr30
        movgs gr30,ccr
        bra __entry_uspace_handle_mmu_fault_sstep

        .size __tlb_user_fault, .-__tlb_user_fault

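In rough C terms, the decision at the head of both fixup paths above is a single comparison: if the PC saved in PCSR sits on the TLB-miss return breakpoint, the fault interrupted the single-step machinery and the real return PC/PSR are reloaded from __break_tlb_miss_real_return_info before branching to the slow path. A minimal standalone sketch, with an invented function name that is not a kernel identifier:

#include <stdint.h>

/* Sketch of the check at the head of __tlb_kernel_fault/__tlb_user_fault:
 * the fault was taken while single-stepping if the saved PC is the TLB-miss
 * return breakpoint; in that case the handler reloads PCSR/PSR from
 * __break_tlb_miss_real_return_info before taking the slow path. */
int must_restore_single_step(uint32_t saved_pcsr, uint32_t return_break_addr)
{
        /* subcc gr31,gr30,gr0,icc0 ; beq icc0,#0,..._sstep */
        return saved_pcsr == return_break_addr;
}
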
###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
        .globl __entry_kernel_insn_tlb_miss
        .type __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo %lo(0xe1200004),gr30
        st gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo %lo(0xffc00100),gr30
        sth gr30,@(gr30,gr0)
        membar
#endif

        movsg ccr,gr30                  /* save CCR */
        movgs gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos 0x3ffc,gr30
        srli.p gr29,#12,gr31            /* use EAR0[25:14] as PTE index */
        bne icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and gr31,gr30,gr31
        movsg damlr4,gr30
        add gr30,gr31,gr31
        ldi @(gr31,#0),gr30             /* fetch the PTE */
        andicc gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p gr30,#_PAGE_ACCESSED,gr30
        beq icc0,#0,__tlb_kernel_fault  /* jump if PTE invalid */
        sti.p gr30,@(gr31,#0)           /* update the PTE */
        andi gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
        # - IAMPR1 has no WP bit, and we mustn't lose WP information
        movsg iampr1,gr31
        andicc gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq icc0,#0,__itlb_k_nopunt     /* punt not required */

        movsg iamlr1,gr31
        movgs gr31,tplr                 /* set TPLR.CXN */
        tlbpr gr31,gr0,#4,#0            /* delete matches from TLB, IAMR1, DAMR1 */

        movsg dampr1,gr31
        ori gr31,#xAMPRx_V,gr31         /* entry was invalidated by tlbpr #4 */
        movgs gr31,tppr
        movsg iamlr1,gr31               /* set TPLR.CXN */
        movgs gr31,tplr
        tlbpr gr31,gr0,#2,#0            /* save to the TLB */
        movsg tpxr,gr31                 /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos #0xfffff000,gr31
        bne icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

        # assemble the new TLB entry
        and gr29,gr31,gr29
        movsg cxnr,gr31
        or gr29,gr31,gr29
        movgs gr29,iamlr1               /* xAMLR = address | context number */
        movgs gr30,iampr1
        movgs gr29,damlr1
        movgs gr30,dampr1

        # return, restoring registers
        movsg scr2,gr30
        movgs gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo %lo(__kernel_current_task),gr29
        ldi @(gr29,#0),gr29
        rett #0
        beq icc0,#3,0                   /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
        srli gr29,#26,gr31              /* calculate PGE offset */
        slli gr31,#8,gr31               /* and clear bottom bits */

        movsg damlr3,gr30
        ld @(gr31,gr30),gr30            /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq icc0,#0,__tlb_kernel_fault  /* jump if PGE not present */
        slli.p gr31,#18,gr31
        bne icc1,#0,__itlb_k_bigpage
        movgs gr30,dampr4
        movgs gr31,scr0

        # we can now resume normal service
        setlos 0x3ffc,gr30
        srli.p gr29,#12,gr31            /* use EAR0[25:14] as PTE index */
        bra __itlb_k_PTD_mapped

__itlb_k_bigpage:
        break
        nop

        .size __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

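The address arithmetic used throughout this handler follows directly from its comments: EAR0[31:26] selects one of 64 PGEs (the index is shifted left by 8, so each PGE slot is 256 bytes into the mapped PGD), EAR0[25:14] selects one of 4096 four-byte PTEs in the mapped PTD (byte offset (EAR0 >> 12) & 0x3ffc), and SCR0 holds the 64MB-aligned base the cached PTD covers, so coverage is tested by shifting EAR0 ^ SCR0 right by 26. A minimal standalone C sketch of that arithmetic, with invented helper names:

#include <stdint.h>
#include <stdio.h>

/* Each PGE covers 2^26 bytes (64MB) and occupies a 256-byte slot in the
 * mapped page directory; each PTD holds 4096 four-byte PTEs. */
uint32_t pge_byte_offset(uint32_t ear0)
{
        return (ear0 >> 26) << 8;          /* srli #26 ; slli #8 */
}

uint32_t pte_byte_offset(uint32_t ear0)
{
        return (ear0 >> 12) & 0x3ffc;      /* srli #12 ; and 0x3ffc */
}

int cached_ptd_covers(uint32_t ear0, uint32_t scr)
{
        return ((ear0 ^ scr) >> 26) == 0;  /* srlicc gr31,#26 ; bne -> PTD miss */
}

int main(void)
{
        uint32_t addr = 0xc1234abcu;       /* arbitrary example address */

        printf("PGE offset %#x, PTE offset %#x, covered %d\n",
               (unsigned int)pge_byte_offset(addr),
               (unsigned int)pte_byte_offset(addr),
               cached_ptd_covers(addr, 0xc0000000u));
        return 0;
}
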
###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
        .globl __entry_kernel_data_tlb_miss
        .type __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo %lo(0xe1200004),gr30
        st gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo %lo(0xffc00100),gr30
        sth gr30,@(gr30,gr0)
        membar
#endif

        movsg ccr,gr30                  /* save CCR */
        movgs gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos 0x3ffc,gr30
        srli.p gr29,#12,gr31            /* use EAR0[25:14] as PTE index */
        bne icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and gr31,gr30,gr31
        movsg damlr5,gr30
        add gr30,gr31,gr31
        ldi @(gr31,#0),gr30             /* fetch the PTE */
        andicc gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p gr30,#_PAGE_ACCESSED,gr30
        beq icc0,#0,__tlb_kernel_fault  /* jump if PTE invalid */
        sti.p gr30,@(gr31,#0)           /* update the PTE */
        andi gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
        movsg dampr1,gr31
        andicc gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq icc0,#0,__dtlb_k_nopunt     /* punt not required */

        movsg damlr1,gr31
        movgs gr31,tplr                 /* set TPLR.CXN */
        tlbpr gr31,gr0,#4,#0            /* delete matches from TLB, IAMR1, DAMR1 */

        movsg dampr1,gr31
        ori gr31,#xAMPRx_V,gr31         /* entry was invalidated by tlbpr #4 */
        movgs gr31,tppr
        movsg damlr1,gr31               /* set TPLR.CXN */
        movgs gr31,tplr
        tlbpr gr31,gr0,#2,#0            /* save to the TLB */
        movsg tpxr,gr31                 /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos #0xfffff000,gr31
        bne icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

        # assemble the new TLB entry
        and gr29,gr31,gr29
        movsg cxnr,gr31
        or gr29,gr31,gr29
        movgs gr29,iamlr1               /* xAMLR = address | context number */
        movgs gr30,iampr1
        movgs gr29,damlr1
        movgs gr30,dampr1

        # return, restoring registers
        movsg scr2,gr30
        movgs gr30,ccr
        sethi.p %hi(__kernel_current_task),gr29
        setlo %lo(__kernel_current_task),gr29
        ldi @(gr29,#0),gr29
        rett #0
        beq icc0,#3,0                   /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
        srli gr29,#26,gr31              /* calculate PGE offset */
        slli gr31,#8,gr31               /* and clear bottom bits */

        movsg damlr3,gr30
        ld @(gr31,gr30),gr30            /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq icc0,#0,__tlb_kernel_fault  /* jump if PGE not present */
        slli.p gr31,#18,gr31
        bne icc1,#0,__dtlb_k_bigpage
        movgs gr30,dampr5
        movgs gr31,scr1

        # we can now resume normal service
        setlos 0x3ffc,gr30
        srli.p gr29,#12,gr31            /* use EAR0[25:14] as PTE index */
        bra __dtlb_k_PTD_mapped

__dtlb_k_bigpage:
        break
        nop

        .size __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

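The mapped-PTD path in both kernel handlers follows the same PTE protocol: fetch the entry, branch to the fault fixup if _PAGE_PRESENT is clear, store the entry back with _PAGE_ACCESSED set, then strip _PAGE_ACCESSED from the working copy before it is loaded into the AMPR registers. A hedged C sketch of that flow; the bit values below are placeholders, not the kernel's definitions:

#include <stdint.h>

/* Placeholder bit values -- not the kernel's _PAGE_* definitions. */
#define SKETCH_PAGE_PRESENT   0x001u
#define SKETCH_PAGE_ACCESSED  0x080u

/* Mirrors the flow of the mapped-PTD path: returns -1 if the PTE is not
 * present (the assembly branches to the fault fixup), otherwise writes the
 * entry back with the accessed bit set and hands back a working copy with
 * the accessed bit stripped, as the handler does before building the AMPRs. */
int fetch_and_mark_accessed(volatile uint32_t *slot, uint32_t *pte_out)
{
        uint32_t pte = *slot;                     /* ldi   @(gr31,#0),gr30 */

        if (!(pte & SKETCH_PAGE_PRESENT))         /* beq   ...,__tlb_*_fault */
                return -1;

        *slot = pte | SKETCH_PAGE_ACCESSED;       /* sti.p gr30,@(gr31,#0) */
        *pte_out = pte & ~SKETCH_PAGE_ACCESSED;   /* andi  ...,#~_PAGE_ACCESSED */
        return 0;
}
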
###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
        .globl __entry_user_insn_tlb_miss
        .type __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo %lo(0xe1200004),gr30
        st gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo %lo(0xffc00100),gr30
        sth gr30,@(gr30,gr0)
        membar
#endif

        movsg ccr,gr30                  /* save CCR */
        movgs gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos 0x3ffc,gr30
        srli.p gr28,#12,gr31            /* use EAR0[25:14] as PTE index */
        bne icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and gr31,gr30,gr31
        movsg damlr4,gr30
        add gr30,gr31,gr31
        ldi @(gr31,#0),gr30             /* fetch the PTE */
        andicc gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p gr30,#_PAGE_ACCESSED,gr30
        beq icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
        sti.p gr30,@(gr31,#0)           /* update the PTE */
        andi gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1/DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg dampr1,gr31
        andicc gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq icc0,#0,__itlb_u_nopunt     /* punt not required */

        movsg dampr1,gr31
        movgs gr31,tppr
        movsg damlr1,gr31               /* set TPLR.CXN */
        movgs gr31,tplr
        tlbpr gr31,gr0,#2,#0            /* save to the TLB */
        movsg tpxr,gr31                 /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos #0xfffff000,gr31
        bne icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

        # assemble the new TLB entry
        and gr28,gr31,gr28
        movsg cxnr,gr31
        or gr28,gr31,gr28
        movgs gr28,iamlr1               /* xAMLR = address | context number */
        movgs gr30,iampr1
        movgs gr28,damlr1
        movgs gr30,dampr1

        # return, restoring registers
        movsg scr2,gr30
        movgs gr30,ccr
        rett #0
        beq icc0,#3,0                   /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
        srli gr28,#26,gr31              /* calculate PGE offset */
        slli gr31,#8,gr31               /* and clear bottom bits */

        movsg damlr3,gr30
        ld @(gr31,gr30),gr30            /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq icc0,#0,__tlb_user_fault    /* jump if PGE not present */
        slli.p gr31,#18,gr31
        bne icc1,#0,__itlb_u_bigpage
        movgs gr30,dampr4
        movgs gr31,scr0

        # we can now resume normal service
        setlos 0x3ffc,gr30
        srli.p gr28,#12,gr31            /* use EAR0[25:14] as PTE index */
        bra __itlb_u_PTD_mapped

__itlb_u_bigpage:
        break
        nop

        .size __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

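In the no-punt path of each handler the new entry is assembled by masking the faulting address with the 0xfffff000 value the handler has just loaded and OR-ing in the context number read from CXNR to form the xAMLR value, while the updated PTE becomes the xAMPR value. A small illustrative C sketch, with placeholder struct and function names; the real values live in the IAMLR1/IAMPR1/DAMLR1/DAMPR1 registers rather than in memory:

#include <stdint.h>

struct sketch_tlb_entry {
        uint32_t amlr;   /* page-aligned address | context number */
        uint32_t ampr;   /* protection and physical bits taken from the PTE */
};

struct sketch_tlb_entry make_tlb_entry(uint32_t fault_addr, uint32_t cxnr,
                                       uint32_t pte)
{
        struct sketch_tlb_entry e;

        e.amlr = (fault_addr & 0xfffff000u) | cxnr;  /* and ; movsg cxnr ; or */
        e.ampr = pte;                                /* movgs gr30,iampr1/dampr1 */
        return e;
}
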
###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
        .globl __entry_user_data_tlb_miss
        .type __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
        sethi.p %hi(0xe1200004),gr30
        setlo %lo(0xe1200004),gr30
        st gr0,@(gr30,gr0)
        sethi.p %hi(0xffc00100),gr30
        setlo %lo(0xffc00100),gr30
        sth gr30,@(gr30,gr0)
        membar
#endif

        movsg ccr,gr30                  /* save CCR */
        movgs gr30,scr2

        # see if the cached page table mapping is appropriate
        srlicc.p gr31,#26,gr0,icc0
        setlos 0x3ffc,gr30
        srli.p gr28,#12,gr31            /* use EAR0[25:14] as PTE index */
        bne icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        and gr31,gr30,gr31
        movsg damlr5,gr30

__dtlb_u_using_iPTD:
        add gr30,gr31,gr31
        ldi @(gr31,#0),gr30             /* fetch the PTE */
        andicc gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p gr30,#_PAGE_ACCESSED,gr30
        beq icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
        sti.p gr30,@(gr31,#0)           /* update the PTE */
        andi gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        movsg dampr1,gr31
        andicc gr31,#xAMPRx_V,gr0,icc0
        setlos.p 0xfffff000,gr31
        beq icc0,#0,__dtlb_u_nopunt     /* punt not required */

        movsg dampr1,gr31
        movgs gr31,tppr
        movsg damlr1,gr31               /* set TPLR.CXN */
        movgs gr31,tplr
        tlbpr gr31,gr0,#2,#0            /* save to the TLB */
        movsg tpxr,gr31                 /* check the TLB write error flag */
        andicc.p gr31,#TPXR_E,gr0,icc0
        setlos #0xfffff000,gr31
        bne icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

        # assemble the new TLB entry
        and gr28,gr31,gr28
        movsg cxnr,gr31
        or gr28,gr31,gr28
        movgs gr28,iamlr1               /* xAMLR = address | context number */
        movgs gr30,iampr1
        movgs gr28,damlr1
        movgs gr30,dampr1

        # return, restoring registers
        movsg scr2,gr30
        movgs gr30,ccr
        rett #0
        beq icc0,#3,0                   /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - first of all, check the insn PGE cache - we may well get a hit there
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
        movsg scr0,gr31                 /* consult the insn-PGE-cache key */
        xor gr28,gr31,gr31
        srlicc gr31,#26,gr0,icc0
        srli gr28,#12,gr31              /* use EAR0[25:14] as PTE index */
        bne icc0,#0,__dtlb_u_iPGE_miss

        # what we're looking for is covered by the insn-PGE-cache
        setlos 0x3ffc,gr30
        and gr31,gr30,gr31
        movsg damlr4,gr30
        bra __dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
        srli gr28,#26,gr31              /* calculate PGE offset */
        slli gr31,#8,gr31               /* and clear bottom bits */

        movsg damlr3,gr30
        ld @(gr31,gr30),gr30            /* access the PGE */

        andicc.p gr30,#_PAGE_PRESENT,gr0,icc0
        andicc gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq icc0,#0,__tlb_user_fault    /* jump if PGE not present */
        slli.p gr31,#18,gr31
        bne icc1,#0,__dtlb_u_bigpage
        movgs gr30,dampr5
        movgs gr31,scr1

        # we can now resume normal service
        setlos 0x3ffc,gr30
        srli.p gr28,#12,gr31            /* use EAR0[25:14] as PTE index */
        bra __dtlb_u_PTD_mapped

__dtlb_u_bigpage:
        break
        nop

        .size __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss

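Unlike the other three handlers, the userspace data handler first consults the instruction-side PGE cache before walking the PGD: if the key in SCR0 shows that the PTD already mapped for instruction fetches (through DAMLR4) covers the faulting address, that PTD is reused via __dtlb_u_using_iPTD. A standalone C sketch of that shortcut, reusing the offset arithmetic from the earlier sketch; the struct and function names are invented for illustration:

#include <stdint.h>
#include <stddef.h>

/* Invented names; the real state is the SCR0 key plus the page table
 * mapped through DAMR4 by the instruction-side handler. */
struct sketch_pge_cache {
        uint32_t key;    /* 64MB-aligned base of the range the PTD covers */
        uint32_t *ptd;   /* virtual address of the mapped page table */
};

/* Returns the PTE slot if the instruction-side cache already covers addr,
 * or NULL to fall back to the full PGD walk (__dtlb_u_iPGE_miss). */
uint32_t *lookup_via_insn_cache(const struct sketch_pge_cache *icache,
                                uint32_t addr)
{
        if (((addr ^ icache->key) >> 26) != 0)
                return NULL;                       /* not covered */

        /* same EAR0[25:14] indexing as the mapped-PTD path */
        return (uint32_t *)((char *)icache->ptd + ((addr >> 12) & 0x3ffc));
}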