[librm] Support ioremap() for addresses above 4GB in a 64-bit build

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown 2016-02-26 15:34:28 +00:00
parent 5bd8427d3d
commit 99b5216b1c
6 changed files with 216 additions and 1 deletion
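
The practical effect: on a 64-bit build, ioremap() can now return a usable virtual address even when a device BAR sits above the 4GB boundary, by mapping it through 2MB page table entries rather than relying on the identity map. A minimal caller-side sketch, assuming iPXE's usual ioremap()/iounmap()/readl() from ipxe/io.h and pci_bar_start() from ipxe/pci.h; the register offset and mapping length are invented for illustration:

#include <errno.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>

#define EXAMPLE_REG_STATUS 0x08	/* illustrative register offset, not from this commit */

static int example_probe ( struct pci_device *pci ) {
	unsigned long bar;
	void *regs;
	uint32_t status;

	/* On a 64-bit build this BAR may legitimately lie above 4GB */
	bar = pci_bar_start ( pci, PCI_BASE_ADDRESS_0 );
	regs = ioremap ( bar, 0x1000 );
	if ( ! regs )
		return -ENODEV;

	/* Access the device through the mapping, then release it */
	status = readl ( regs + EXAMPLE_REG_STATUS );
	( void ) status;
	iounmap ( regs );
	return 0;
}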

View File

@@ -9,4 +9,6 @@
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <ipxe/iomap_pages.h>
#endif /* _BITS_IOMAP_H */

View File

@@ -0,0 +1,24 @@
#ifndef _IPXE_IOMAP_PAGES_H
#define _IPXE_IOMAP_PAGES_H

/** @file
 *
 * I/O mapping API using page tables
 *
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#ifdef IOMAP_PAGES
#define IOMAP_PREFIX_pages
#else
#define IOMAP_PREFIX_pages __pages_
#endif

static inline __always_inline unsigned long
IOMAP_INLINE ( pages, io_to_bus ) ( volatile const void *io_addr ) {
	/* Not easy to do; just return the CPU address for debugging purposes */
	return ( ( intptr_t ) io_addr );
}

#endif /* _IPXE_IOMAP_PAGES_H */
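
A note on the IOMAP_PREFIX_pages dance above: when IOMAP_PAGES is the configured I/O mapping API the prefix expands to nothing, so the inline becomes the canonical io_to_bus(); otherwise it gains a __pages_ prefix and stays out of the way of whichever API was selected. A stripped-down, standalone illustration of the token pasting involved (this is not iPXE's actual ipxe/api.h machinery, just the same idea in miniature):

#include <stdint.h>
#include <stdio.h>

#define IOMAP_PAGES	/* pretend this API was selected in config */

#ifdef IOMAP_PAGES
#define IOMAP_PREFIX_pages
#else
#define IOMAP_PREFIX_pages __pages_
#endif

/* Two-level paste so that the prefix macro is expanded before ## */
#define PASTE_( a, b ) a ## b
#define PASTE( a, b ) PASTE_ ( a, b )
#define IOMAP_INLINE( prefix, func ) PASTE ( prefix, func )

/* Expands to io_to_bus() here; it would become __pages_io_to_bus()
 * if IOMAP_PAGES were not defined */
static inline unsigned long
IOMAP_INLINE ( IOMAP_PREFIX_pages, io_to_bus ) ( volatile const void *io_addr ) {
	return ( ( intptr_t ) io_addr );
}

int main ( void ) {
	printf ( "%lx\n", io_to_bus ( ( void * ) 0x1234 ) );
	return 0;
}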

View File

@@ -378,6 +378,51 @@ struct interrupt_vector {
extern void set_interrupt_vector ( unsigned int intr, void *vector );

/** A page table */
struct page_table {
	/** Page address and flags */
	uint64_t page[512];
};

/** Page flags */
enum page_flags {
	/** Page is present */
	PAGE_P = 0x01,
	/** Page is writable */
	PAGE_RW = 0x02,
	/** Page is accessible by user code */
	PAGE_US = 0x04,
	/** Page-level write-through */
	PAGE_PWT = 0x08,
	/** Page-level cache disable */
	PAGE_PCD = 0x10,
	/** Page is a large page */
	PAGE_PS = 0x80,
	/** Page is the last page in an allocation
	 *
	 * This bit is ignored by the hardware. We use it to track
	 * the size of allocations made by ioremap().
	 */
	PAGE_LAST = 0x800,
};

/** The I/O space page table */
extern struct page_table io_pages;

/** I/O page size
 *
 * We choose to use 2MB pages for I/O space, to minimise the number of
 * page table entries required.
 */
#define IO_PAGE_SIZE 0x200000UL

/** I/O page base address
 *
 * We choose to place I/O space immediately above the identity-mapped
 * 32-bit address space.
 */
#define IO_BASE ( ( void * ) 0x100000000ULL )
#endif /* ASSEMBLY */
#endif /* LIBRM_H */
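
A quick worked example of how these definitions combine. The arithmetic below mirrors what ioremap_pages() (added later in this commit) does with them; the bus address is invented, and the constants are re-declared locally with ULL suffixes so the sketch builds as an ordinary user-space program:

#include <stdint.h>
#include <stdio.h>

#define PAGE_P   0x01
#define PAGE_RW  0x02
#define PAGE_US  0x04
#define PAGE_PWT 0x08
#define PAGE_PCD 0x10
#define PAGE_PS  0x80
#define IO_PAGE_SIZE 0x200000ULL
#define IO_BASE 0x100000000ULL

int main ( void ) {
	/* Hypothetical 64-bit BAR, not 2MB-aligned */
	uint64_t bus_addr = 0x20000001000ULL;

	/* Round down to a 2MB page boundary; remember the offset */
	uint64_t start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
	uint64_t offset = ( bus_addr - start );

	/* A present, writable, uncacheable 2MB page table entry */
	uint64_t pte = ( start | PAGE_P | PAGE_RW | PAGE_US |
			 PAGE_PWT | PAGE_PCD | PAGE_PS );

	/* Installed in io_pages.page[n], the device would then appear
	 * at IO_BASE + ( n * IO_PAGE_SIZE ) + offset */
	printf ( "PTE %#llx, offset %#llx\n",
		 ( unsigned long long ) pte,
		 ( unsigned long long ) offset );
	return 0;
}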

View File

@@ -1340,11 +1340,19 @@ interrupt_wrapper:
	 * These point to the PDPT. This creates some aliased
	 * addresses within unused portions of the 64-bit address
	 * space, but allows us to use just a single PDPT.
	 *
	 * - PDE[...] covering arbitrary 2MB portions of I/O space
	 *
	 *   These are 2MB pages created by ioremap() to cover I/O
	 *   device addresses.
	 */
pml4e:
	.space	SIZEOF_PT
	.size	pml4e, . - pml4e

	.globl	io_pages
	.equ	io_pages, pml4e

	/* Page directory pointer table entries (PDPTEs)
	 *
	 * This comprises:
@@ -1357,6 +1365,11 @@ pml4e:
	 * These point to the appropriate page directories (in pde_low)
	 * used to identity-map the whole of the 32-bit address space.
	 *
	 * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
	 *
	 *   This points back to the PML4, allowing the PML4 to be
	 *   (ab)used to hold 2MB pages used for I/O device addresses.
	 *
	 * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
	 *
	 *   This points back to the PDPT itself, allowing the PDPT to be
@@ -1421,6 +1434,10 @@ init_pages:
	/* Initialise PDPTE for negative 1GB */
	movl	%eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )

	/* Initialise PDPTE for I/O space */
	leal	( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )

	/* Initialise PDPTEs for low 4GB */
	movl	$PDE_LOW_PTS, %ecx
	leal	( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
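
To see why pointing PDPTE[0x004] back at the PML4 works: for a virtual address in [0x100000000,0x13fffffff] the hardware walk takes PML4 index (bits 47:39) = 0, PDPT index (bits 38:30) = 4, and page directory index (bits 29:21). PDPTE[4] names the pml4e table itself as that page directory, so the 2MB PDEs that ioremap_pages() later writes into io_pages (an alias for pml4e) are exactly what the walk finds. A small index computation, assuming only the standard 4-level x86-64 paging layout:

#include <stdint.h>
#include <stdio.h>

int main ( void ) {
	/* I/O virtual address corresponding to io_pages.page[3] */
	uint64_t virt = 0x100000000ULL + ( 3 * 0x200000ULL );

	unsigned int pml4_idx = ( ( virt >> 39 ) & 0x1ff );	/* 0 */
	unsigned int pdpt_idx = ( ( virt >> 30 ) & 0x1ff );	/* 4 */
	unsigned int pd_idx = ( ( virt >> 21 ) & 0x1ff );	/* 3 */

	printf ( "PML4[%u] -> PDPT[%u] -> PD[%u] (2MB page)\n",
		 pml4_idx, pdpt_idx, pd_idx );
	return 0;
}

Note that io_pages.page[0] and io_pages.page[0x1ff] are already occupied by the genuine PML4Es; ioremap_pages() skips any entry with PAGE_P set, so they are never overwritten.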

View File

@@ -8,6 +8,8 @@
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
#include <strings.h>
#include <assert.h>
#include <ipxe/profile.h>
#include <realmode.h>
#include <pic8259.h>
@@ -176,6 +178,123 @@ void __attribute__ (( regparm ( 1 ) )) interrupt ( int intr ) {
profile_exclude ( profiler );
}

/**
 * Map pages for I/O
 *
 * @v bus_addr		Bus address
 * @v len		Length of region
 * @ret io_addr		I/O address
 */
static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
	unsigned long start;
	unsigned int count;
	unsigned int stride;
	unsigned int first;
	unsigned int i;
	size_t offset;
	void *io_addr;

	DBGC ( &io_pages, "IO mapping %08lx+%zx\n", bus_addr, len );

	/* Sanity check */
	assert ( len != 0 );

	/* Round down start address to a page boundary */
	start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
	offset = ( bus_addr - start );
	assert ( offset < IO_PAGE_SIZE );

	/* Calculate number of pages required */
	count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
	assert ( count != 0 );
	assert ( count < ( sizeof ( io_pages.page ) /
			   sizeof ( io_pages.page[0] ) ) );

	/* Round up number of pages to a power of two */
	stride = ( 1 << fls ( count - 1 ) );
	assert ( count <= stride );

	/* Allocate pages */
	for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
				    sizeof ( io_pages.page[0] ) ) ;
	      first += stride ) {

		/* Calculate I/O address */
		io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );

		/* Check that page table entries are available */
		for ( i = first ; i < ( first + count ) ; i++ ) {
			if ( io_pages.page[i] & PAGE_P ) {
				io_addr = NULL;
				break;
			}
		}
		if ( ! io_addr )
			continue;

		/* Create page table entries */
		for ( i = first ; i < ( first + count ) ; i++ ) {
			io_pages.page[i] = ( start | PAGE_P | PAGE_RW |
					     PAGE_US | PAGE_PWT | PAGE_PCD |
					     PAGE_PS );
			start += IO_PAGE_SIZE;
		}

		/* Mark last page as being the last in this allocation */
		io_pages.page[ i - 1 ] |= PAGE_LAST;

		/* Return I/O address */
		DBGC ( &io_pages, "IO mapped %08lx+%zx to %p using PTEs "
		       "[%d-%d]\n", bus_addr, len, io_addr, first,
		       ( first + count - 1 ) );
		return io_addr;
	}

	DBGC ( &io_pages, "IO could not map %08lx+%zx\n", bus_addr, len );
	return NULL;
}

/**
 * Unmap pages for I/O
 *
 * @v io_addr		I/O address
 */
static void iounmap_pages ( volatile const void *io_addr ) {
	volatile const void *invalidate = io_addr;
	unsigned int first;
	unsigned int i;
	int is_last;

	DBGC ( &io_pages, "IO unmapping %p\n", io_addr );

	/* Calculate first page table entry */
	first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );

	/* Clear page table entries */
	for ( i = first ; ; i++ ) {

		/* Sanity check */
		assert ( io_pages.page[i] & PAGE_P );

		/* Check if this is the last page in this allocation */
		is_last = ( io_pages.page[i] & PAGE_LAST );

		/* Clear page table entry */
		io_pages.page[i] = 0;

		/* Invalidate TLB for this page */
		__asm__ __volatile__ ( "invlpg (%0)" : : "r" ( invalidate ) );
		invalidate += IO_PAGE_SIZE;

		/* Terminate if this was the last page */
		if ( is_last )
			break;
	}

	DBGC ( &io_pages, "IO unmapped %p using PTEs [%d-%d]\n",
	       io_addr, first, i );
}

PROVIDE_UACCESS_INLINE ( librm, phys_to_user );
PROVIDE_UACCESS_INLINE ( librm, user_to_phys );
PROVIDE_UACCESS_INLINE ( librm, virt_to_user );
@@ -186,3 +305,6 @@ PROVIDE_UACCESS_INLINE ( librm, memmove_user );
PROVIDE_UACCESS_INLINE ( librm, memset_user );
PROVIDE_UACCESS_INLINE ( librm, strlen_user );
PROVIDE_UACCESS_INLINE ( librm, memchr_user );
PROVIDE_IOMAP_INLINE ( pages, io_to_bus );
PROVIDE_IOMAP ( pages, ioremap, ioremap_pages );
PROVIDE_IOMAP ( pages, iounmap, iounmap_pages );
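
The sizing logic in ioremap_pages() rewards a worked example. Mapping 0x400000 bytes at bus address 0x100201000 (values invented for illustration): start rounds down to 0x100200000, offset is 0x1000, count = ceil( ( 0x1000 + 0x400000 ) / 0x200000 ) = 3 pages, and stride rounds that up to 4 so that candidate slots in io_pages are scanned at power-of-two multiples. A standalone sketch of just that arithmetic, using a local helper in place of fls() so it builds outside the iPXE tree:

#include <stdint.h>
#include <stdio.h>

#define IO_PAGE_SIZE 0x200000ULL

/* Highest set bit, 1-based; returns 0 for 0 (mirrors fls() semantics) */
static unsigned int fls64 ( uint64_t x ) {
	unsigned int bit = 0;
	while ( x ) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main ( void ) {
	uint64_t bus_addr = 0x100201000ULL;
	uint64_t len = 0x400000ULL;

	uint64_t start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
	uint64_t offset = ( bus_addr - start );

	/* Number of 2MB pages needed to cover [bus_addr,bus_addr+len) */
	unsigned int count = ( ( offset + len + IO_PAGE_SIZE - 1 ) /
			       IO_PAGE_SIZE );

	/* Round up to a power of two, as the allocator's slot stride */
	unsigned int stride = ( 1 << fls64 ( count - 1 ) );

	printf ( "offset %#llx, count %u, stride %u\n",
		 ( unsigned long long ) offset, count, stride );
	return 0;
}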

View File

@@ -11,7 +11,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#define UACCESS_LIBRM
#define IOAPI_X86
#define IOMAP_VIRT
#define PCIAPI_PCBIOS
#define TIMER_PCBIOS
#define CONSOLE_PCBIOS
@@ -23,6 +22,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#define TIME_RTC
#define REBOOT_PCBIOS
#ifdef __x86_64__
#define IOMAP_PAGES
#else
#define IOMAP_VIRT
#endif
#define IMAGE_ELF /* ELF image support */
#define IMAGE_MULTIBOOT /* MultiBoot image support */
#define IMAGE_PXE /* PXE image support */