
Add aarch64 example.

Andrew Walbran 2 years ago
parent
commit
dcf62d021e

+ 7 - 0
examples/aarch64/.cargo/config

@@ -0,0 +1,7 @@
+[build]
+target = "aarch64-unknown-none"
+
+[target.aarch64-unknown-none]
+rustflags = [
+    "-C", "link-arg=-Timage.ld",
+]
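
With this config, cargo build defaults to the bare-metal aarch64-unknown-none target, and the link-arg=-Timage.ld rustflag passes -Timage.ld to the linker, so the binary is laid out according to the image.ld linker script added below.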

+ 14 - 0
examples/aarch64/Cargo.toml

@@ -0,0 +1,14 @@
+[package]
+name = "aarch64"
+version = "0.1.0"
+authors = ["Andrew Walbran <[email protected]>"]
+edition = "2021"
+
+[dependencies]
+log = "0.4.17"
+psci = "0.1.1"
+spin = "0.9.4"
+virtio-drivers = { path = "../.." }
+
+[build-dependencies]
+cc = "1.0.73"

+ 42 - 0
examples/aarch64/Makefile

@@ -0,0 +1,42 @@
+target := aarch64-unknown-none
+mode := release
+kernel := target/$(target)/$(mode)/aarch64
+
+sysroot := $(shell rustc --print sysroot)
+objdump := $(shell find $(sysroot) -name llvm-objdump) --arch-name=aarch64
+objcopy := $(shell find $(sysroot) -name llvm-objcopy)
+
+BUILD_ARGS += --target $(target)
+ifeq ($(mode), release)
+	BUILD_ARGS += --release
+endif
+
+.PHONY: kernel clean qemu run env
+
+env:
+	rustup component add llvm-tools-preview rustfmt
+	rustup target add $(target)
+
+kernel:
+	cargo build $(BUILD_ARGS)
+
+asm: kernel
+	$(objdump) -d $(kernel) | less
+
+sym: kernel
+	$(objdump) -t $(kernel) | less
+
+header: kernel
+	$(objdump) -x $(kernel) | less
+
+clean:
+	cargo clean
+
+qemu: kernel
+	qemu-system-aarch64 \
+		-machine virt \
+		-cpu max \
+		-serial mon:stdio \
+		-kernel $(kernel)
+
+run: qemu
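
A typical workflow with this Makefile, assuming qemu-system-aarch64 is installed: run make env once to install the aarch64-unknown-none target and llvm-tools, then make qemu (or make run) to build the kernel in release mode and boot it on QEMU's virt machine with the serial console on stdio.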

+ 11 - 0
examples/aarch64/build.rs

@@ -0,0 +1,11 @@
+use cc::Build;
+use std::env;
+
+fn main() {
+    env::set_var("CROSS_COMPILE", "aarch64-linux-gnu");
+    Build::new()
+        .file("entry.S")
+        .file("exceptions.S")
+        .file("idmap.S")
+        .compile("empty")
+}
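
The build script uses the cc crate to assemble entry.S, exceptions.S and idmap.S with the aarch64-linux-gnu cross toolchain (selected via CROSS_COMPILE) and link the resulting static library into the example.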

+ 126 - 0
examples/aarch64/entry.S

@@ -0,0 +1,126 @@
+.macro adr_l, reg:req, sym:req
+	adrp \reg, \sym
+	add \reg, \reg, :lo12:\sym
+.endm
+
+.macro mov_i, reg:req, imm:req
+	movz \reg, :abs_g3:\imm
+	movk \reg, :abs_g2_nc:\imm
+	movk \reg, :abs_g1_nc:\imm
+	movk \reg, :abs_g0_nc:\imm
+.endm
+
+.set .L_MAIR_DEV_nGnRE,	0x04
+.set .L_MAIR_MEM_WBWA,	0xff
+.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
+
+/* 4 KiB granule size for TTBR0_EL1. */
+.set .L_TCR_TG0_4KB, 0x0 << 14
+/* 4 KiB granule size for TTBR1_EL1. */
+.set .L_TCR_TG1_4KB, 0x2 << 30
+/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
+.set .L_TCR_EPD1, 0x1 << 23
+/* Translation table walks for TTBR0_EL1 are inner shareable. */
+.set .L_TCR_SH_INNER, 0x3 << 12
+/*
+ * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
+ * cacheable.
+ */
+.set .L_TCR_RGN_OWB, 0x1 << 10
+/*
+ * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
+ * cacheable.
+ */
+.set .L_TCR_RGN_IWB, 0x1 << 8
+/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
+.set .L_TCR_T0SZ_512, 64 - 39
+.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
+.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
+
+/* Stage 1 instruction access cacheability is unaffected. */
+.set .L_SCTLR_ELx_I, 0x1 << 12
+/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
+.set .L_SCTLR_ELx_SA, 0x1 << 3
+/* Stage 1 data access cacheability is unaffected. */
+.set .L_SCTLR_ELx_C, 0x1 << 2
+/* EL0 and EL1 stage 1 MMU enabled. */
+.set .L_SCTLR_ELx_M, 0x1 << 0
+/* Privileged Access Never is unchanged on taking an exception to EL1. */
+.set .L_SCTLR_EL1_SPAN, 0x1 << 23
+/* SETEND instruction disabled at EL0 in aarch32 mode. */
+.set .L_SCTLR_EL1_SED, 0x1 << 8
+/* Various IT instructions are disabled at EL0 in aarch32 mode. */
+.set .L_SCTLR_EL1_ITD, 0x1 << 7
+.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
+.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
+.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1
+
+/**
+ * This is a generic entry point for an image. It carries out the operations required to prepare the
+ * loaded image to be run. Specifically, it sets up the MMU and caches, zeroes the bss section
+ * (using only registers x29 and x30 as scratch), prepares the stack, enables floating point, and
+ * sets up the exception vector. It preserves x0-x3
+ * for the Rust entry point, as these may contain boot parameters.
+ */
+.section .init.entry, "ax"
+.global entry
+entry:
+	/* Load and apply the memory management configuration, ready to enable MMU and caches. */
+	adrp x30, idmap
+	msr ttbr0_el1, x30
+
+	mov_i x30, .Lmairval
+	msr mair_el1, x30
+
+	mov_i x30, .Ltcrval
+	/* Copy the supported PA range into TCR_EL1.IPS. */
+	mrs x29, id_aa64mmfr0_el1
+	bfi x30, x29, #32, #4
+
+	msr tcr_el1, x30
+
+	mov_i x30, .Lsctlrval
+
+	/*
+	 * Ensure everything before this point has completed, then invalidate any potentially stale
+	 * local TLB entries before they start being used.
+	 */
+	isb
+	tlbi vmalle1
+	ic iallu
+	dsb nsh
+	isb
+
+	/*
+	 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
+	 */
+	msr sctlr_el1, x30
+	isb
+
+	/* Disable trapping floating point access in EL1. */
+	mrs x30, cpacr_el1
+	orr x30, x30, #(0x3 << 20)
+	msr cpacr_el1, x30
+	isb
+
+	/* Zero out the bss section. */
+	adr_l x29, bss_begin
+	adr_l x30, bss_end
+0:	cmp x29, x30
+	b.hs 1f
+	stp xzr, xzr, [x29], #16
+	b 0b
+
+1:	/* Prepare the stack. */
+	adr_l x30, boot_stack_end
+	mov sp, x30
+
+	/* Set up exception vector. */
+	adr x30, vector_table_el1
+	msr vbar_el1, x30
+
+	/* Call into Rust code. */
+	bl main
+
+	/* Loop forever waiting for interrupts. */
+2:	wfi
+	b 2b

+ 162 - 0
examples/aarch64/exceptions.S

@@ -0,0 +1,162 @@
+/**
+ * Saves the volatile registers onto the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers with 18 instructions
+ * left.
+ *
+ * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
+ * which can be used as the first and second arguments of a subsequent call.
+ */
+.macro save_volatile_to_stack
+	/* Reserve stack space and save registers x0-x18, x29 & x30. */
+	stp x0, x1, [sp, #-(8 * 24)]!
+	stp x2, x3, [sp, #8 * 2]
+	stp x4, x5, [sp, #8 * 4]
+	stp x6, x7, [sp, #8 * 6]
+	stp x8, x9, [sp, #8 * 8]
+	stp x10, x11, [sp, #8 * 10]
+	stp x12, x13, [sp, #8 * 12]
+	stp x14, x15, [sp, #8 * 14]
+	stp x16, x17, [sp, #8 * 16]
+	str x18, [sp, #8 * 18]
+	stp x29, x30, [sp, #8 * 20]
+
+	/*
+	 * Save elr_el1 & spsr_el1. This is so that we can take a nested exception
+	 * and still be able to unwind.
+	 */
+	mrs x0, elr_el1
+	mrs x1, spsr_el1
+	stp x0, x1, [sp, #8 * 22]
+.endm
+
+/**
+ * Restores the volatile registers from the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers with 18 instructions
+ * left; if paired with save_volatile_to_stack, there are 4
+ * instructions to spare.
+ */
+.macro restore_volatile_from_stack
+	/* Restore registers x2-x18, x29 & x30. */
+	ldp x2, x3, [sp, #8 * 2]
+	ldp x4, x5, [sp, #8 * 4]
+	ldp x6, x7, [sp, #8 * 6]
+	ldp x8, x9, [sp, #8 * 8]
+	ldp x10, x11, [sp, #8 * 10]
+	ldp x12, x13, [sp, #8 * 12]
+	ldp x14, x15, [sp, #8 * 14]
+	ldp x16, x17, [sp, #8 * 16]
+	ldr x18, [sp, #8 * 18]
+	ldp x29, x30, [sp, #8 * 20]
+
+	/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
+	ldp x0, x1, [sp, #8 * 22]
+	msr elr_el1, x0
+	msr spsr_el1, x1
+
+	/* Restore x0 & x1, and release stack space. */
+	ldp x0, x1, [sp], #8 * 24
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
+ * the work, then switching back to SP0 before returning.
+ *
+ * Switching to SPx and calling the Rust handler takes 16 instructions. To
+ * restore and return we need an additional 16 instructions, so we can implement
+ * the whole handler within the allotted 32 instructions.
+ */
+.macro current_exception_sp0 handler:req
+	msr spsel, #1
+	save_volatile_to_stack
+	bl \handler
+	restore_volatile_from_stack
+	msr spsel, #0
+	eret
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SPx. It saves volatile registers, calls the Rust handler, restores volatile
+ * registers, then returns.
+ *
+ * This also works for exceptions taken from EL0, if we don't care about
+ * non-volatile registers.
+ *
+ * Saving state and jumping to the Rust handler takes 15 instructions, and
+ * restoring and returning also takes 15 instructions, so we can fit the whole
+ * handler in 30 instructions, under the limit of 32.
+ */
+.macro current_exception_spx handler:req
+	save_volatile_to_stack
+	bl \handler
+	restore_volatile_from_stack
+	eret
+.endm
+
+.section .text.vector_table_el1, "ax"
+.global vector_table_el1
+.balign 0x800
+vector_table_el1:
+sync_cur_sp0:
+	current_exception_sp0 sync_exception_current
+
+.balign 0x80
+irq_cur_sp0:
+	current_exception_sp0 irq_current
+
+.balign 0x80
+fiq_cur_sp0:
+	current_exception_sp0 fiq_current
+
+.balign 0x80
+serr_cur_sp0:
+	current_exception_sp0 serr_current
+
+.balign 0x80
+sync_cur_spx:
+	current_exception_spx sync_exception_current
+
+.balign 0x80
+irq_cur_spx:
+	current_exception_spx irq_current
+
+.balign 0x80
+fiq_cur_spx:
+	current_exception_spx fiq_current
+
+.balign 0x80
+serr_cur_spx:
+	current_exception_spx serr_current
+
+.balign 0x80
+sync_lower_64:
+	current_exception_spx sync_lower
+
+.balign 0x80
+irq_lower_64:
+	current_exception_spx irq_lower
+
+.balign 0x80
+fiq_lower_64:
+	current_exception_spx fiq_lower
+
+.balign 0x80
+serr_lower_64:
+	current_exception_spx serr_lower
+
+.balign 0x80
+sync_lower_32:
+	current_exception_spx sync_lower
+
+.balign 0x80
+irq_lower_32:
+	current_exception_spx irq_lower
+
+.balign 0x80
+fiq_lower_32:
+	current_exception_spx fiq_lower
+
+.balign 0x80
+serr_lower_32:
+	current_exception_spx serr_lower

+ 26 - 0
examples/aarch64/idmap.S

@@ -0,0 +1,26 @@
+.set .L_TT_TYPE_BLOCK, 0x1
+.set .L_TT_TYPE_PAGE,  0x3
+.set .L_TT_TYPE_TABLE, 0x3
+
+/* Access flag. */
+.set .L_TT_AF, 0x1 << 10
+/* Not global. */
+.set .L_TT_NG, 0x1 << 11
+.set .L_TT_XN, 0x3 << 53
+
+.set .L_TT_MT_DEV, 0x0 << 2			// MAIR #0 (DEV_nGnRE)
+.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8)	// MAIR #1 (MEM_WBWA), inner shareable
+
+.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
+.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG
+
+.section ".rodata.idmap", "a", %progbits
+.global idmap
+.align 12
+idmap:
+	/* level 1 */
+	.quad		.L_BLOCK_DEV | 0x0		    // 1 GiB of device mappings
+	.quad		.L_BLOCK_MEM | 0x40000000	// 1 GiB of DRAM
+	.fill		254, 8, 0x0			// 254 GiB of unmapped VA space
+	.quad		.L_BLOCK_DEV | 0x4000000000 // 1 GiB of device mappings
+	.fill		255, 8, 0x0			// 255 GiB of remaining VA space

+ 85 - 0
examples/aarch64/image.ld

@@ -0,0 +1,85 @@
+/*
+ * Code will start running at this symbol which is placed at the start of the
+ * image.
+ */
+ENTRY(entry)
+
+MEMORY
+{
+	image : ORIGIN = 0x40000000, LENGTH = 2M
+}
+
+SECTIONS
+{
+	/*
+	 * Collect together the code.
+	 */
+	.init : ALIGN(4096) {
+		text_begin = .;
+		*(.init.entry)
+		*(.init.*)
+	} >image
+	.text : {
+		*(.text.*)
+	} >image
+	text_end = .;
+
+	/*
+	 * Collect together read-only data.
+	 */
+	.rodata : ALIGN(4096) {
+		rodata_begin = .;
+		*(.rodata.*)
+	} >image
+	.got : {
+		*(.got)
+	} >image
+	rodata_end = .;
+
+	/*
+	 * Collect together the read-write data including .bss at the end which
+	 * will be zero'd by the entry code.
+	 */
+	.data : ALIGN(4096) {
+		data_begin = .;
+		*(.data.*)
+		/*
+		 * The entry point code assumes that .data is a multiple of 32
+		 * bytes long.
+		 */
+		. = ALIGN(32);
+		data_end = .;
+	} >image
+
+	/* Everything beyond this point will not be included in the binary. */
+	bin_end = .;
+
+	/* The entry point code assumes that .bss is 16-byte aligned. */
+	.bss : ALIGN(16)  {
+		bss_begin = .;
+		*(.bss.*)
+		*(COMMON)
+		. = ALIGN(16);
+		bss_end = .;
+	} >image
+
+	.stack (NOLOAD) : ALIGN(4096) {
+		boot_stack_begin = .;
+		. += 40 * 4096;
+		. = ALIGN(4096);
+		boot_stack_end = .;
+	} >image
+
+	/*
+	 * Remove unused sections from the image.
+	 */
+	/DISCARD/ : {
+		/* The image loads itself so doesn't need these sections. */
+		*(.gnu.hash)
+		*(.hash)
+		*(.interp)
+		*(.eh_frame_hdr)
+		*(.eh_frame)
+		*(.note.gnu.build-id)
+	}
+}
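
The symbols defined here are what entry.S references: ENTRY(entry) points at the entry label, adr_l loads bss_begin/bss_end for the zeroing loop, and boot_stack_end becomes the initial stack pointer. The image is linked to run from 0x40000000, the base of DRAM on the QEMU virt machine, matching the identity mapping in idmap.S.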

+ 66 - 0
examples/aarch64/src/exceptions.rs

@@ -0,0 +1,66 @@
+//! Exception handlers.
+
+use core::arch::asm;
+use log::error;
+use psci::system_off;
+
+#[no_mangle]
+extern "C" fn sync_exception_current(_elr: u64, _spsr: u64) {
+    error!("sync_exception_current");
+    print_esr();
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn irq_current(_elr: u64, _spsr: u64) {
+    error!("irq_current");
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn fiq_current(_elr: u64, _spsr: u64) {
+    error!("fiq_current");
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn serr_current(_elr: u64, _spsr: u64) {
+    error!("serr_current");
+    print_esr();
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn sync_lower(_elr: u64, _spsr: u64) {
+    error!("sync_lower");
+    print_esr();
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn irq_lower(_elr: u64, _spsr: u64) {
+    error!("irq_lower");
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn fiq_lower(_elr: u64, _spsr: u64) {
+    error!("fiq_lower");
+    system_off().unwrap();
+}
+
+#[no_mangle]
+extern "C" fn serr_lower(_elr: u64, _spsr: u64) {
+    error!("serr_lower");
+    print_esr();
+    system_off().unwrap();
+}
+
+#[inline]
+fn print_esr() {
+    let mut esr: u64;
+    unsafe {
+        asm!("mrs {esr}, esr_el1", esr = out(reg) esr);
+    }
+    log::error!("esr={:#08x}", esr);
+}

+ 47 - 0
examples/aarch64/src/logger.rs

@@ -0,0 +1,47 @@
+//! Log implementation using the UART.
+
+use crate::pl011::Uart;
+use core::fmt::Write;
+use log::{LevelFilter, Log, Metadata, Record, SetLoggerError};
+use spin::mutex::SpinMutex;
+
+/// Base memory-mapped address of the primary PL011 UART device.
+pub const BASE_ADDRESS: usize = 0x900_0000;
+
+static LOGGER: Logger = Logger {
+    uart: SpinMutex::new(None),
+};
+
+struct Logger {
+    uart: SpinMutex<Option<Uart>>,
+}
+
+/// Initialises UART logger.
+pub fn init(max_level: LevelFilter) -> Result<(), SetLoggerError> {
+    // Safe because BASE_ADDRESS is the base of the MMIO region for a UART and is mapped as device
+    // memory.
+    let uart = unsafe { Uart::new(BASE_ADDRESS) };
+    LOGGER.uart.lock().replace(uart);
+
+    log::set_logger(&LOGGER)?;
+    log::set_max_level(max_level);
+    Ok(())
+}
+
+impl Log for Logger {
+    fn enabled(&self, _metadata: &Metadata) -> bool {
+        true
+    }
+
+    fn log(&self, record: &Record) {
+        writeln!(
+            LOGGER.uart.lock().as_mut().unwrap(),
+            "[{}] {}",
+            record.level(),
+            record.args()
+        )
+        .unwrap();
+    }
+
+    fn flush(&self) {}
+}

+ 24 - 0
examples/aarch64/src/main.rs

@@ -0,0 +1,24 @@
+#![no_std]
+#![no_main]
+
+mod exceptions;
+mod logger;
+mod pl011;
+
+use core::panic::PanicInfo;
+use log::{error, info, LevelFilter};
+use psci::system_off;
+
+#[no_mangle]
+extern "C" fn main(_x0: u64, _x1: u64, _x2: u64, _x3: u64) {
+    logger::init(LevelFilter::Debug).unwrap();
+    info!("virtio-drivers example started.");
+    system_off().unwrap();
+}
+
+#[panic_handler]
+fn panic(info: &PanicInfo) -> ! {
+    error!("{}", info);
+    system_off().unwrap();
+    loop {}
+}

+ 61 - 0
examples/aarch64/src/pl011.rs

@@ -0,0 +1,61 @@
+//! Minimal driver for a PL011 UART.
+
+use core::fmt::{self, Write};
+
+const FLAG_REGISTER_OFFSET: usize = 0x18;
+const FR_BUSY: u8 = 1 << 3;
+const FR_TXFF: u8 = 1 << 5;
+
+/// Minimal driver for a PL011 UART.
+pub struct Uart {
+    base_address: *mut u8,
+}
+
+impl Uart {
+    /// Constructs a new instance of the UART driver for a device at the given base address.
+    ///
+    /// # Safety
+    ///
+    /// The given base address must point to the 8 MMIO control registers of an appropriate UART
+    /// device, which must be mapped into the address space of the process as device memory and not
+    /// have any other aliases.
+    pub unsafe fn new(base_address: usize) -> Self {
+        Self {
+            base_address: base_address as *mut u8,
+        }
+    }
+
+    /// Writes a single byte to the UART.
+    pub fn write_byte(&self, byte: u8) {
+        // Wait until there is room in the TX buffer.
+        while self.read_flag_register() & FR_TXFF != 0 {}
+
+        // Safe because we know that the base address points to the control registers of a PL011
+        // device which is appropriately mapped.
+        unsafe {
+            // Write to the TX buffer.
+            self.base_address.write_volatile(byte);
+        }
+
+        // Wait until the UART is no longer busy.
+        while self.read_flag_register() & FR_BUSY != 0 {}
+    }
+
+    fn read_flag_register(&self) -> u8 {
+        // Safe because we know that the base address points to the control registers of a PL011
+        // device which is appropriately mapped.
+        unsafe { self.base_address.add(FLAG_REGISTER_OFFSET).read_volatile() }
+    }
+}
+
+impl Write for Uart {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        for c in s.as_bytes() {
+            self.write_byte(*c);
+        }
+        Ok(())
+    }
+}
+
+// Safe because it just contains a pointer to device memory, which can be accessed from any context.
+unsafe impl Send for Uart {}
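
A minimal usage sketch (not part of this commit), assuming the QEMU virt machine's PL011 at the same base address that logger.rs uses; output normally goes through the core::fmt::Write impl:

    use core::fmt::Write;
    use crate::pl011::Uart;

    /// Hypothetical helper, for illustration only.
    fn greet() {
        // Safety: 0x900_0000 is the PL011 base on the QEMU virt machine (see
        // BASE_ADDRESS in logger.rs) and lies within the first GiB, which
        // idmap.S maps as device memory.
        let mut uart = unsafe { Uart::new(0x900_0000) };
        uart.write_byte(b'!');
        writeln!(uart, " hello from the PL011").unwrap();
    }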

+ 45 - 0
examples/aarch64/src/uart.rs

@@ -0,0 +1,45 @@
+//! Minimal driver for an 8250 UART.
+
+use core::fmt::{self, Write};
+use core::ptr::write_volatile;
+
+/// Minimal driver for an 8250 UART.
+pub struct Uart {
+    base_address: *mut u8,
+}
+
+impl Uart {
+    /// Constructs a new instance of the UART driver for a device at the given base address.
+    ///
+    /// # Safety
+    ///
+    /// The given base address must point to the 8 MMIO control registers of an appropriate UART
+    /// device, which must be mapped into the address space of the process as device memory and not
+    /// have any other aliases.
+    pub unsafe fn new(base_address: usize) -> Self {
+        Self {
+            base_address: base_address as *mut u8,
+        }
+    }
+
+    /// Writes a single byte to the UART.
+    pub fn write_byte(&self, byte: u8) {
+        // Safe because we know that the base address points to the control registers of an UART
+        // device which is appropriately mapped.
+        unsafe {
+            write_volatile(self.base_address, byte);
+        }
+    }
+}
+
+impl Write for Uart {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        for c in s.as_bytes() {
+            self.write_byte(*c);
+        }
+        Ok(())
+    }
+}
+
+// Safe because it just contains a pointer to device memory, which can be accessed from any context.
+unsafe impl Send for Uart {}