From e728b74908d4d65389c26860385ed02e35c7443c Mon Sep 17 00:00:00 2001 From: Daniel Rossier Date: Mon, 14 Jun 2021 11:17:00 +0200 Subject: [PATCH] Improved Makefile and added basic dir for arm64 support (WiP: not compiling w/this config) --- doc/source/user_guide.rst | 1 + so3/Kbuild | 6 +- so3/Kconfig | 18 +- so3/Makefile | 133 +- so3/arch/Kconfig | 36 + so3/arch/Makefile | 4 + so3/arch/arm/Kconfig | 27 - so3/arch/arm32/Kconfig | 23 + so3/arch/{arm => arm32}/Makefile | 0 so3/arch/{arm => arm32}/asm-offsets.c | 5 +- so3/arch/arm32/asm-offsets.s | 654 ++++++++++ so3/arch/{arm => arm32}/boot/Makefile | 0 so3/arch/{arm => arm32}/boot/head.S | 0 so3/arch/{arm => arm32}/bpi/Kconfig | 0 .../{arm => arm32}/bpi/include/mach/timer.h | 0 .../{arm => arm32}/bpi/include/mach/uart.h | 0 so3/arch/{arm => arm32}/cache-cp15.c | 0 so3/arch/{arm => arm32}/cache_v7.c | 0 so3/arch/{arm => arm32}/cache_v7_asm.S | 0 so3/arch/{arm => arm32}/context.S | 0 so3/arch/{arm => arm32}/exception.S | 0 so3/arch/{arm => arm32}/fault.c | 0 so3/arch/{arm => arm32}/include/asm/armv7.h | 0 .../{arm => arm32}/include/asm/assembler.h | 0 .../include/asm/atomic-generic.h | 0 so3/arch/{arm => arm32}/include/asm/atomic.h | 0 .../{arm => arm32}/include/asm/byteorder.h | 0 .../{arm => arm32}/include/asm/cacheflush.h | 0 so3/arch/{arm => arm32}/include/asm/errno.h | 0 .../{arm => arm32}/include/asm/hardware.h | 0 .../include/asm/hardware/pl310.h | 0 so3/arch/{arm => arm32}/include/asm/io.h | 0 so3/arch/{arm => arm32}/include/asm/memory.h | 0 so3/arch/{arm => arm32}/include/asm/mmu.h | 0 .../{arm => arm32}/include/asm/posix_types.h | 0 .../{arm => arm32}/include/asm/processor.h | 0 so3/arch/{arm => arm32}/include/asm/setup.h | 0 .../{arm => arm32}/include/asm/spinlock.h | 0 so3/arch/{arm => arm32}/include/asm/syscall.h | 0 so3/arch/{arm => arm32}/include/asm/types.h | 0 so3/arch/{arm => arm32}/include/asm/utils.h | 0 so3/arch/{arm => arm32}/merida/Kconfig | 0 .../merida/include/mach/timer.h | 0 .../{arm => arm32}/merida/include/mach/uart.h | 0 so3/arch/{arm => arm32}/mmu.c | 0 so3/arch/{arm => arm32}/rpi4/Kconfig | 0 .../{arm => arm32}/rpi4/include/mach/uart.h | 0 so3/arch/{arm => arm32}/setup.c | 0 so3/arch/{arm => arm32}/so3.lds | 0 so3/arch/{arm => arm32}/vexpress/Kconfig | 0 .../vexpress/include/mach/uart.h | 0 so3/arch/arm64/Kconfig | 22 + so3/arch/arm64/Makefile | 14 + so3/arch/arm64/asm-offsets.c | 75 ++ so3/arch/arm64/backtrace.c | 170 +++ so3/arch/arm64/backtrace_asm.S | 28 + so3/arch/arm64/boot/Makefile | 1 + so3/arch/arm64/boot/head.S | 424 +++++++ so3/arch/arm64/boot/hyp-stub.S | 124 ++ so3/arch/arm64/cache.S | 244 ++++ so3/arch/arm64/cache_v8.c | 168 +++ so3/arch/arm64/context.S | 48 + so3/arch/arm64/domain.c | 78 ++ so3/arch/arm64/exception.S | 241 ++++ so3/arch/arm64/fault.c | 74 ++ so3/arch/arm64/include/asm/arm_timer.h | 95 ++ so3/arch/arm64/include/asm/atomic.h | 324 +++++ so3/arch/arm64/include/asm/backtrace.h | 35 + so3/arch/arm64/include/asm/bitops.h | 205 +++ so3/arch/arm64/include/asm/byteorder.h | 25 + so3/arch/arm64/include/asm/cacheflush.h | 53 + so3/arch/arm64/include/asm/cpregs.h | 356 ++++++ so3/arch/arm64/include/asm/errno.h | 19 + so3/arch/arm64/include/asm/hardware.h | 24 + so3/arch/arm64/include/asm/image.h | 24 + so3/arch/arm64/include/asm/io.h | 27 + so3/arch/arm64/include/asm/mmu.h | 364 ++++++ so3/arch/arm64/include/asm/percpu.h | 60 + so3/arch/arm64/include/asm/posix_types.h | 55 + so3/arch/arm64/include/asm/processor.h | 1094 +++++++++++++++++ so3/arch/arm64/include/asm/setup.h | 45 + 
so3/arch/arm64/include/asm/spinlock.h | 79 ++ so3/arch/arm64/include/asm/syscall.h | 82 ++ so3/arch/arm64/include/asm/types.h | 44 + so3/arch/arm64/include/asm/utils.h | 56 + so3/arch/arm64/include/asm/vfp.h | 50 + so3/arch/arm64/include/asm/virt.h | 206 ++++ so3/arch/arm64/lib/Makefile | 3 + so3/arch/arm64/lib/strchr.S | 31 + so3/arch/arm64/mmu.c | 582 +++++++++ so3/arch/arm64/rpi4_64/Kconfig | 10 + so3/arch/arm64/rpi4_64/Makefile | 6 + so3/arch/arm64/rpi4_64/include/mach/gic.h | 30 + so3/arch/arm64/rpi4_64/include/mach/rpi4.h | 72 ++ so3/arch/arm64/rpi4_64/include/mach/uart.h | 25 + so3/arch/arm64/rpi4_64/platsmp.c | 58 + so3/arch/arm64/setup.c | 122 ++ so3/arch/arm64/so3.lds | 99 ++ so3/arch/arm64/spinlock.S | 44 + so3/arch/arm64/virt64/Kconfig | 12 + so3/arch/arm64/virt64/include/mach/gic.h | 30 + so3/arch/arm64/virt64/include/mach/uart.h | 26 + so3/configs/rpi4_defconfig | 20 +- so3/configs/vexpress_fb_defconfig | 19 +- so3/configs/vexpress_full_defconfig | 28 +- so3/configs/vexpress_mmc_defconfig | 28 +- so3/configs/vexpress_net_defconfig | 28 +- so3/include/device/net.h | 2 +- so3/include/memory.h | 6 +- so3/include/types.h | 2 +- so3/lib/vsprintf.c | 4 +- so3/mm/memory.c | 10 +- 112 files changed, 7054 insertions(+), 183 deletions(-) create mode 100644 so3/arch/Kconfig create mode 100644 so3/arch/Makefile delete mode 100644 so3/arch/arm/Kconfig create mode 100644 so3/arch/arm32/Kconfig rename so3/arch/{arm => arm32}/Makefile (100%) rename so3/arch/{arm => arm32}/asm-offsets.c (99%) create mode 100644 so3/arch/arm32/asm-offsets.s rename so3/arch/{arm => arm32}/boot/Makefile (100%) rename so3/arch/{arm => arm32}/boot/head.S (100%) rename so3/arch/{arm => arm32}/bpi/Kconfig (100%) rename so3/arch/{arm => arm32}/bpi/include/mach/timer.h (100%) rename so3/arch/{arm => arm32}/bpi/include/mach/uart.h (100%) rename so3/arch/{arm => arm32}/cache-cp15.c (100%) rename so3/arch/{arm => arm32}/cache_v7.c (100%) rename so3/arch/{arm => arm32}/cache_v7_asm.S (100%) rename so3/arch/{arm => arm32}/context.S (100%) rename so3/arch/{arm => arm32}/exception.S (100%) rename so3/arch/{arm => arm32}/fault.c (100%) rename so3/arch/{arm => arm32}/include/asm/armv7.h (100%) rename so3/arch/{arm => arm32}/include/asm/assembler.h (100%) rename so3/arch/{arm => arm32}/include/asm/atomic-generic.h (100%) rename so3/arch/{arm => arm32}/include/asm/atomic.h (100%) rename so3/arch/{arm => arm32}/include/asm/byteorder.h (100%) rename so3/arch/{arm => arm32}/include/asm/cacheflush.h (100%) rename so3/arch/{arm => arm32}/include/asm/errno.h (100%) rename so3/arch/{arm => arm32}/include/asm/hardware.h (100%) rename so3/arch/{arm => arm32}/include/asm/hardware/pl310.h (100%) rename so3/arch/{arm => arm32}/include/asm/io.h (100%) rename so3/arch/{arm => arm32}/include/asm/memory.h (100%) rename so3/arch/{arm => arm32}/include/asm/mmu.h (100%) rename so3/arch/{arm => arm32}/include/asm/posix_types.h (100%) rename so3/arch/{arm => arm32}/include/asm/processor.h (100%) rename so3/arch/{arm => arm32}/include/asm/setup.h (100%) rename so3/arch/{arm => arm32}/include/asm/spinlock.h (100%) rename so3/arch/{arm => arm32}/include/asm/syscall.h (100%) rename so3/arch/{arm => arm32}/include/asm/types.h (100%) rename so3/arch/{arm => arm32}/include/asm/utils.h (100%) rename so3/arch/{arm => arm32}/merida/Kconfig (100%) rename so3/arch/{arm => arm32}/merida/include/mach/timer.h (100%) rename so3/arch/{arm => arm32}/merida/include/mach/uart.h (100%) rename so3/arch/{arm => arm32}/mmu.c (100%) rename so3/arch/{arm => 
arm32}/rpi4/Kconfig (100%) rename so3/arch/{arm => arm32}/rpi4/include/mach/uart.h (100%) rename so3/arch/{arm => arm32}/setup.c (100%) rename so3/arch/{arm => arm32}/so3.lds (100%) rename so3/arch/{arm => arm32}/vexpress/Kconfig (100%) rename so3/arch/{arm => arm32}/vexpress/include/mach/uart.h (100%) create mode 100644 so3/arch/arm64/Kconfig create mode 100644 so3/arch/arm64/Makefile create mode 100644 so3/arch/arm64/asm-offsets.c create mode 100644 so3/arch/arm64/backtrace.c create mode 100644 so3/arch/arm64/backtrace_asm.S create mode 100644 so3/arch/arm64/boot/Makefile create mode 100644 so3/arch/arm64/boot/head.S create mode 100644 so3/arch/arm64/boot/hyp-stub.S create mode 100644 so3/arch/arm64/cache.S create mode 100644 so3/arch/arm64/cache_v8.c create mode 100644 so3/arch/arm64/context.S create mode 100644 so3/arch/arm64/domain.c create mode 100644 so3/arch/arm64/exception.S create mode 100644 so3/arch/arm64/fault.c create mode 100644 so3/arch/arm64/include/asm/arm_timer.h create mode 100644 so3/arch/arm64/include/asm/atomic.h create mode 100644 so3/arch/arm64/include/asm/backtrace.h create mode 100644 so3/arch/arm64/include/asm/bitops.h create mode 100644 so3/arch/arm64/include/asm/byteorder.h create mode 100644 so3/arch/arm64/include/asm/cacheflush.h create mode 100644 so3/arch/arm64/include/asm/cpregs.h create mode 100644 so3/arch/arm64/include/asm/errno.h create mode 100644 so3/arch/arm64/include/asm/hardware.h create mode 100644 so3/arch/arm64/include/asm/image.h create mode 100644 so3/arch/arm64/include/asm/io.h create mode 100644 so3/arch/arm64/include/asm/mmu.h create mode 100644 so3/arch/arm64/include/asm/percpu.h create mode 100644 so3/arch/arm64/include/asm/posix_types.h create mode 100644 so3/arch/arm64/include/asm/processor.h create mode 100644 so3/arch/arm64/include/asm/setup.h create mode 100644 so3/arch/arm64/include/asm/spinlock.h create mode 100644 so3/arch/arm64/include/asm/syscall.h create mode 100644 so3/arch/arm64/include/asm/types.h create mode 100644 so3/arch/arm64/include/asm/utils.h create mode 100644 so3/arch/arm64/include/asm/vfp.h create mode 100644 so3/arch/arm64/include/asm/virt.h create mode 100644 so3/arch/arm64/lib/Makefile create mode 100644 so3/arch/arm64/lib/strchr.S create mode 100644 so3/arch/arm64/mmu.c create mode 100644 so3/arch/arm64/rpi4_64/Kconfig create mode 100644 so3/arch/arm64/rpi4_64/Makefile create mode 100644 so3/arch/arm64/rpi4_64/include/mach/gic.h create mode 100644 so3/arch/arm64/rpi4_64/include/mach/rpi4.h create mode 100644 so3/arch/arm64/rpi4_64/include/mach/uart.h create mode 100644 so3/arch/arm64/rpi4_64/platsmp.c create mode 100644 so3/arch/arm64/setup.c create mode 100644 so3/arch/arm64/so3.lds create mode 100644 so3/arch/arm64/spinlock.S create mode 100644 so3/arch/arm64/virt64/Kconfig create mode 100644 so3/arch/arm64/virt64/include/mach/gic.h create mode 100644 so3/arch/arm64/virt64/include/mach/uart.h diff --git a/doc/source/user_guide.rst b/doc/source/user_guide.rst index 47b75493c..69238615e 100644 --- a/doc/source/user_guide.rst +++ b/doc/source/user_guide.rst @@ -26,6 +26,7 @@ Various other packages are required: sudo apt-get install elfutils u-boot-tools sudo apt-get install device-tree-compiler sudo apt-get install fdisk + sudo apt-get install libncurses-dev The following packets are not mandatory, but they can be installed to prevent annoying warnings: diff --git a/so3/Kbuild b/so3/Kbuild index 9380babdb..ccad993ca 100644 --- a/so3/Kbuild +++ b/so3/Kbuild @@ -8,7 +8,7 @@ offsets-file := 
include/generated/asm-offsets.h always := $(offsets-file) targets := $(offsets-file) -targets += arch/arm/asm-offsets.s +targets += arch/$(SRCARCH)/asm-offsets.s # Default sed regexp - multiline due to syntax constraints define sed-y @@ -35,12 +35,12 @@ define cmd_offsets endef # We use internal kbuild rules to avoid the "is up to date" message from make -arch/arm/asm-offsets.s: arch/arm/asm-offsets.c FORCE +arch/$(SRCARCH)/asm-offsets.s: arch/$(SRCARCH)/asm-offsets.c FORCE $(Q)mkdir -p $(dir $@) $(call if_changed_dep,cc_s_c) -$(obj)/$(offsets-file): arch/arm/asm-offsets.s Kbuild +$(obj)/$(offsets-file): arch/$(SRCARCH)/asm-offsets.s Kbuild $(call cmd,offsets) diff --git a/so3/Kconfig b/so3/Kconfig index 0f0726b87..65a250372 100644 --- a/so3/Kconfig +++ b/so3/Kconfig @@ -2,13 +2,7 @@ mainmenu "SO3 Configuration" -menu "General" - config CROSS_COMPILE - string "Cross-compiler tool prefix" - default "arm-linux-gnueabihf-" -endmenu - -source "arch/arm/Kconfig" +source "arch/Kconfig" source "kernel/Kconfig" @@ -29,14 +23,4 @@ config MMU config DEBUG_PRINTK bool "Debug printk" - -menu "Generated files" - config ELF - bool "Generate ELF" - default y - config BIN - bool "Generate BIN" - default y - depends on ELF -endmenu diff --git a/so3/Makefile b/so3/Makefile index 7ab80fc8a..9ee80657a 100644 --- a/so3/Makefile +++ b/so3/Makefile @@ -97,11 +97,6 @@ VPATH := $(srctree) export srctree objtree VPATH -#CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) -CROSS_COMPILE = arm-linux-gnueabihf- - -SRCARCH := arm - KCONFIG_CONFIG ?= .config export KCONFIG_CONFIG @@ -126,7 +121,7 @@ HOSTCXXFLAGS = -O2 # cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< # # If $(quiet) is empty, the whole command will be printed. -# If it is set to "quiet_", only the short version will be printed. +# If it is set to "quiet_", only the short version will be printed. # If it is set to "silent_", nothing will be printed at all, since # the variable $(silent_cmd_cc_o_c) doesn't exist. # @@ -163,58 +158,14 @@ MAKEFLAGS += --include-dir=$(srctree) $(srctree)/scripts/Kbuild.include: ; include $(srctree)/scripts/Kbuild.include -# Make variables (CC, etc...) - -AS = $(CROSS_COMPILE)as -LD = $(CROSS_COMPILE)ld -CC = $(CROSS_COMPILE)gcc -CPP = $(CC) -E -AR = $(CROSS_COMPILE)ar -NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip -OBJCOPY = $(CROSS_COMPILE)objcopy -OBJDUMP = $(CROSS_COMPILE)objdump AWK = awk -INSTALLKERNEL := installkernel -PERL = perl +PERL = perl LEX = flex YACC = bison -# Use SO3INCLUDE when you must reference the include/ directory. -# Needed to be compatible with the O= option - -gccincdir := $(shell $(CC) -print-file-name=include) - -SO3INCLUDE := -Iinclude -I. 
\ - $(if $(KBUILD_SRC), -I$(srctree)/include) \ - -include include/generated/autoconf.h - -KBUILD_CPPFLAGS := -D__KERNEL__ +export LEX YACC +export HOSTCXX HOSTCXXFLAGS HOSTCC HOSTCFLAGS - -KBUILD_CPPFLAGS += -I$(TOPDIR)include -I include/net -KBUILD_CPPFLAGS += -fno-builtin -ffreestanding -nostdinc -isystem $(gccincdir) - -KBUILD_CFLAGS := -g -O0 -fno-common -marm -mno-thumb-interwork -march=armv7-a -Wall -Wstrict-prototypes $(KBUILD_CPPFLAGS) -KBUILD_CFLAGS := -mabi=aapcs-linux -mlittle-endian -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -fno-strict-aliasing -fno-common -Werror-implicit-function-declaration -Wno-format-security -std=gnu89 -fno-PIE -fno-dwarf2-cfi-asm -fno-ipa-sra -mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp -funwind-tables -marm -march=armv7-a -Uarm -fno-delete-null-pointer-checks -Wno-frame-address --param=allow-store-data-races=0 -Wframe-larger-than=1024 -fno-stack-protector -Wno-unused-but-set-variable -Wno-unused-const-variable -fomit-frame-pointer -fno-var-tracking-assignments -g -Wdeclaration-after-statement -Wno-pointer-sign -fno-strict-overflow -fno-merge-all-constants -fmerge-constants -fno-stack-check -fconserve-stack -Werror=implicit-int -Werror=strict-prototypes -Werror=date-time -Werror=incompatible-pointer-types -Werror=designated-init -fno-function-sections -fno-data-sections -DBITS_PER_LONG=32 - -KBUILD_AFLAGS_KERNEL := -I. -KBUILD_CFLAGS_KERNEL := -I. -I./lib/libfdt -KBUILD_AFLAGS := -g -D__ASSEMBLY__ -mlittle-endian -fno-PIE -mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp -funwind-tables -marm -march=armv7-a -Wa,-gdwarf-2 - -# Read KERNELRELEASE from include/config/kernel.release (if it exists) -KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) -KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) - -export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP LEX YACC -export MAKE AWK GENKSYMS INSTALLKERNEL PERL UTS_MACHINE -export HOSTCXX HOSTCXXFLAGS - -export KBUILD_CPPFLAGS NOSTDINC_FLAGS SO3INCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL -export KBUILD_ARFLAGS -export TARGET CONFIG_BIN CONFIG_ELF - # Files to ignore in find ... statements RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o @@ -327,12 +278,84 @@ $(KCONFIG_CONFIG): include include/config/auto.conf +# From here, we can rely on the CONFIG_* variables stored in .config +CROSS_COMPILE = $(CONFIG_CROSS_COMPILE) + + +# Make variables (CC, etc...) + +AS = $(CROSS_COMPILE)as +LD = $(CROSS_COMPILE)ld +CC = $(CROSS_COMPILE)gcc +CPP = $(CC) -E +AR = $(CROSS_COMPILE)ar +NM = $(CROSS_COMPILE)nm +STRIP = $(CROSS_COMPILE)strip +OBJCOPY = $(CROSS_COMPILE)objcopy +OBJDUMP = $(CROSS_COMPILE)objdump + +# Use SO3INCLUDE when you must reference the include/ directory. +# Needed to be compatible with the O= option + +gccincdir := $(shell $(CC) -print-file-name=include) + +SO3INCLUDE := -Iinclude -I.
\ + $(if $(KBUILD_SRC), -I$(srctree)/include) \ + -include include/generated/autoconf.h + +KBUILD_CPPFLAGS := -D__KERNEL__ + +KBUILD_CPPFLAGS += -I$(TOPDIR)include -I include/net +KBUILD_CPPFLAGS += -fno-builtin -ffreestanding -nostdinc -isystem $(gccincdir) + +KBUILD_CFLAGS := -g -O0 -fno-common -Wall -Wstrict-prototypes $(KBUILD_CPPFLAGS) +KBUILD_CFLAGS := -mlittle-endian -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -fno-strict-aliasing -fno-common -Werror-implicit-function-declaration -Wno-format-security -std=gnu89 -fno-PIE -fno-dwarf2-cfi-asm -fno-ipa-sra -funwind-tables -fno-delete-null-pointer-checks -Wno-frame-address -Wframe-larger-than=1024 \ + -fno-stack-protector -Wno-unused-but-set-variable -Wno-unused-const-variable -fomit-frame-pointer -fno-var-tracking-assignments -g -Wdeclaration-after-statement -Wno-pointer-sign -fno-strict-overflow -fno-merge-all-constants -fmerge-constants -fno-stack-check -fconserve-stack \ + -Werror=implicit-int -Werror=strict-prototypes -Werror=date-time -Werror=incompatible-pointer-types -Werror=designated-init -fno-function-sections -fno-data-sections + +KBUILD_AFLAGS_KERNEL := -I. +KBUILD_CFLAGS_KERNEL := -I. -I./lib/libfdt +KBUILD_AFLAGS := -g -D__ASSEMBLY__ -mlittle-endian -fno-PIE -funwind-tables -Wa,-gdwarf-2 + +# Specific CFLAGS/AFLAGS according to ARCH + +SRCARCH := $(patsubst "%",%,$(CONFIG_ARCH)) + +ifeq ($(CONFIG_ARCH_ARM32),y) +KBUILD_CFLAGS += -mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp -funwind-tables -marm -march=armv7-a -Uarm -DBITS_PER_LONG=32 --param allow-store-data-races=0 +KBUILD_AFLAGS += -mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp -funwind-tables -marm -march=armv7-a -Wa,-gdwarf-2 +endif + +ifeq ($(CONFIG_ARCH_ARM64),y) +KBUILD_CFLAGS += -DBITS_PER_LONG=64 -fno-allow-store-data-races +KBUILD_AFLAGS += +endif + +# Read KERNELRELEASE from include/config/kernel.release (if it exists) +KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) +KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) + +export ARCH SRCARCH CONFIG_SHELL CROSS_COMPILE AS LD CC +export MAKE AWK GENKSYMS INSTALLKERNEL PERL UTS_MACHINE +export CPP AR NM STRIP OBJCOPY OBJDUMP + +export KBUILD_CPPFLAGS NOSTDINC_FLAGS SO3INCLUDE OBJCOPYFLAGS LDFLAGS +export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL +export KBUILD_ARFLAGS + ifeq ($(CONFIG_VEXPRESS),y) TARGET = vexpress endif ifeq ($(CONFIG_RPI4),y) TARGET = rpi4 endif +ifeq ($(CONFIG_RPI4_64),y) +TARGET = rpi4_64 +endif +ifeq ($(CONFIG_VIRT64),y) +TARGET = virt64 +endif + # The all: target is the default when no target is given on the # command line. @@ -346,7 +369,7 @@ include scripts/Makefile.build all: $(BIN) dtbs -objs-y := kernel mm fs ipc arch/arm devices apps net +objs-y := arch/$(SRCARCH) kernel mm fs ipc devices apps net libs-y := lib libs-y += lib/libfdt @@ -420,7 +443,7 @@ $(sort $(so3-all)): $(so3-dirs) ; # tweaks to this spot to avoid wrong language settings when running # make menuconfig etc. # Error messages still appear in the original language - + prepare0: scripts_basic FORCE $(Q)$(MAKE) $(build)=.
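With the hunks above, the toolchain is no longer hard-coded in the Makefile: once include/config/auto.conf has been included, CROSS_COMPILE and SRCARCH are derived from the Kconfig symbols introduced by the new arch/Kconfig (shown next). As an illustration only, not part of the patch, an arm64 configuration built from the defaults declared in arch/Kconfig would produce a .config fragment along these lines (the platform symbol depends on the chosen target):

CONFIG_ARCH_ARM64=y
CONFIG_ARCH="arm64"
CONFIG_CROSS_COMPILE="aarch64-none-linux-gnu-"
CONFIG_VIRT64=y

From this, SRCARCH resolves to arm64 via $(patsubst "%",%,$(CONFIG_ARCH)), CC to aarch64-none-linux-gnu-gcc, and the top-level objs-y pulls in arch/arm64/ instead of a fixed arch/arm/.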
diff --git a/so3/arch/Kconfig b/so3/arch/Kconfig new file mode 100644 index 000000000..34e0de7f5 --- /dev/null +++ b/so3/arch/Kconfig @@ -0,0 +1,36 @@ +choice + prompt "SO3 OS type" + +config STANDALONE + bool "Standalone OS" + +endchoice + +choice + prompt "CPU Architecture" + + config ARCH_ARM32 + bool "ARM 32-bit" + + config ARCH_ARM64 + bool "ARM 64-bit" + +endchoice + + +config ARCH + string + default "arm32" if ARCH_ARM32 + default "arm64" if ARCH_ARM64 + +config CROSS_COMPILE + string + default "arm-none-linux-gnueabihf-" if ARCH_ARM32 + default "aarch64-none-linux-gnu-" if ARCH_ARM64 + + +source "arch/arm32/Kconfig" +source "arch/arm64/Kconfig" + + + diff --git a/so3/arch/Makefile b/so3/arch/Makefile new file mode 100644 index 000000000..fd0ba5df0 --- /dev/null +++ b/so3/arch/Makefile @@ -0,0 +1,4 @@ + +obj-$(CONFIG_ARCH_ARM32) += arm32/ +obj-$(CONFIG_ARCH_ARM64) += arm64/ + diff --git a/so3/arch/arm/Kconfig b/so3/arch/arm/Kconfig deleted file mode 100644 index ea49f912d..000000000 --- a/so3/arch/arm/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ - -menu "Platform" - -choice - prompt "SO3 OS type" - -config STANDALONE - bool "Standalone OS" - -endchoice - -choice - prompt "Target" - - config VEXPRESS - bool "Vexpress" - config RPI4 - bool "Raspberry Pi 4 B" - - -endchoice - -source "arch/arm/vexpress/Kconfig" -source "arch/arm/merida/Kconfig" -source "arch/arm/bpi/Kconfig" -source "arch/arm/rpi4/Kconfig" -endmenu diff --git a/so3/arch/arm32/Kconfig b/so3/arch/arm32/Kconfig new file mode 100644 index 000000000..eb29ac604 --- /dev/null +++ b/so3/arch/arm32/Kconfig @@ -0,0 +1,23 @@ + +if ARCH_ARM32 + +menu "Platform" + +choice + prompt "Target" + + config VEXPRESS + bool "Vexpress" + + config RPI4 + bool "Raspberry Pi 4 Model B support" + +endchoice + + +source "arch/arm32/vexpress/Kconfig" +source "arch/arm32/rpi4/Kconfig" + +endmenu + +endif diff --git a/so3/arch/arm/Makefile b/so3/arch/arm32/Makefile similarity index 100% rename from so3/arch/arm/Makefile rename to so3/arch/arm32/Makefile diff --git a/so3/arch/arm/asm-offsets.c b/so3/arch/arm32/asm-offsets.c similarity index 99% rename from so3/arch/arm/asm-offsets.c rename to so3/arch/arm32/asm-offsets.c index 7c99f4e06..afc50dc96 100644 --- a/so3/arch/arm/asm-offsets.c +++ b/so3/arch/arm32/asm-offsets.c @@ -16,12 +16,13 @@ * */ -#include -#include #include #include #include +#include +#include + /* * Make sure that the compiler and target are compatible. */ diff --git a/so3/arch/arm32/asm-offsets.s b/so3/arch/arm32/asm-offsets.s new file mode 100644 index 000000000..b9825f3f5 --- /dev/null +++ b/so3/arch/arm32/asm-offsets.s @@ -0,0 +1,654 @@ + .arch armv7-a + .eabi_attribute 28, 1 @ Tag_ABI_VFP_args + .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal + .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions + .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model + .eabi_attribute 24, 1 @ Tag_ABI_align8_needed + .eabi_attribute 25, 1 @ Tag_ABI_align8_preserved + .eabi_attribute 26, 2 @ Tag_ABI_enum_size + .eabi_attribute 30, 6 @ Tag_ABI_optimization_goals + .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t + .file "asm-offsets.c" +@ GNU C89 (GNU Toolchain for the A-profile Architecture 9.2-2019.12 (arm-9.10)) version 9.2.1 20191025 (arm-none-linux-gnueabihf) +@ compiled by GNU C version 4.8.1, GMP version 4.3.2, MPFR version 3.1.6, MPC version 1.0.3, isl version isl-0.15-1-g835ea3a-GMP + +@ GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 +@ options passed: -nostdinc -I include -I . 
-I include -I include/net -I . +@ -I ./lib/libfdt -I arch/arm32/include/ -I arch/arm32/rpi4/include/ +@ -iprefix /opt/toolchain/arm/gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf/bin/../lib/gcc/arm-none-linux-gnueabihf/9.2.1/ +@ -isysroot /opt/toolchain/arm/gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf/bin/../arm-none-linux-gnueabihf/libc +@ -D __KERNEL__ -U arm -D BITS_PER_LONG=32 -D KBUILD_STR(s)=#s +@ -D KBUILD_BASENAME=KBUILD_STR(asm_offsets) +@ -include include/generated/autoconf.h +@ -isystem /opt/toolchain/arm/gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf/bin/../lib/gcc/arm-none-linux-gnueabihf/9.2.1/include +@ -MD arch/arm32/.asm-offsets.s.d arch/arm32/asm-offsets.c -mlittle-endian +@ -mabi=aapcs-linux -mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp -marm +@ -mfloat-abi=hard -mtls-dialect=gnu -march=armv7-a+fp +@ -auxbase-strip arch/arm32/asm-offsets.s -g -Wall -Wundef +@ -Wstrict-prototypes -Wno-trigraphs -Werror=implicit-function-declaration +@ -Wno-format-security -Wno-frame-address -Wframe-larger-than=1024 +@ -Wno-unused-but-set-variable -Wunused-const-variable=0 +@ -Wdeclaration-after-statement -Wno-pointer-sign -Werror=implicit-int +@ -Werror=strict-prototypes -Werror=date-time +@ -Werror=incompatible-pointer-types -Werror=designated-init -std=gnu90 +@ -fno-builtin -ffreestanding -fno-strict-aliasing -fno-common -fno-PIE +@ -fno-dwarf2-cfi-asm -fno-ipa-sra -fno-delete-null-pointer-checks +@ -fno-stack-protector -fomit-frame-pointer -fno-var-tracking-assignments +@ -fno-strict-overflow -fno-merge-all-constants -fmerge-constants +@ -fstack-check=no -fconserve-stack -fno-function-sections +@ -fno-data-sections -funwind-tables -fverbose-asm +@ --param allow-store-data-races=0 +@ options enabled: -faggressive-loop-optimizations -fassume-phsa +@ -fauto-inc-dec -fearly-inlining -feliminate-unused-debug-types +@ -ffp-int-builtin-inexact -ffunction-cse -fgcse-lm -fgnu-runtime +@ -fgnu-unique -fident -finline-atomics -fipa-stack-alignment +@ -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots +@ -fivopts -fkeep-static-consts -fleading-underscore -flifetime-dse +@ -flto-odr-type-merging -fmath-errno -fmerge-constants +@ -fmerge-debug-strings -fomit-frame-pointer -fpeephole -fplt +@ -fprefetch-loop-arrays -freg-struct-return +@ -fsched-critical-path-heuristic -fsched-dep-count-heuristic +@ -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic +@ -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic +@ -fsched-stalled-insns-dep -fsemantic-interposition -fshow-column +@ -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller +@ -fssa-backprop -fstdarg-opt -fstrict-volatile-bitfields -fsync-libcalls +@ -ftrapping-math -ftree-cselim -ftree-forwprop -ftree-loop-if-convert +@ -ftree-loop-im -ftree-loop-ivcanon -ftree-loop-optimize +@ -ftree-parallelize-loops= -ftree-phiprop -ftree-reassoc -ftree-scev-cprop +@ -funit-at-a-time -funwind-tables -fverbose-asm -fwrapv -fwrapv-pointer +@ -fzero-initialized-in-bss -marm -mbe32 -mglibc -mlittle-endian +@ -mpic-data-is-text-relative -msched-prolog -munaligned-access +@ -mvectorize-with-neon-quad + + .text +.Ltext0: + .align 2 + .global main + .arch armv7-a + .syntax unified + .arm + .fpu vfp + .type main, %function +main: + .fnstart +.LFB45: + .file 1 "arch/arm32/asm-offsets.c" + .loc 1 52 1 + @ args = 0, pretend = 0, frame = 0 + @ frame_needed = 0, uses_anonymous_args = 0 + @ link register save eliminated. 
+@ arch/arm32/asm-offsets.c:53: BLANK(); + .loc 1 53 2 + .syntax divided +@ 53 "arch/arm32/asm-offsets.c" 1 + +-> +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:55: DEFINE(OFFSET_TCB_CPU_REGS, offsetof(tcb_t, cpu_regs)); + .loc 1 55 2 +@ 55 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_TCB_CPU_REGS #140 offsetof(tcb_t, cpu_regs) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:57: BLANK(); + .loc 1 57 2 +@ 57 "arch/arm32/asm-offsets.c" 1 + +-> +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:59: DEFINE(OFFSET_R0, offsetof(cpu_regs_t, r0)); + .loc 1 59 2 +@ 59 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R0 #0 offsetof(cpu_regs_t, r0) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:60: DEFINE(OFFSET_R1, offsetof(cpu_regs_t, r1)); + .loc 1 60 2 +@ 60 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R1 #4 offsetof(cpu_regs_t, r1) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:61: DEFINE(OFFSET_R2, offsetof(cpu_regs_t, r2)); + .loc 1 61 2 +@ 61 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R2 #8 offsetof(cpu_regs_t, r2) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:62: DEFINE(OFFSET_R3, offsetof(cpu_regs_t, r3)); + .loc 1 62 2 +@ 62 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R3 #12 offsetof(cpu_regs_t, r3) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:63: DEFINE(OFFSET_R4, offsetof(cpu_regs_t, r4)); + .loc 1 63 2 +@ 63 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R4 #16 offsetof(cpu_regs_t, r4) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:64: DEFINE(OFFSET_R5, offsetof(cpu_regs_t, r5)); + .loc 1 64 2 +@ 64 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R5 #20 offsetof(cpu_regs_t, r5) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:65: DEFINE(OFFSET_R6, offsetof(cpu_regs_t, r6)); + .loc 1 65 2 +@ 65 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R6 #24 offsetof(cpu_regs_t, r6) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:66: DEFINE(OFFSET_R7, offsetof(cpu_regs_t, r7)); + .loc 1 66 2 +@ 66 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R7 #28 offsetof(cpu_regs_t, r7) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:67: DEFINE(OFFSET_R8, offsetof(cpu_regs_t, r8)); + .loc 1 67 2 +@ 67 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R8 #32 offsetof(cpu_regs_t, r8) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:68: DEFINE(OFFSET_R9, offsetof(cpu_regs_t, r9)); + .loc 1 68 2 +@ 68 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R9 #36 offsetof(cpu_regs_t, r9) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:69: DEFINE(OFFSET_R10, offsetof(cpu_regs_t, r10)); + .loc 1 69 2 +@ 69 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_R10 #40 offsetof(cpu_regs_t, r10) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:70: DEFINE(OFFSET_FP, offsetof(cpu_regs_t, fp)); + .loc 1 70 2 +@ 70 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_FP #44 offsetof(cpu_regs_t, fp) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:71: DEFINE(OFFSET_IP, offsetof(cpu_regs_t, ip)); + .loc 1 71 2 +@ 71 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_IP #48 offsetof(cpu_regs_t, ip) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:72: DEFINE(OFFSET_SP, offsetof(cpu_regs_t, sp)); + .loc 1 72 2 +@ 72 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SP #52 offsetof(cpu_regs_t, sp) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:73: DEFINE(OFFSET_LR, offsetof(cpu_regs_t, lr)); + .loc 1 73 2 +@ 73 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_LR #56 offsetof(cpu_regs_t, lr) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:74: DEFINE(OFFSET_PC, offsetof(cpu_regs_t, pc)); + .loc 1 74 2 +@ 74 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_PC #60 offsetof(cpu_regs_t, pc) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:75: DEFINE(OFFSET_PSR, offsetof(cpu_regs_t, psr)); + .loc 1 75 2 +@ 75 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_PSR #64 offsetof(cpu_regs_t, psr) @ +@ 0 "" 2 +@ 
arch/arm32/asm-offsets.c:76: DEFINE(OFFSET_SP_USR, offsetof(cpu_regs_t, sp_usr)); + .loc 1 76 2 +@ 76 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SP_USR #68 offsetof(cpu_regs_t, sp_usr) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:77: DEFINE(OFFSET_LR_USR, offsetof(cpu_regs_t, lr_usr)); + .loc 1 77 2 +@ 77 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_LR_USR #72 offsetof(cpu_regs_t, lr_usr) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:79: BLANK(); + .loc 1 79 2 +@ 79 "arch/arm32/asm-offsets.c" 1 + +-> +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:81: DEFINE(OFFSET_SYS_SIGNUM, offsetof(__sigaction_t, signum)); + .loc 1 81 2 +@ 81 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SYS_SIGNUM #0 offsetof(__sigaction_t, signum) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:82: DEFINE(OFFSET_SYS_SA, offsetof(__sigaction_t, sa)); + .loc 1 82 2 +@ 82 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SYS_SA #4 offsetof(__sigaction_t, sa) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:84: BLANK(); + .loc 1 84 2 +@ 84 "arch/arm32/asm-offsets.c" 1 + +-> +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:86: DEFINE(OFFSET_SA_HANDLER, offsetof(sigaction_t, sa_handler)); + .loc 1 86 2 +@ 86 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SA_HANDLER #0 offsetof(sigaction_t, sa_handler) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:87: DEFINE(OFFSET_SA_RESTORER, offsetof(sigaction_t, sa_restorer)); + .loc 1 87 2 +@ 87 "arch/arm32/asm-offsets.c" 1 + +->OFFSET_SA_RESTORER #8 offsetof(sigaction_t, sa_restorer) @ +@ 0 "" 2 +@ arch/arm32/asm-offsets.c:89: return 0; + .loc 1 89 9 + .arm + .syntax unified + mov r3, #0 @ _1, +@ arch/arm32/asm-offsets.c:90: } + .loc 1 90 1 + mov r0, r3 @, + bx lr @ +.LFE45: + .fnend + .size main, .-main + .section .debug_frame,"",%progbits +.Lframe0: + .4byte .LECIE0-.LSCIE0 +.LSCIE0: + .4byte 0xffffffff + .byte 0x3 + .ascii "\000" + .uleb128 0x1 + .sleb128 -4 + .uleb128 0xe + .byte 0xc + .uleb128 0xd + .uleb128 0 + .align 2 +.LECIE0: +.LSFDE0: + .4byte .LEFDE0-.LASFDE0 +.LASFDE0: + .4byte .Lframe0 + .4byte .LFB45 + .4byte .LFE45-.LFB45 + .align 2 +.LEFDE0: + .text +.Letext0: + .file 2 "arch/arm32/include/asm/types.h" + .file 3 "include/types.h" + .file 4 "arch/arm32/include/asm/memory.h" + .file 5 "include/common.h" + .file 6 "include/thread.h" + .section .debug_info,"",%progbits +.Ldebug_info0: + .4byte 0x111 + .2byte 0x4 + .4byte .Ldebug_abbrev0 + .byte 0x4 + .uleb128 0x1 + .4byte .LASF22 + .byte 0x1 + .4byte .LASF23 + .4byte .LASF24 + .4byte .Ltext0 + .4byte .Letext0-.Ltext0 + .4byte .Ldebug_line0 + .uleb128 0x2 + .byte 0x4 + .byte 0x7 + .4byte .LASF0 + .uleb128 0x2 + .byte 0x2 + .byte 0x7 + .4byte .LASF1 + .uleb128 0x2 + .byte 0x1 + .byte 0x6 + .4byte .LASF2 + .uleb128 0x2 + .byte 0x1 + .byte 0x8 + .4byte .LASF3 + .uleb128 0x2 + .byte 0x2 + .byte 0x5 + .4byte .LASF4 + .uleb128 0x3 + .byte 0x4 + .byte 0x5 + .ascii "int\000" + .uleb128 0x4 + .4byte .LASF8 + .byte 0x2 + .byte 0x26 + .byte 0x16 + .4byte 0x5b + .uleb128 0x2 + .byte 0x4 + .byte 0x7 + .4byte .LASF5 + .uleb128 0x2 + .byte 0x8 + .byte 0x5 + .4byte .LASF6 + .uleb128 0x2 + .byte 0x8 + .byte 0x7 + .4byte .LASF7 + .uleb128 0x4 + .4byte .LASF9 + .byte 0x3 + .byte 0x3f + .byte 0x11 + .4byte 0x4f + .uleb128 0x5 + .4byte .LASF11 + .byte 0x4 + .byte 0x1b + .byte 0x12 + .4byte 0x88 + .uleb128 0x6 + .byte 0x4 + .4byte 0x70 + .uleb128 0x2 + .byte 0x1 + .byte 0x8 + .4byte .LASF10 + .uleb128 0x5 + .4byte .LASF12 + .byte 0x5 + .byte 0x1b + .byte 0x11 + .4byte 0x70 + .uleb128 0x7 + .byte 0x7 + .byte 0x4 + .4byte 0x5b + .byte 0x5 + .byte 0x5c + .byte 0xe + .4byte 0xce + .uleb128 0x8 + .4byte .LASF13 + .byte 0 + 
.uleb128 0x8 + .4byte .LASF14 + .byte 0x1 + .uleb128 0x8 + .4byte .LASF15 + .byte 0x2 + .uleb128 0x8 + .4byte .LASF16 + .byte 0x3 + .uleb128 0x8 + .4byte .LASF17 + .byte 0x4 + .byte 0 + .uleb128 0x4 + .4byte .LASF18 + .byte 0x5 + .byte 0x5e + .byte 0x3 + .4byte 0xa1 + .uleb128 0x5 + .4byte .LASF19 + .byte 0x5 + .byte 0x5f + .byte 0x15 + .4byte 0xce + .uleb128 0x5 + .4byte .LASF20 + .byte 0x5 + .byte 0x64 + .byte 0x11 + .4byte 0x70 + .uleb128 0x5 + .4byte .LASF21 + .byte 0x6 + .byte 0x2a + .byte 0x15 + .4byte 0x5b + .uleb128 0x9 + .4byte .LASF25 + .byte 0x1 + .byte 0x33 + .byte 0x5 + .4byte 0x48 + .4byte .LFB45 + .4byte .LFE45-.LFB45 + .uleb128 0x1 + .byte 0x9c + .byte 0 + .section .debug_abbrev,"",%progbits +.Ldebug_abbrev0: + .uleb128 0x1 + .uleb128 0x11 + .byte 0x1 + .uleb128 0x25 + .uleb128 0xe + .uleb128 0x13 + .uleb128 0xb + .uleb128 0x3 + .uleb128 0xe + .uleb128 0x1b + .uleb128 0xe + .uleb128 0x11 + .uleb128 0x1 + .uleb128 0x12 + .uleb128 0x6 + .uleb128 0x10 + .uleb128 0x17 + .byte 0 + .byte 0 + .uleb128 0x2 + .uleb128 0x24 + .byte 0 + .uleb128 0xb + .uleb128 0xb + .uleb128 0x3e + .uleb128 0xb + .uleb128 0x3 + .uleb128 0xe + .byte 0 + .byte 0 + .uleb128 0x3 + .uleb128 0x24 + .byte 0 + .uleb128 0xb + .uleb128 0xb + .uleb128 0x3e + .uleb128 0xb + .uleb128 0x3 + .uleb128 0x8 + .byte 0 + .byte 0 + .uleb128 0x4 + .uleb128 0x16 + .byte 0 + .uleb128 0x3 + .uleb128 0xe + .uleb128 0x3a + .uleb128 0xb + .uleb128 0x3b + .uleb128 0xb + .uleb128 0x39 + .uleb128 0xb + .uleb128 0x49 + .uleb128 0x13 + .byte 0 + .byte 0 + .uleb128 0x5 + .uleb128 0x34 + .byte 0 + .uleb128 0x3 + .uleb128 0xe + .uleb128 0x3a + .uleb128 0xb + .uleb128 0x3b + .uleb128 0xb + .uleb128 0x39 + .uleb128 0xb + .uleb128 0x49 + .uleb128 0x13 + .uleb128 0x3f + .uleb128 0x19 + .uleb128 0x3c + .uleb128 0x19 + .byte 0 + .byte 0 + .uleb128 0x6 + .uleb128 0xf + .byte 0 + .uleb128 0xb + .uleb128 0xb + .uleb128 0x49 + .uleb128 0x13 + .byte 0 + .byte 0 + .uleb128 0x7 + .uleb128 0x4 + .byte 0x1 + .uleb128 0x3e + .uleb128 0xb + .uleb128 0xb + .uleb128 0xb + .uleb128 0x49 + .uleb128 0x13 + .uleb128 0x3a + .uleb128 0xb + .uleb128 0x3b + .uleb128 0xb + .uleb128 0x39 + .uleb128 0xb + .uleb128 0x1 + .uleb128 0x13 + .byte 0 + .byte 0 + .uleb128 0x8 + .uleb128 0x28 + .byte 0 + .uleb128 0x3 + .uleb128 0xe + .uleb128 0x1c + .uleb128 0xb + .byte 0 + .byte 0 + .uleb128 0x9 + .uleb128 0x2e + .byte 0 + .uleb128 0x3f + .uleb128 0x19 + .uleb128 0x3 + .uleb128 0xe + .uleb128 0x3a + .uleb128 0xb + .uleb128 0x3b + .uleb128 0xb + .uleb128 0x39 + .uleb128 0xb + .uleb128 0x27 + .uleb128 0x19 + .uleb128 0x49 + .uleb128 0x13 + .uleb128 0x11 + .uleb128 0x1 + .uleb128 0x12 + .uleb128 0x6 + .uleb128 0x40 + .uleb128 0x18 + .uleb128 0x2117 + .uleb128 0x19 + .byte 0 + .byte 0 + .byte 0 + .section .debug_aranges,"",%progbits + .4byte 0x1c + .2byte 0x2 + .4byte .Ldebug_info0 + .byte 0x4 + .byte 0 + .2byte 0 + .2byte 0 + .4byte .Ltext0 + .4byte .Letext0-.Ltext0 + .4byte 0 + .4byte 0 + .section .debug_line,"",%progbits +.Ldebug_line0: + .section .debug_str,"MS",%progbits,1 +.LASF8: + .ascii "__u32\000" +.LASF14: + .ascii "BOOT_STAGE_IRQ_INIT\000" +.LASF5: + .ascii "unsigned int\000" +.LASF18: + .ascii "boot_stage_t\000" +.LASF24: + .ascii "/home/rossierd/soo.tech/20-makefiles/so3\000" +.LASF12: + .ascii "__end\000" +.LASF0: + .ascii "long unsigned int\000" +.LASF7: + .ascii "long long unsigned int\000" +.LASF22: + .ascii "GNU C89 9.2.1 20191025 -mlittle-endian -mabi=aapcs-" + .ascii "linux -mabi=aapcs-linux -mno-thumb-interwork -mfpu=" + .ascii "vfp -marm -mfloat-abi=hard 
-mtls-dialect=gnu -march" + .ascii "=armv7-a+fp -g -std=gnu90 -fno-builtin -ffreestandi" + .ascii "ng -fno-strict-aliasing -fno-common -fno-PIE -fno-d" + .ascii "warf2-cfi-asm -fno-ipa-sra -fno-delete-null-pointer" + .ascii "-checks -fno-stack-protector -fomit-frame-pointer -" + .ascii "fno-var-tracking-assignments -fno-strict-overflow -" + .ascii "fno-merge-all-constants -fmerge-constants -fstack-c" + .ascii "heck=no -fconserve-stack -fno-function-sections -fn" + .ascii "o-data-sections -funwind-tables --param allow-store" + .ascii "-data-races=0\000" +.LASF15: + .ascii "BOOT_STAGE_SCHED\000" +.LASF16: + .ascii "BOOT_STAGE_IRQ_ENABLE\000" +.LASF21: + .ascii "__stack_top\000" +.LASF3: + .ascii "unsigned char\000" +.LASF25: + .ascii "main\000" +.LASF9: + .ascii "uint32_t\000" +.LASF17: + .ascii "BOOT_STAGE_COMPLETED\000" +.LASF6: + .ascii "long long int\000" +.LASF1: + .ascii "short unsigned int\000" +.LASF2: + .ascii "signed char\000" +.LASF11: + .ascii "__sys_l1pgtable\000" +.LASF23: + .ascii "arch/arm32/asm-offsets.c\000" +.LASF20: + .ascii "origin_cpu\000" +.LASF19: + .ascii "boot_stage\000" +.LASF4: + .ascii "short int\000" +.LASF13: + .ascii "BOOT_STAGE_INIT\000" +.LASF10: + .ascii "char\000" + .ident "GCC: (GNU Toolchain for the A-profile Architecture 9.2-2019.12 (arm-9.10)) 9.2.1 20191025" + .section .note.GNU-stack,"",%progbits diff --git a/so3/arch/arm/boot/Makefile b/so3/arch/arm32/boot/Makefile similarity index 100% rename from so3/arch/arm/boot/Makefile rename to so3/arch/arm32/boot/Makefile diff --git a/so3/arch/arm/boot/head.S b/so3/arch/arm32/boot/head.S similarity index 100% rename from so3/arch/arm/boot/head.S rename to so3/arch/arm32/boot/head.S diff --git a/so3/arch/arm/bpi/Kconfig b/so3/arch/arm32/bpi/Kconfig similarity index 100% rename from so3/arch/arm/bpi/Kconfig rename to so3/arch/arm32/bpi/Kconfig diff --git a/so3/arch/arm/bpi/include/mach/timer.h b/so3/arch/arm32/bpi/include/mach/timer.h similarity index 100% rename from so3/arch/arm/bpi/include/mach/timer.h rename to so3/arch/arm32/bpi/include/mach/timer.h diff --git a/so3/arch/arm/bpi/include/mach/uart.h b/so3/arch/arm32/bpi/include/mach/uart.h similarity index 100% rename from so3/arch/arm/bpi/include/mach/uart.h rename to so3/arch/arm32/bpi/include/mach/uart.h diff --git a/so3/arch/arm/cache-cp15.c b/so3/arch/arm32/cache-cp15.c similarity index 100% rename from so3/arch/arm/cache-cp15.c rename to so3/arch/arm32/cache-cp15.c diff --git a/so3/arch/arm/cache_v7.c b/so3/arch/arm32/cache_v7.c similarity index 100% rename from so3/arch/arm/cache_v7.c rename to so3/arch/arm32/cache_v7.c diff --git a/so3/arch/arm/cache_v7_asm.S b/so3/arch/arm32/cache_v7_asm.S similarity index 100% rename from so3/arch/arm/cache_v7_asm.S rename to so3/arch/arm32/cache_v7_asm.S diff --git a/so3/arch/arm/context.S b/so3/arch/arm32/context.S similarity index 100% rename from so3/arch/arm/context.S rename to so3/arch/arm32/context.S diff --git a/so3/arch/arm/exception.S b/so3/arch/arm32/exception.S similarity index 100% rename from so3/arch/arm/exception.S rename to so3/arch/arm32/exception.S diff --git a/so3/arch/arm/fault.c b/so3/arch/arm32/fault.c similarity index 100% rename from so3/arch/arm/fault.c rename to so3/arch/arm32/fault.c diff --git a/so3/arch/arm/include/asm/armv7.h b/so3/arch/arm32/include/asm/armv7.h similarity index 100% rename from so3/arch/arm/include/asm/armv7.h rename to so3/arch/arm32/include/asm/armv7.h diff --git a/so3/arch/arm/include/asm/assembler.h b/so3/arch/arm32/include/asm/assembler.h similarity index 100% 
rename from so3/arch/arm/include/asm/assembler.h rename to so3/arch/arm32/include/asm/assembler.h diff --git a/so3/arch/arm/include/asm/atomic-generic.h b/so3/arch/arm32/include/asm/atomic-generic.h similarity index 100% rename from so3/arch/arm/include/asm/atomic-generic.h rename to so3/arch/arm32/include/asm/atomic-generic.h diff --git a/so3/arch/arm/include/asm/atomic.h b/so3/arch/arm32/include/asm/atomic.h similarity index 100% rename from so3/arch/arm/include/asm/atomic.h rename to so3/arch/arm32/include/asm/atomic.h diff --git a/so3/arch/arm/include/asm/byteorder.h b/so3/arch/arm32/include/asm/byteorder.h similarity index 100% rename from so3/arch/arm/include/asm/byteorder.h rename to so3/arch/arm32/include/asm/byteorder.h diff --git a/so3/arch/arm/include/asm/cacheflush.h b/so3/arch/arm32/include/asm/cacheflush.h similarity index 100% rename from so3/arch/arm/include/asm/cacheflush.h rename to so3/arch/arm32/include/asm/cacheflush.h diff --git a/so3/arch/arm/include/asm/errno.h b/so3/arch/arm32/include/asm/errno.h similarity index 100% rename from so3/arch/arm/include/asm/errno.h rename to so3/arch/arm32/include/asm/errno.h diff --git a/so3/arch/arm/include/asm/hardware.h b/so3/arch/arm32/include/asm/hardware.h similarity index 100% rename from so3/arch/arm/include/asm/hardware.h rename to so3/arch/arm32/include/asm/hardware.h diff --git a/so3/arch/arm/include/asm/hardware/pl310.h b/so3/arch/arm32/include/asm/hardware/pl310.h similarity index 100% rename from so3/arch/arm/include/asm/hardware/pl310.h rename to so3/arch/arm32/include/asm/hardware/pl310.h diff --git a/so3/arch/arm/include/asm/io.h b/so3/arch/arm32/include/asm/io.h similarity index 100% rename from so3/arch/arm/include/asm/io.h rename to so3/arch/arm32/include/asm/io.h diff --git a/so3/arch/arm/include/asm/memory.h b/so3/arch/arm32/include/asm/memory.h similarity index 100% rename from so3/arch/arm/include/asm/memory.h rename to so3/arch/arm32/include/asm/memory.h diff --git a/so3/arch/arm/include/asm/mmu.h b/so3/arch/arm32/include/asm/mmu.h similarity index 100% rename from so3/arch/arm/include/asm/mmu.h rename to so3/arch/arm32/include/asm/mmu.h diff --git a/so3/arch/arm/include/asm/posix_types.h b/so3/arch/arm32/include/asm/posix_types.h similarity index 100% rename from so3/arch/arm/include/asm/posix_types.h rename to so3/arch/arm32/include/asm/posix_types.h diff --git a/so3/arch/arm/include/asm/processor.h b/so3/arch/arm32/include/asm/processor.h similarity index 100% rename from so3/arch/arm/include/asm/processor.h rename to so3/arch/arm32/include/asm/processor.h diff --git a/so3/arch/arm/include/asm/setup.h b/so3/arch/arm32/include/asm/setup.h similarity index 100% rename from so3/arch/arm/include/asm/setup.h rename to so3/arch/arm32/include/asm/setup.h diff --git a/so3/arch/arm/include/asm/spinlock.h b/so3/arch/arm32/include/asm/spinlock.h similarity index 100% rename from so3/arch/arm/include/asm/spinlock.h rename to so3/arch/arm32/include/asm/spinlock.h diff --git a/so3/arch/arm/include/asm/syscall.h b/so3/arch/arm32/include/asm/syscall.h similarity index 100% rename from so3/arch/arm/include/asm/syscall.h rename to so3/arch/arm32/include/asm/syscall.h diff --git a/so3/arch/arm/include/asm/types.h b/so3/arch/arm32/include/asm/types.h similarity index 100% rename from so3/arch/arm/include/asm/types.h rename to so3/arch/arm32/include/asm/types.h diff --git a/so3/arch/arm/include/asm/utils.h b/so3/arch/arm32/include/asm/utils.h similarity index 100% rename from so3/arch/arm/include/asm/utils.h rename to 
so3/arch/arm32/include/asm/utils.h diff --git a/so3/arch/arm/merida/Kconfig b/so3/arch/arm32/merida/Kconfig similarity index 100% rename from so3/arch/arm/merida/Kconfig rename to so3/arch/arm32/merida/Kconfig diff --git a/so3/arch/arm/merida/include/mach/timer.h b/so3/arch/arm32/merida/include/mach/timer.h similarity index 100% rename from so3/arch/arm/merida/include/mach/timer.h rename to so3/arch/arm32/merida/include/mach/timer.h diff --git a/so3/arch/arm/merida/include/mach/uart.h b/so3/arch/arm32/merida/include/mach/uart.h similarity index 100% rename from so3/arch/arm/merida/include/mach/uart.h rename to so3/arch/arm32/merida/include/mach/uart.h diff --git a/so3/arch/arm/mmu.c b/so3/arch/arm32/mmu.c similarity index 100% rename from so3/arch/arm/mmu.c rename to so3/arch/arm32/mmu.c diff --git a/so3/arch/arm/rpi4/Kconfig b/so3/arch/arm32/rpi4/Kconfig similarity index 100% rename from so3/arch/arm/rpi4/Kconfig rename to so3/arch/arm32/rpi4/Kconfig diff --git a/so3/arch/arm/rpi4/include/mach/uart.h b/so3/arch/arm32/rpi4/include/mach/uart.h similarity index 100% rename from so3/arch/arm/rpi4/include/mach/uart.h rename to so3/arch/arm32/rpi4/include/mach/uart.h diff --git a/so3/arch/arm/setup.c b/so3/arch/arm32/setup.c similarity index 100% rename from so3/arch/arm/setup.c rename to so3/arch/arm32/setup.c diff --git a/so3/arch/arm/so3.lds b/so3/arch/arm32/so3.lds similarity index 100% rename from so3/arch/arm/so3.lds rename to so3/arch/arm32/so3.lds diff --git a/so3/arch/arm/vexpress/Kconfig b/so3/arch/arm32/vexpress/Kconfig similarity index 100% rename from so3/arch/arm/vexpress/Kconfig rename to so3/arch/arm32/vexpress/Kconfig diff --git a/so3/arch/arm/vexpress/include/mach/uart.h b/so3/arch/arm32/vexpress/include/mach/uart.h similarity index 100% rename from so3/arch/arm/vexpress/include/mach/uart.h rename to so3/arch/arm32/vexpress/include/mach/uart.h diff --git a/so3/arch/arm64/Kconfig b/so3/arch/arm64/Kconfig new file mode 100644 index 000000000..f200af763 --- /dev/null +++ b/so3/arch/arm64/Kconfig @@ -0,0 +1,22 @@ + +if ARCH_ARM64 + +menu "Platform" + + choice + prompt "Target" + + config VIRT64 + bool "Generic QEMU virt64 model" + + config RPI4_64 + bool "Raspberry Pi 4 64-bit" + + endchoice + +source "arch/arm64/virt64/Kconfig" +source "arch/arm64/rpi4_64/Kconfig" + +endmenu + +endif diff --git a/so3/arch/arm64/Makefile b/so3/arch/arm64/Makefile new file mode 100644 index 000000000..1cd7ed5fa --- /dev/null +++ b/so3/arch/arm64/Makefile @@ -0,0 +1,14 @@ + +obj-y += boot/ + +obj-y += spinlock.o + +obj-y += fault.o +obj-y += backtrace.o backtrace_asm.o +obj-y += setup.o mmu.o cache_v8.o cache.o context.o domain.o +obj-y += exception.o + +obj-y += lib/ + +#obj-y += $(TARGET)/ + diff --git a/so3/arch/arm64/asm-offsets.c b/so3/arch/arm64/asm-offsets.c new file mode 100644 index 000000000..80239a607 --- /dev/null +++ b/so3/arch/arm64/asm-offsets.c @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include + +#include +#include + +/* Use marker if you need to separate the values later */ + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + + BLANK(); + + DEFINE(OFFSET_X0, offsetof(struct cpu_regs, x0)); + DEFINE(OFFSET_X1, offsetof(struct cpu_regs, x1)); + DEFINE(OFFSET_X2, offsetof(struct cpu_regs, x2)); + DEFINE(OFFSET_X3, offsetof(struct cpu_regs, x3)); + DEFINE(OFFSET_X4, offsetof(struct cpu_regs, x4)); + DEFINE(OFFSET_X5, offsetof(struct cpu_regs, x5)); + DEFINE(OFFSET_X6, offsetof(struct cpu_regs, x6)); + DEFINE(OFFSET_X7, offsetof(struct cpu_regs, x7)); + DEFINE(OFFSET_X8, offsetof(struct cpu_regs, x8)); + DEFINE(OFFSET_X9, offsetof(struct cpu_regs, x9)); + DEFINE(OFFSET_X10, offsetof(struct cpu_regs, x10)); + DEFINE(OFFSET_X11, offsetof(struct cpu_regs, x11)); + DEFINE(OFFSET_X12, offsetof(struct cpu_regs, x12)); + DEFINE(OFFSET_X13, offsetof(struct cpu_regs, x13)); + DEFINE(OFFSET_X14, offsetof(struct cpu_regs, x14)); + DEFINE(OFFSET_X15, offsetof(struct cpu_regs, x15)); + DEFINE(OFFSET_X16, offsetof(struct cpu_regs, x16)); + DEFINE(OFFSET_X17, offsetof(struct cpu_regs, x17)); + DEFINE(OFFSET_X18, offsetof(struct cpu_regs, x18)); + DEFINE(OFFSET_X19, offsetof(struct cpu_regs, x19)); + DEFINE(OFFSET_X20, offsetof(struct cpu_regs, x20)); + DEFINE(OFFSET_X21, offsetof(struct cpu_regs, x21)); + DEFINE(OFFSET_X22, offsetof(struct cpu_regs, x22)); + DEFINE(OFFSET_X23, offsetof(struct cpu_regs, x23)); + DEFINE(OFFSET_X24, offsetof(struct cpu_regs, x24)); + DEFINE(OFFSET_X25, offsetof(struct cpu_regs, x25)); + DEFINE(OFFSET_X26, offsetof(struct cpu_regs, x26)); + DEFINE(OFFSET_X27, offsetof(struct cpu_regs, x27)); + DEFINE(OFFSET_X28, offsetof(struct cpu_regs, x28)); + DEFINE(OFFSET_FP, offsetof(struct cpu_regs, fp)); + DEFINE(OFFSET_LR, offsetof(struct cpu_regs, lr)); + DEFINE(OFFSET_SP, offsetof(struct cpu_regs, sp)); + DEFINE(OFFSET_PC, offsetof(struct cpu_regs, pc)); + DEFINE(OFFSET_PSTATE, offsetof(struct cpu_regs, pstate)); + + return 0; +} + diff --git a/so3/arch/arm64/backtrace.c b/so3/arch/arm64/backtrace.c new file mode 100644 index 000000000..f5c615464 --- /dev/null +++ b/so3/arch/arm64/backtrace.c @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2016,2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +void show_registers(struct cpu_regs *regs); +extern void __backtrace(void); + +void show_backtrace(ulong sp, ulong lr, ulong pc) +{ + // __backtrace(); +} + +void show_backtrace_regs(struct cpu_regs *regs) +{ + show_registers(regs); + //__backtrace(); +} + +void show_registers(struct cpu_regs *regs) +{ + //unsigned long flags = condition_codes(regs); + + printk("CPU: %d\n", smp_processor_id()); + +#if 0 + printk("PC is at %08lx\n", (unsigned long) regs->r15); + printk("LR is at %08lx\n", (unsigned long) regs->r14); + printk("pc : [<%08lx>] lr : [<%08lx>] \n" + "sp : %08lx ip : %08lx fp : %08lx\n", + (unsigned long) regs->r15, + (unsigned long) regs->r14, (unsigned long) regs->r13, + (unsigned long) regs->r12, (unsigned long) regs->r11); + printk("r10: %08lx r9 : %08lx r8 : %08lx\n", + (unsigned long) regs->r10, (unsigned long) regs->r9, + (unsigned long) regs->r8); + printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + (unsigned long) regs->r7, (unsigned long) regs->r6, + (unsigned long) regs->r5, (unsigned long) regs->r4); + printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + (unsigned long) regs->r3, (unsigned long) regs->r2, + (unsigned long) regs->r1, (unsigned long) regs->r0); + printk("Flags: %c%c%c%c", + flags & PSR_N_BIT ? 'N' : 'n', + flags & PSR_Z_BIT ? 'Z' : 'z', + flags & PSR_C_BIT ? 'C' : 'c', + flags & PSR_V_BIT ? 'V' : 'v'); + printk(" IRQs o%s FIQs o%s Mode %s%s\n", + interrupts_enabled(regs) ? "n" : "ff", + fast_interrupts_enabled(regs) ? 
"n" : "ff", + processor_modes[processor_mode(regs)], + "ARM"); + + { + unsigned int ctrl, transbase, dac; + __asm__ ( + " mrc p15, 0, %0, c1, c0\n" + " mrc p15, 0, %1, c2, c0\n" + " mrc p15, 0, %2, c3, c0\n" + : "=r" (ctrl), "=r" (transbase), "=r" (dac)); + printk("Control: %04X Table: %08X DAC: %08X\n", + ctrl, transbase, dac); + } +#endif +} + +void dump_stack(void) +{ + __backtrace(); +} + +void dump_execution_state(void) +{ +#if 0 + struct cpu_user_regs regs; + + register unsigned int r0 __asm__("r0"); + register unsigned int r1 __asm__("r1"); + register unsigned int r2 __asm__("r2"); + register unsigned int r3 __asm__("r3"); + register unsigned int r4 __asm__("r4"); + register unsigned int r5 __asm__("r5"); + register unsigned int r6 __asm__("r6"); + register unsigned int r7 __asm__("r7"); + register unsigned int r8 __asm__("r8"); + register unsigned int r9 __asm__("r9"); + register unsigned int r10 __asm__("r10"); + register unsigned int r11 __asm__("r11"); + register unsigned int r12 __asm__("r12"); + register unsigned int r13 __asm__("r13"); + register unsigned int r14 __asm__("r14"); + register unsigned int r15; + + asm("mov %0, pc":"=r"(r15)); + + regs.r0 = r0; + regs.r1 = r1; + regs.r2 = r2; + regs.r3 = r3; + regs.r4 = r4; + regs.r5 = r5; + regs.r6 = r6; + regs.r7 = r7; + regs.r8 = r8; + regs.r9 = r9; + regs.r10 = r10; + regs.r11 = r11; + regs.r12 = r12; + regs.r13 = r13; + regs.r14 = r14; + regs.r15 = r15; + + __asm__ __volatile__("mrs %0, cpsr " : "=r" (regs.psr) : : "memory", "cc"); + + show_registers(®s); +#endif +} + + +void dump_all_execution_state(void) +{ + ulong sp; + ulong lr; + + dump_execution_state(); + sp = (ulong)__builtin_frame_address(0); + lr = (ulong)__builtin_return_address(0); + + show_backtrace(sp, lr, lr); +} + +void vcpu_show_execution_state(struct domain *d) +{ + printk("*** Dumping Dom%d state: ***\n", d->domain_id); + + if (d == current) + { + dump_execution_state(); + return; + } + + vcpu_pause(d); /* acceptably dangerous */ + + dump_execution_state(); + + vcpu_unpause(d); +} + diff --git a/so3/arch/arm64/backtrace_asm.S b/so3/arch/arm64/backtrace_asm.S new file mode 100644 index 000000000..fc64d643f --- /dev/null +++ b/so3/arch/arm64/backtrace_asm.S @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014-2021 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include + +#include + + +ENTRY(__backtrace) + + +// To be completed + diff --git a/so3/arch/arm64/boot/Makefile b/so3/arch/arm64/boot/Makefile new file mode 100644 index 000000000..be260c871 --- /dev/null +++ b/so3/arch/arm64/boot/Makefile @@ -0,0 +1 @@ +obj-y += head.o hyp-stub.o diff --git a/so3/arch/arm64/boot/head.S b/so3/arch/arm64/boot/head.S new file mode 100644 index 000000000..68f08e4bc --- /dev/null +++ b/so3/arch/arm64/boot/head.S @@ -0,0 +1,424 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include + +#include + +#include +#include +#include +#include + +.global __start +.global __fdt_addr + +.global cpu1_stack +.global cpu2_stack +.global cpu3_stack + +.extern clear_bss +.extern mmu_configure + +.extern __stack_top + +.section ".head.text","ax" + + +__pre_head: + b __start // branch to the main entry point + .long 0 // reserved + + .quad 0 // Image load offset from start of RAM, little-endian + .quad __end - __start // Effective size of kernel image, little-endian + .quad __HEAD_FLAGS // Informative flags, little-endian + .quad 0 // reserved + .quad 0 // reserved + .quad __end // reserved + .ascii __HEAD_MAGIC // Magic number + .long 0 // reserved + +__start: + + // Preserve the fdt addr (device tree) which is stored in x0 by U-boot + mov x9, x0 + + // Mostly for future usage... + bl el2_setup // Drop to EL1, w0=cpu_boot_mode + + // Initialize stack pointers for current mode (normal case if no MMU is used) + adrp x0, cpu0_stack + mov sp, x0 + + /* Clear the BSS */ + + adrp x0, __bss_start + adrp x1, __bss_end +1: + strb wzr, [x0], #1 + + cmp x0, x1 + b.cc 1b + + mov x0, #3 << 20 + msr cpacr_el1, x0 // Enable FP/ASIMD + + // Up to here, a stack should be initialized + + // Set up the MMU + b mmu_setup + +__kernel_main: + + // C main entry point + b kernel_start + + // never returns... + +.align 2 + +mmu_setup: + + // Use a temporary stack + adrp x0, cpu0_stack + mov sp, x0 + + mov x0, x9 // fdt addr + bl mmu_configure + + // Readjust the stack + ldr x0, =cpu0_stack + mov sp, x0 + + // Keep executing in the kernel space + + // Store the virtual address which will be used to continue + // the execution after the MMU enabled. + ldr x0, .LCvirt_entry + blr x0 + +/* + * If we're fortunate enough to boot at EL2, ensure that the world is + * sane before dropping to EL1. + * + * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if + * booted in EL1 or EL2 respectively. 
+ */ +el2_setup: + msr SPsel, #1 // We want to use SP_EL{1,2} + mrs x0, CurrentEL + cmp x0, #CurrentEL_EL2 + b.eq 1f + ldr x0, =(SCTLR_EL1_RES1 | ENDIAN_SET_EL1) + + msr sctlr_el1, x0 + mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 + isb + ret + +1: ldr x0, =(SCTLR_EL2_RES1 | ENDIAN_SET_EL2) + msr sctlr_el2, x0 + + /* + * Check for VHE being present. For the rest of the EL2 setup, + * x2 being non-zero indicates that we do have VHE, and that the + * kernel is intended to run at EL2. + */ + mrs x2, id_aa64mmfr1_el1 + ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4 + + /* Hyp configuration. */ + ldr x0, =HCR_HOST_NVHE_FLAGS + cbz x2, set_hcr + ldr x0, =HCR_HOST_VHE_FLAGS + +set_hcr: + msr hcr_el2, x0 + isb + + /* + * Allow Non-secure EL1 and EL0 to access physical timer and counter. + * This is not necessary for VHE, since the host kernel runs in EL2, + * and EL0 accesses are configured in the later stage of boot process. + * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout + * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined + * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1 + * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in + * EL2. + */ + cbnz x2, 1f + mrs x0, cnthctl_el2 + orr x0, x0, #3 // Enable EL1 physical timers + msr cnthctl_el2, x0 +1: + msr cntvoff_el2, xzr // Clear virtual offset + + /* GICv3 system register access */ + mrs x0, id_aa64pfr0_el1 + ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 + cbz x0, 3f + + mrs_s x0, SYS_ICC_SRE_EL2 + orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 + orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 + msr_s SYS_ICC_SRE_EL2, x0 + isb // Make sure SRE is now set + mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, + tbz x0, #0, 3f // and check that it sticks + msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults + +3: + + /* Populate ID registers. */ + mrs x0, midr_el1 + mrs x1, mpidr_el1 + msr vpidr_el2, x0 + msr vmpidr_el2, x1 + + msr hstr_el2, xzr // Disable CP15 traps to EL2 + + /* EL2 debug */ + mrs x1, id_aa64dfr0_el1 + sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 + cmp x0, #1 + b.lt 4f // Skip if no PMU present + mrs x0, pmcr_el0 // Disable debug access traps + ubfx x0, x0, #11, #5 // to EL2 and allow access to +4: + csel x3, xzr, x0, lt // all PMU counters from EL1 + + /* Statistical profiling */ + ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 + cbz x0, 7f // Skip if SPE not present + cbnz x2, 6f // VHE? + mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, + and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT) + cbnz x4, 5f // then permit sampling of physical + mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ + 1 << SYS_PMSCR_EL2_PA_SHIFT) + msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter +5: + mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) + orr x3, x3, x1 // If we don't have VHE, then + b 7f // use EL1&0 translation. +6: // For VHE, use EL2 translation + orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1 +7: + msr mdcr_el2, x3 // Configure debug traps + + /* LORegions */ + mrs x1, id_aa64mmfr1_el1 + ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 + cbz x0, 1f + msr_s SYS_LORC_EL1, xzr +1: + + /* Stage-2 translation */ + msr vttbr_el2, xzr + + cbz x2, install_el2_stub + + mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + isb + ret + +install_el2_stub: + /* + * When VHE is not in use, early init of EL2 and EL1 needs to be + * done here. 
+ * When VHE _is_ in use, EL1 will not be used in the host and + * requires no configuration, and all non-hyp-specific EL2 setup + * will be done via the _EL1 system register aliases in __cpu_setup. + */ + ldr x0, =(SCTLR_EL1_RES1 | ENDIAN_SET_EL1) + msr sctlr_el1, x0 + + /* Coprocessor traps. */ + mov x0, #0x33ff + msr cptr_el2, x0 // Disable copro. traps to EL2 + + /* SVE register access */ + mrs x1, id_aa64pfr0_el1 + ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 + cbz x1, 7f + + bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps + msr cptr_el2, x0 // Disable copro. traps to EL2 + isb + mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector + msr_s SYS_ZCR_EL2, x1 // length for EL1. + + /* Hypervisor stub */ +7: adrp x0, __hyp_stub_vectors + msr vbar_el2, x0 + + /* spsr */ + mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL1h) + msr spsr_el2, x0 + msr elr_el2, lr + mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + eret + + +#if 0 + +_start: + + + @ r1 = machine id + @ r2 = dtb address + + bl __hyp_stub_install + + @ Make sure we start in SVC mode + + safe_svcmode_maskall r9 + + msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | PSR_MODE_SVC @ ensure svc mode and irqs disabled + + @ Preserve the (physical address of) device tree base in r9 + mov r9, r2 + + @ Initialize stack pointers for current mode (normal case if no MMU is used) + ldr sp, =cpu0_stack + + @ Up to here, a stack should be initialized + + @ Set up the MMU + b mmu_setup + +__kernel_main: + + ldr r0, =__fdt_addr + str r9, [r0] + + @ C main entry point + b kernel_start + + @ never returns... + + .align 2 + +mmu_setup: + + @ Use a temporary stack + adrl sp, cpu0_stack + + @ Refer to the 1st-level page table + ldr r0, =CONFIG_RAM_BASE + TTB_L1_SYS_OFFSET + + mov r1, r9 @ fdt addr + + bl mmu_configure + + @ Readjust the stack + ldr sp, =cpu0_stack + + @ Keep executing in the kernel space + + @ Store the virtual address which will be used to continue + @ the execution after the MMU enabled. + ldr pc, .LCvirt_entry + + +ENTRY(secondary_startup) + /* + * Common entry point for secondary CPUs. + * + * Ensure that we're in SVC mode, and IRQs are disabled. Lookup + * the processor type - there is no need to check the machine type + * as it has already been validated by the primary processor. + */ + + safe_svcmode_maskall r9 + + /* + * Use the page tables supplied from __cpu_up. + */ + adr r4, __secondary_data + ldmia r4, {r5, r7} @ retrieve secondary_data field values (pgdir, stack) / r5 & r7 are virtual addresses. + sub lr, r4, r5 @ r4 is a physical address since the adr pseudo is based on relative pc (mmu is off at this point). + + ldr r0, [lr, r7] @ get secondary_data.pgdir (r7 is virtual, r0 is phys). + add r7, r7, #4 + ldr sp, [lr, r7] @ get the stack pointer (virt. address) + add sp, sp, lr @ convert to phys address. + + @ Up to here, a stack should be initialized + + @ Set up the MMU - The second argument (fdt addr) is not used in this context. + bl mmu_configure + + @ Readjust the stack (r7 has still the virt. address of the stack) + ldr sp, [r7] + + @ Keep executing in the kernel space + + @ Store the virtual address which will be used to continue + @ the execution after the MMU enabled. + ldr pc, .LC_virt_secondary_entry + + +#endif /* 0 */ + + + .align + + .type __secondary_data, %object +__secondary_data: + .quad . 
+ .quad secondary_data + + +.ltorg + +__fdt_addr: + .quad 0 + +.LCvirt_entry: + .quad __kernel_main + +.LC_virt_secondary_entry: + .quad secondary_start_kernel + +.align 8 + +// Before MMU is enabled, we cannot refer to the normal stack as declared in the linker script +cpu0_stack_bottom: + .fill STACK_SIZE, 1, 0 +cpu0_stack: + +.skip 8 +.align 13 +cpu1_stack_bottom: + .fill STACK_SIZE, 1, 0 +cpu1_stack: + +.skip 8 +.align 13 +cpu2_stack_bottom: + .fill STACK_SIZE, 1, 0 +cpu2_stack: + +.skip 8 +.align 13 +cpu3_stack_bottom: + .fill STACK_SIZE, 1, 0 +cpu3_stack: + diff --git a/so3/arch/arm64/boot/hyp-stub.S b/so3/arch/arm64/boot/hyp-stub.S new file mode 100644 index 000000000..d91778194 --- /dev/null +++ b/so3/arch/arm64/boot/hyp-stub.S @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + + +#include + +#include + +ENTRY(__hyp_stub_vectors) +#if 0 + ventry el2_sync_invalid // Synchronous EL2t + ventry el2_irq_invalid // IRQ EL2t + ventry el2_fiq_invalid // FIQ EL2t + ventry el2_error_invalid // Error EL2t + + ventry el2_sync_invalid // Synchronous EL2h + ventry el2_irq_invalid // IRQ EL2h + ventry el2_fiq_invalid // FIQ EL2h + ventry el2_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq_invalid // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync_invalid // Synchronous 32-bit EL1 + ventry el1_irq_invalid // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +#endif + +ENDPROC(__hyp_stub_vectors) + + .align 11 + +el1_sync: + cmp x0, #HVC_SET_VECTORS + b.ne 2f + msr vbar_el2, x1 + b 9f + +2: cmp x0, #HVC_SOFT_RESTART + b.ne 3f + mov x0, x2 + mov x2, x4 + mov x4, x1 + mov x1, x3 + br x4 // no return + +3: cmp x0, #HVC_RESET_VECTORS + beq 9f // Nothing to reset! + + /* Someone called kvm_call_hyp() against the hyp-stub... */ + ldr x0, =HVC_STUB_ERR + eret + +9: mov x0, xzr + eret +ENDPROC(el1_sync) + +.macro invalid_vector label +\label: + b \label +ENDPROC(\label) +.endm + + invalid_vector el2_sync_invalid + invalid_vector el2_irq_invalid + invalid_vector el2_fiq_invalid + invalid_vector el2_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + +/* + * __hyp_set_vectors: Call this after boot to set the initial hypervisor + * vectors as part of hypervisor installation. On an SMP system, this should + * be called on each CPU. + * + * x0 must be the physical address of the new vector table, and must be + * 2KB aligned. 
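+ *
+ * A minimal usage sketch (illustrative; my_hyp_vectors is a hypothetical,
+ * suitably aligned vector table):
+ *
+ *	if (is_hyp_mode_available())
+ *		__hyp_set_vectors(__pa(my_hyp_vectors));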
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that is_hyp_mode_available() is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_el2 will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+
+ENTRY(__hyp_set_vectors)
+	mov	x1, x0
+	mov	x0, #HVC_SET_VECTORS
+	hvc	#0
+	ret
+ENDPROC(__hyp_set_vectors)
+
+ENTRY(__hyp_reset_vectors)
+	mov	x0, #HVC_RESET_VECTORS
+	hvc	#0
+	ret
+ENDPROC(__hyp_reset_vectors)
+
diff --git a/so3/arch/arm64/cache.S b/so3/arch/arm64/cache.S
new file mode 100644
index 000000000..387549f15
--- /dev/null
+++ b/so3/arch/arm64/cache.S
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2013
+ * David Feng
+ *
+ * This file is based on sample code from the ARMv8 ARM.
+ */
+
+#include
+
+#include
+
+
+/*
+ * void __flush_tlb_all(void)
+ *
+ * Invalidate all TLB entries on the local CPU.
+ */
+ENTRY(__flush_tlb_all)
+
+	dsb	nshst
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+
+	ret
+
+/*
+ * Invalidate the TLB entry corresponding to a VA (stored in x0)
+ */
+ENTRY(__asm_invalidate_tlb)
+
+	dsb	ish			// Ensure prior table updates are visible
+	tlbi	vaae1is, x0		// Invalidate VA specified by x0, in EL0/1
+					// virtual address space for all ASIDs
+	dsb	ish			// Wait for the invalidation to complete
+	isb				// Synchronize context on this processor
+
+	ret
+
+
+/*
+ * void __asm_invalidate_tlb_all(void)
+ *
+ * invalidate all tlb entries.
+*/
+
+ENTRY(__asm_invalidate_tlb_all)
+
+	tlbi	vmalle1
+	dsb	sy
+	isb
+
+	ret
+
+
+/*
+ * void __asm_dcache_level(level)
+ *
+ * flush or invalidate one level cache.
+ *
+ * x0: cache level
+ * x1: 0 clean & invalidate, 1 invalidate only
+ * x2~x9: clobbered
+ */
+
+ENTRY(__asm_dcache_level)
+	lsl	x12, x0, #1
+	msr	csselr_el1, x12		/* select cache level */
+	isb				/* sync change of csselr_el1 */
+	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
+	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
+	add	x2, x2, #4		/* x2 <- log2(cache line size) */
+	mov	x3, #0x3ff
+	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
+	clz	w5, w3			/* bit position of #ways */
+	mov	x4, #0x7fff
+	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
+	/* x12 <- cache level << 1 */
+	/* x2 <- line length offset */
+	/* x3 <- number of cache ways - 1 */
+	/* x4 <- number of cache sets - 1 */
+	/* x5 <- bit position of #ways */
+
+loop_set:
+	mov	x6, x3			/* x6 <- working copy of #ways */
+loop_way:
+	lsl	x7, x6, x5
+	orr	x9, x12, x7		/* map way and level to cisw value */
+	lsl	x7, x4, x2
+	orr	x9, x9, x7		/* map set number to cisw value */
+	tbz	w1, #0, 1f
+	dc	isw, x9
+	b	2f
+1:	dc	cisw, x9		/* clean & invalidate by set/way */
+2:	subs	x6, x6, #1		/* decrement the way */
+	b.ge	loop_way
+	subs	x4, x4, #1		/* decrement the set */
+	b.ge	loop_set
+
+	ret
+
+/*
+ * void __asm_dcache_all(int invalidate_only)
+ *
+ * x0: 0 clean & invalidate, 1 invalidate only
+ *
+ * flush or invalidate all data cache by SET/WAY.
+ */
+
+ENTRY(__asm_dcache_all)
+	mov	x1, x0
+	dsb	sy
+	mrs	x10, clidr_el1		/* read clidr_el1 */
+	lsr	x11, x10, #24
+	and	x11, x11, #0x7		/* x11 <- loc */
+	cbz	x11, finished		/* if loc is 0, exit */
+	mov	x15, lr
+	mov	x0, #0			/* start flush at cache level 0 */
+	/* x0  <- cache level */
+	/* x10 <- clidr_el1 */
+	/* x11 <- loc */
+	/* x15 <- return address */
+
+loop_level:
+	lsl	x12, x0, #1
+	add	x12, x12, x0		/* x12 <- tripled cache level */
+	lsr	x12, x10, x12
+	and	x12, x12, #7		/* x12 <- cache type */
+	cmp	x12, #2
+	b.lt	skip			/* skip if no cache or icache */
+	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
+skip:
+	add	x0, x0, #1		/* increment cache level */
+	cmp	x11, x0
+	b.gt	loop_level
+
+	mov	x0, #0
+	msr	csselr_el1, x0		/* restore csselr_el1 */
+	dsb	sy
+	isb
+	mov	lr, x15
+
+finished:
+	ret
+
+ENTRY(__asm_flush_dcache_all)
+	mov	x0, #0
+	b	__asm_dcache_all
+
+ENTRY(__asm_invalidate_dcache_all)
+	mov	x0, #0x1
+	b	__asm_dcache_all
+
+/*
+ * void __asm_flush_dcache_range(start, end)
+ *
+ * clean & invalidate data cache in the range
+ *
+ * x0: start address
+ * x1: end address
+ */
+
+ENTRY(__asm_flush_dcache_range)
+	mrs	x3, ctr_el0
+	lsr	x3, x3, #16
+	and	x3, x3, #0xf
+	mov	x2, #4
+	lsl	x2, x2, x3		/* cache line size */
+
+	/* x2 <- minimal cache line size in cache system */
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+1:	dc	civac, x0	/* clean & invalidate data or unified cache */
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+
+/*
+ * void __asm_invalidate_dcache_range(start, end)
+ *
+ * invalidate data cache in the range
+ *
+ * x0: start address
+ * x1: end address
+ */
+
+ENTRY(__asm_invalidate_dcache_range)
+	mrs	x3, ctr_el0
+	ubfm	x3, x3, #16, #19
+	mov	x2, #4
+	lsl	x2, x2, x3		/* cache line size */
+
+	/* x2 <- minimal cache line size in cache system */
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+1:	dc	ivac, x0	/* invalidate data or unified cache */
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+
+/*
+ * void __asm_invalidate_icache_all(void)
+ *
+ * invalidate all icache entries.
+ */
+
+ENTRY(__asm_invalidate_icache_all)
+	ic	ialluis
+	isb	sy
+	ret
+
+/*
+ * void __asm_switch_ttbr(ulong new_ttbr)
+ *
+ * Safely switches to a new page table.
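+ *
+ * Usage sketch (illustrative; new_pt being the physical address of a
+ * freshly built page table):
+ *
+ *	__asm_switch_ttbr((ulong) new_pt);
+ *
+ * Caches and the original SCTLR are restored before returning.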
+ */
+
+ENTRY(__asm_switch_ttbr)
+
+	mrs	x2, sctlr_el1
+
+
+	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
+	movn	x1, #(CR_M | CR_C | CR_I)
+	and	x1, x2, x1
+
+	msr	sctlr_el1, x1
+	isb
+
+	/* This call only clobbers x30 (lr) and x9 (unused) */
+	mov	x3, x30
+	bl	__asm_invalidate_tlb_all
+
+	/* From here on we're running safely with caches disabled */
+
+	/* Set TTBR to our first argument */
+	msr	ttbr0_el1, x0
+	isb
+
+	/* Restore original SCTLR and thus enable caches again */
+	msr	sctlr_el1, x2
+	isb
+
+	ret	x3
+
diff --git a/so3/arch/arm64/cache_v8.c b/so3/arch/arm64/cache_v8.c
new file mode 100644
index 000000000..75996767a
--- /dev/null
+++ b/so3/arch/arm64/cache_v8.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2013
+ * David Feng
+ *
+ * (C) Copyright 2016
+ * Alexander Graf
+ */
+
+#include
+
+#include
+#include
+#include
+
+/*
+ * With 4k page granule, a virtual address is split into 4 lookup parts
+ * spanning 9 bits each:
+ *
+ *    _______________________________________________
+ *   |       |       |       |       |       |       |
+ *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
+ *   |_______|_______|_______|_______|_______|_______|
+ *     63-48   47-39   38-30   29-21   20-12   11-00
+ *
+ *             mask        page size
+ *
+ * Lv0: FF8000000000       --
+ * Lv1:   7FC0000000       1G
+ * Lv2:     3FE00000       2M
+ * Lv3:       1FF000       4K
+ * off:          FFF
+ */
+
+/* to activate the MMU we need to set up virtual memory */
+void mmu_setup(u64 *pgtable)
+{
+	u64 attr, tcr;
+
+	/* Output address size is 48-bit (tcr.ips = 5) */
+	tcr = TCR_EL1_RSVD | (5UL << 32UL); // | TCR_EPD1_DISABLE;
+
+	/* PTWs cacheable, inner/outer WBWA and inner shareable */
+	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
+	tcr |= TCR_T0SZ(48);
+
+	attr = MEMORY_ATTRIBUTES;
+
+	asm volatile("dsb sy");
+
+	asm volatile("msr ttbr0_el1, %0" : : "r" (pgtable) : "memory");
+	asm volatile("msr ttbr1_el1, %0" : : "r" (pgtable) : "memory");
+
+	asm volatile("msr tcr_el1, %0" : : "r" (tcr) : "memory");
+	asm volatile("msr mair_el1, %0" : : "r" (attr) : "memory");
+
+	asm volatile("isb");
+
+	invalidate_dcache_all();
+	__asm_invalidate_tlb_all();
+
+	/* enable the mmu */
+	set_sctlr(get_sctlr() | CR_M);
+
+}
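+/*
+ * Illustrative only (not part of the code): with the 4k-granule split
+ * shown above, the lookup index at each level can be extracted as
+ *
+ *	#define VA_LVL_IDX(va, lvl)	(((va) >> (39 - 9 * (lvl))) & 0x1ff)
+ *
+ * so that VA_LVL_IDX(va, 0) selects the Lv0 entry (bits 47..39) and
+ * VA_LVL_IDX(va, 3) the Lv3 entry (bits 20..12). VA_LVL_IDX is a
+ * hypothetical helper, named here only to make the table above concrete.
+ */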
+/*
+ * Performs an invalidation of the entire data cache at all levels
+ */
+void invalidate_dcache_all(void)
+{
+	__asm_invalidate_dcache_all(0);
+}
+
+/*
+ * Performs a clean & invalidate of the entire data cache at all levels.
+ * This function is kept inline so that it does not use the stack.
+ */
+inline void flush_dcache_all(void)
+{
+	__asm_flush_dcache_all(0);
+}
+
+/*
+ * Flush all TLBs on local CPU
+ */
+inline void flush_tlb_all(void) {
+	__flush_tlb_all();
+}
+
+/*
+ * Invalidates range in all levels of D-cache/unified cache
+ */
+void invalidate_dcache_range(unsigned long start, unsigned long stop)
+{
+	__asm_invalidate_dcache_range(start, stop);
+}
+
+/*
+ * Flush range (clean & invalidate) from all levels of D-cache/unified cache
+ */
+void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+	__asm_flush_dcache_range(start, stop);
+}
+
+/*
+ * Flush an individual PTE entry
+ */
+void flush_pte_entry(addr_t va, u64 *pte) {
+	__asm_invalidate_tlb(va);
+	invalidate_dcache_range((u64) pte, (u64) (pte+1));
+
+}
+
+void dcache_enable(void)
+{
+	set_sctlr(get_sctlr() | CR_C);
+}
+
+void dcache_disable(void)
+{
+	uint32_t sctlr;
+
+	sctlr = get_sctlr();
+
+	/* if cache isn't enabled no need to disable */
+	if (!(sctlr & CR_C))
+		return;
+
+	set_sctlr(sctlr & ~(CR_C|CR_M));
+
+	flush_dcache_all();
+	__asm_invalidate_tlb_all();
+}
+
+int dcache_status(void)
+{
+	return (get_sctlr() & CR_C) != 0;
+}
+
+void icache_enable(void)
+{
+	set_sctlr(get_sctlr() | CR_I);
+}
+
+void icache_disable(void)
+{
+	set_sctlr(get_sctlr() & ~CR_I);
+}
+
+int icache_status(void)
+{
+	return (get_sctlr() & CR_I) != 0;
+}
+
+void invalidate_icache_all(void)
+{
+	__asm_invalidate_icache_all();
+}
+
+void mmu_page_table_flush(unsigned long start, unsigned long stop) {
+	flush_dcache_range(start, stop);
+	flush_tlb_all();
+	__asm_invalidate_tlb_all();
+}
+
diff --git a/so3/arch/arm64/context.S b/so3/arch/arm64/context.S
new file mode 100644
index 000000000..a447bb2bf
--- /dev/null
+++ b/so3/arch/arm64/context.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+// Manage various context-related code (context switch)
+
+
+#include
+
+#include
+#include
+
+
+// Switch the MMU to an L0 page table
+// x0 contains the TTBR related to this CPU for the L0 page table
+
+ENTRY(__mmu_switch)
+
+	dsb	sy	/* Ensure the flushes happen before continuing */
+	isb	/* Ensure synchronization with previous changes to text */
+
+	// At the hypervisor level, we only deal with the TTBR1 page table.
+	msr	TTBR1_EL1, x0
+
+	isb
+
+	ret
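+
+// Illustrative call sketch (not part of the code): the caller is expected
+// to load x0 with the TTBR1 value of the target address space, e.g. the
+// one domain.c stores in d->addrspace.ttbr1[cpu], before doing:
+//
+//	bl	__mmu_switch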
+
+ENTRY(cpu_do_idle)
+
+	dsb	sy	// WFI may enter a low-power mode
+	wfi
+
+	ret
diff --git a/so3/arch/arm64/domain.c b/so3/arch/arm64/domain.c
new file mode 100644
index 000000000..99c59bef3
--- /dev/null
+++ b/so3/arch/arm64/domain.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014-2021 Daniel Rossier
+ * Copyright (C) 2016-2019 Baptiste Delporte
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+
+void arch_setup_domain_frame(struct domain *d, struct cpu_regs *domain_frame, addr_t fdt_addr, addr_t start_info, addr_t start_stack, addr_t start_pc) {
+
+	domain_frame->x21 = fdt_addr;
+	domain_frame->x22 = start_info;
+
+	domain_frame->sp = start_stack;
+	domain_frame->pc = start_pc;
+
+	d->cpu_regs.sp = (unsigned long) domain_frame;
+	d->cpu_regs.lr = (unsigned long) pre_ret_to_user;
+}
+
+/*
+ * Setting up a domain consists of building its page tables, starting
+ * from a fresh L0 system page table.
+ */
+void __setup_dom_pgtable(struct domain *d, addr_t v_start, unsigned long map_size, addr_t p_start) {
+	u64 *new_pt;
+
+	ASSERT(d);
+
+	/* Make sure that the size is 2 MB block aligned */
+	map_size = ALIGN_UP(map_size, SZ_2M);
+
+	printk("*** Setup page tables of the domain: ***\n");
+	printk("   v_start          : 0x%lx\n", v_start);
+	printk("   map size (bytes) : 0x%lx\n", map_size);
+	printk("   phys address     : 0x%lx\n", p_start);
+
+	/* Initial L0 page table for the domain */
+	new_pt = new_sys_pgtable();
+
+	d->addrspace.pgtable_vaddr = (addr_t) new_pt;
+	d->addrspace.pgtable_paddr = __pa(new_pt);
+	d->addrspace.ttbr1[d->processor] = __pa(new_pt);
+
+	/* Copy the hypervisor area */
+	*l0pte_offset(new_pt, CONFIG_HYPERVISOR_VIRT_ADDR) = *l0pte_offset(__sys_l0pgtable, CONFIG_HYPERVISOR_VIRT_ADDR);
+
+	/* Do the mapping of the new domain at its virtual address location */
+	create_mapping(new_pt, v_start, p_start, map_size, false);
+}
+
+void arch_domain_create(struct domain *d, int cpu_id) {
+
+	if (is_idle_domain(d)) {
+		d->addrspace.pgtable_paddr = __pa(__sys_l0pgtable);
+		d->addrspace.pgtable_vaddr = (addr_t) __sys_l0pgtable;
+
+		d->addrspace.ttbr1[cpu_id] = __pa(__sys_l0pgtable);
+	}
+}
+
diff --git a/so3/arch/arm64/exception.S b/so3/arch/arm64/exception.S
new file mode 100644
index 000000000..d43b041f8
--- /dev/null
+++ b/so3/arch/arm64/exception.S
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2021 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include
+
+#include
+
+#include
+
+.globl pseudo_usr_mode
+.globl hypervisor_stack
+
+/*
+ * This function is called at bootstrap and reboot time. It initializes
+ * some registers.
+ */
ENTRY(pre_ret_to_user)
+
+	// Initial state - IRQs off
+	disable_irq
+
+	// Get a reference to our domain descriptor
+	curdom	x10, x11
+	str	xzr, [x10, #OFFSET_HYPERVISOR_CALLBACK]
+
+	current_cpu	x11
+
+	// Switch to the guest stack
+	ldr	x0, .LChypervisor_stack
+	mov	x12, sp
+	str	x12, [x0, x11, lsl #3]
+
+	// Setting pseudo_usr_mode
+	ldr	x0, .LCpseudo_usr_mode
+	mov	x1, #1
+	str	x1, [x0, x11, lsl #3]
+
+	ldr	x2, [sp, #S_PC]		// Entry point of the guest
+	ldr	x21, [sp, #S_X21]	// Device tree (fdt_addr)
+	ldr	x22, [sp, #S_X22]	// Address of start_info
+
+	// Ready to jump into the Linux domain...
+	blr	x2
+
+/*
+ * The following function is used to restore a migrated domain.
+ * On the receiving side, nothing related to a context switch has been saved
+ * on the stack: we can neither resume right after the context switch in the
+ * schedule function, nor start from boot. Instead, this restore glue code
+ * performs an upcall into the newly migrated ME. A first timer IRQ has been
+ * set in domain_migration_restore() to avoid a problem in the guest when it
+ * tests for a pending upcall.
+ *
+ */
+ENTRY(after_migrate_to_user)
+#if 0
+	@ should be enough
+
+	@ We need to set up a correct vector offset in S_CONTEXT
+
+	current_cpu r11
+
+	curdom	r10
+
+	ldr	r0, .LChypervisor_stack	@ running SVC hypervisor stack
+	str	sp, [r0, r11, lsl #2]
+
+	@ get guest stack (already stacked from save_svc_context)
+	ldr	sp, [r10, #OFFSET_G_SP]
+
+	mov	r9, #0x18	@ IRQ -> will drive to evtchn_do_upcall() in the guest
+	str	r9, [sp, #S_CONTEXT]
+
+	ldr	sp, [r0, r11, lsl #2]
+
+	b	do_upcall
+#endif
+
+ENTRY(ret_to_user)
+#if 0
+	disable_irq	@ ensure IRQs are disabled
+
+	bl	do_softirq
+
+	vcpu	r10
+	ldr	r11, [r10, #OFFSET_SHARED_INFO]
+
+	@ If the softirq handling leads to triggering an interrupt in the guest,
+	@ it will be processed by do_evtchn_do_upcall. How to process an
+	@ interrupt with IRQs potentially off is the responsibility of
+	@ the guest.
+
+	@ are some IRQs pending?
+	ldrb	r12, [r11, #OFFSET_EVTCHN_UPCALL_PENDING]
+	tst	r12, #0xff
+
+	beq	restore
+
+	b	do_upcall
+#endif
+
+/*
+ * Send event to guest domain
+ */
+ENTRY(do_upcall)
+#if 0
+	disable_irq
+
+	current_cpu r11
+
+	curdom	r10
+
+	ldr	lr, [r10, #OFFSET_HYPERVISOR_CALLBACK]
+	cmp	lr, #0
+	beq	restore
+
+	ldr	r0, .LChypervisor_stack	@ running SVC hypervisor stack
+	str	sp, [r0, r11, lsl #2]
+
+	@ get guest stack (already stacked from save_svc_context)
+	ldr	sp, [r10, #OFFSET_G_SP]
+
+	@ setting pseudo_usr_mode / r0, r1 re-assigned right after
+	ldr	r0, .LCpseudo_usr_mode
+	mov	r1, #1
+	str	r1, [r0, r11, lsl #2]
+
+	@ r0 contains a reference to the stack pointer
+	mov	r0, sp
+
+	ldr	r1, [sp, #S_R1]
+
+	mov	pc, lr
+#endif
+
+ENTRY(restore)
+#if 0
+	current_cpu r11
+
+	@ setting pseudo_usr_mode / r0, r1 re-assigned right after
+	ldr	r0, .LCpseudo_usr_mode
+	mov	r1, #1
+	str	r1, [r0, r11, lsl #2]
+
+
+	@ restore saved registers
+
+	ldr	r0, .LChypervisor_stack	@ running SVC hypervisor stack
+	str	sp, [r0, r11, lsl #2]
+
+	curdom	r10
+
+	@ get guest stack (already stacked from save_svc_context)
+	ldr	sp, [r10, #OFFSET_G_SP]
+
+	ldr	r0, [sp, #S_PSR]	@ Check if return is in guest SVC or guest USR
+	msr	spsr_cxsf, r0
+
+	and	r0, r0, #PSR_MODE_MASK
+	cmp	r0, #PSR_MODE_USR	@ usr ?
+	bne	restore_svc
+
+	ldr	lr, [sp, #S_PC]!	@ Get PC
+
+	ldmdb	sp, {r0 - lr}^		@ Get calling r0 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+
+	movs	pc, lr			@ return & move spsr_svc into cpsr
+
+restore_svc:
+
+	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
+#endif
+
+/*
+ * Register switch
+ * x0 = previous domain descriptor, x1 = next domain descriptor: the
+ * callee-saved registers are saved into, resp. restored from, their
+ * respective cpu_regs areas. previous and next are guaranteed not to be
+ * the same.
+ *
+ */
+ENTRY(__switch_to)
+
+	mov	x10, #(OFFSET_CPU_REGS + OFFSET_X19)
+	add	x8, x0, x10
+	mov	x9, sp
+
+save_ctx:
+
+	stp	x19, x20, [x8], #16	// store callee-saved registers
+	stp	x21, x22, [x8], #16
+	stp	x23, x24, [x8], #16
+	stp	x25, x26, [x8], #16
+	stp	x27, x28, [x8], #16
+	stp	x29, lr, [x8], #16
+	str	x9, [x8]
+
+	// Prepare to retrieve the regs of the next domain
+	add	x8, x1, x10
+
+load_ctx:
+
+	ldp	x19, x20, [x8], #16	// restore callee-saved registers
+	ldp	x21, x22, [x8], #16
+	ldp	x23, x24, [x8], #16
+	ldp	x25, x26, [x8], #16
+	ldp	x27, x28, [x8], #16
+	ldp	x29, lr, [x8], #16
+	ldr	x9, [x8]
+	mov	sp, x9
+
+	ret
+
+pseudo_usr_mode:
+	.space	NR_CPUS * 8
+
+// Hypervisor stack is used for the *current* (running) vcpu svc stack address
+hypervisor_stack:
+	.space	NR_CPUS * 8
+
+
+.LCpseudo_usr_mode:
+	.quad	pseudo_usr_mode
+
+.LChypervisor_stack:
+	.quad	hypervisor_stack
+
diff --git a/so3/arch/arm64/fault.c b/so3/arch/arm64/fault.c
new file mode 100644
index 000000000..ad9bb7ae7
--- /dev/null
+++ b/so3/arch/arm64/fault.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include
+
+#include
+
+void __stack_alignment_fault(void) {
+	lprintk("### wrong stack alignment (16-byte alignment not respected) !! ###");
+	kernel_panic();
+}
+
+#if 0
+
+void __prefetch_abort(uint32_t ifar, uint32_t ifsr, uint32_t lr) {
+	lprintk("### prefetch abort exception ifar: %x ifsr: %x lr(r14)-8: %x cr: %x ###\n", ifar, ifsr, lr-8, get_cr());
+
+	kernel_panic();
+}
+
+void __data_abort(uint32_t far, uint32_t fsr, uint32_t lr) {
+	lprintk("### abort exception far: %x fsr: %x lr(r14)-8: %x cr: %x ###\n", far, fsr, lr-8, get_cr());
+
+	kernel_panic();
+}
+
+void __undefined_instruction(uint32_t lr) {
+	lprintk("### undefined instruction lr(r14)-8: %x ###\n", lr-8);
+
+	kernel_panic();
+}
+#endif
+
+void __div0(void) {
+	lprintk("### division by 0\n");
+	kernel_panic();
+}
+
+void kernel_panic(void)
+{
+	if (user_mode())
+		printk("%s: entering infinite loop...\n", __func__);
+	else {
+		lprintk("%s: entering infinite loop... CPU: %d\n", __func__, smp_processor_id());
+
+	}
+	/* Stop all activities.
*/ + local_irq_disable(); + + while (1); +} + +void _bug(char *file, int line) +{ + lprintk("BUG in %s at line: %d\n", file, line); + + kernel_panic(); +} + diff --git a/so3/arch/arm64/include/asm/arm_timer.h b/so3/arch/arm64/include/asm/arm_timer.h new file mode 100644 index 000000000..bd58ab762 --- /dev/null +++ b/so3/arch/arm64/include/asm/arm_timer.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2014-2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef ASM_ARM_TIMER_H +#define ASM_ARM_TIMER_H + +#include + +/* + * These register accessors are marked inline so the compiler can + * nicely work out which register we want, and chuck away the rest of + * the code. At least it does so with a recent GCC (4.6.3). + */ +static inline void arch_timer_reg_write(enum arch_timer_reg reg, u32 val) +{ + switch (reg) { + case ARCH_TIMER_REG_CTRL: + //asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); + break; + + case ARCH_TIMER_REG_TVAL: + //asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); + break; + } + + isb(); +} + +static inline u32 arch_timer_reg_read(enum arch_timer_reg reg) +{ + u32 val = 0; + + switch (reg) { + case ARCH_TIMER_REG_CTRL: + //asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); + break; + + case ARCH_TIMER_REG_TVAL: + //asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); + break; + } + + + return val; +} + +static inline u32 arch_timer_get_cntfrq(void) +{ + u32 val = 0; + + //asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); + + return val; +} + +static inline u64 arch_counter_get_cntvct(void) +{ + u64 cval = 0; + + isb(); + //asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); + + return cval; +} + +static inline u32 arch_timer_get_cntkctl(void) +{ + u32 cntkctl; + + //asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl)); + + return cntkctl; +} + +static inline void arch_timer_set_cntkctl(u32 cntkctl) +{ + //asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); +} + +#endif /* ASM_ARM_TIMER_H */ diff --git a/so3/arch/arm64/include/asm/atomic.h b/so3/arch/arm64/include/asm/atomic.h new file mode 100644 index 000000000..2c2814120 --- /dev/null +++ b/so3/arch/arm64/include/asm/atomic.h @@ -0,0 +1,324 @@ +/* + * linux/include/asm-arm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_ATOMIC_H +#define ASM_ATOMIC_H + +#include +#include + +#include + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +#define _atomic_read(v) ((v).counter) +#define atomic_read(v) ((v)->counter) + +/* + * AArch64 UP and SMP safe atomic ops. 
We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. + */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add_return\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub_return\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_and\n" +"1: ldxr %w0, %2\n" +" and %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (m)); +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long tmp; + int oldval; + + smp_mb(); + + asm volatile("// atomic_cmpxchg\n" +"1: ldxr %w1, %2\n" +" cmp %w1, %w3\n" +" b.ne 2f\n" +" stxr %w0, %w4, %2\n" +" cbnz %w0, 1b\n" +"2:" + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc"); + + smp_mb(); + return oldval; +} + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c; +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#define atomic_add(i, v) (void) atomic_add_return(i, v) +#define atomic_inc(v) (void) atomic_add_return(1, v) +#define atomic_sub(i, v) (void) atomic_sub_return(i, v) +#define atomic_dec(v) (void) atomic_sub_return(1, v) + +#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) + +#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) + +/****************************/ + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret, tmp; + + switch (size) { + case 1: + asm volatile("// __xchg1\n" + "1: ldxrb %w0, %2\n" + " stlxrb %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) + : "r" (x) + : "memory"); + break; + case 2: + asm volatile("// __xchg2\n" + "1: ldxrh %w0, %2\n" + " stlxrh %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) + : "r" (x) + : "memory"); + break; + case 4: + asm volatile("// __xchg4\n" + "1: ldxr %w0, %2\n" + " stlxr %w1, %w3, %2\n" + " cbnz 
%w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) + : "r" (x) + : "memory"); + break; + case 8: + asm volatile("// __xchg8\n" + "1: ldxr %0, %2\n" + " stlxr %w1, %3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) + : "r" (x) + : "memory"); + break; + default: + BUG(); + break; + } + + smp_mb(); + return ret; +} + +#define xchg(ptr,x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) + +extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); + +#define __CMPXCHG_CASE(w, sz, name) \ +static inline bool __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long *old, \ + unsigned long new, \ + bool timeout, \ + unsigned int max_try) \ +{ \ + unsigned long oldval; \ + unsigned long res; \ + \ + do { \ + asm volatile("// __cmpxchg_case_" #name "\n" \ + " ldxr" #sz " %" #w "1, %2\n" \ + " mov %w0, #0\n" \ + " cmp %" #w "1, %" #w "3\n" \ + " b.ne 1f\n" \ + " stxr" #sz " %w0, %" #w "4, %2\n" \ + "1:\n" \ + : "=&r" (res), "=&r" (oldval), \ + "+Q" (*(unsigned long *)ptr) \ + : "Ir" (*old), "r" (new) \ + : "cc"); \ + \ + if (!res) \ + break; \ + } while (!timeout || ((--max_try) > 0)); \ + \ + *old = oldval; \ + \ + return !res; \ +} + +__CMPXCHG_CASE(w, b, 1) +__CMPXCHG_CASE(w, h, 2) +__CMPXCHG_CASE(w, , 4) +__CMPXCHG_CASE( , , 8) + +static inline bool __int_cmpxchg(volatile void *ptr, unsigned long *old, + unsigned long new, int size, + bool timeout, unsigned int max_try) +{ + switch (size) { + case 1: + return __cmpxchg_case_1(ptr, old, new, timeout, max_try); + case 2: + return __cmpxchg_case_2(ptr, old, new, timeout, max_try); + case 4: + return __cmpxchg_case_4(ptr, old, new, timeout, max_try); + case 8: + return __cmpxchg_case_8(ptr, old, new, timeout, max_try); + default: + BUG(); + } + + return false; +} + +static inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, + int size) +{ + smp_mb(); + if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) + BUG(); + smp_mb(); + + return old; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. + */ +static always_inline bool __cmpxchg_timeout(volatile void *ptr, + unsigned long *old, + unsigned long new, + int size, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __int_cmpxchg(ptr, old, new, size, true, max_try); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \ + sizeof(*(ptr))); \ + __ret; \ +}) + + +#endif /* ASM_ATOMIC_H */ diff --git a/so3/arch/arm64/include/asm/backtrace.h b/so3/arch/arm64/include/asm/backtrace.h new file mode 100644 index 000000000..81cc311f2 --- /dev/null +++ b/so3/arch/arm64/include/asm/backtrace.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2016-2020 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef BACKTRACE_H +#define BACKTRACE_H + +void dump_stack(void); + +void show_registers(struct cpu_regs *regs); + +void dump_execution_state(void); +void dump_all_execution_state(void); + +static inline void show_execution_state(struct cpu_regs *regs) +{ + show_registers(regs); +} + +#endif /* BACKTRACE_H */ + diff --git a/so3/arch/arm64/include/asm/bitops.h b/so3/arch/arm64/include/asm/bitops.h new file mode 100644 index 000000000..f2ee6b8a9 --- /dev/null +++ b/so3/arch/arm64/include/asm/bitops.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 1995, Russell King. + * Various bits and pieces copyrights include: + * Linus Torvalds (test_bit). + * Big endian support: Copyright 2001, Nicolas Pitre + * reworked by rmk. + * + * bit 0 is the LSB of an "unsigned long" quantity. + * + * Please note that the code in this file should never be included + * from user space. Many of these are not implemented in assembler + * since they would be too costly. Also, they require privileged + * instructions (which are not available from user mode) to ensure + * that they are atomic. + */ + +#ifndef ASM_BITOPS_H +#define ASM_BITOPS_H + +#include + +#include + +#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0) +#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x)) +#define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x)) +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x)) +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x)) + +#ifndef __ASSEMBLY__ + +#define smp_mb__before_clear_bit() mb() +#define smp_mb__after_clear_bit() mb() + +#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + + + +/* + * These functions are the basis of our bit ops. + * + * First, the atomic bitops. These use native endian. 
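+ *
+ * A usage sketch (illustrative only):
+ *
+ *	static unsigned long map[256 / BITS_PER_LONG];	// a 256-bit bitmap
+ *
+ *	if (!test_and_set_bit(42, map))
+ *		;	// bit 42 was clear and has now been set, atomically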
+ */
+static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = BITOP_MASK(bit);	/* 64-bit words on arm64 */
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	*p |= mask;
+	local_irq_restore(flags);
+}
+
+static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = BITOP_MASK(bit);
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	*p &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = BITOP_MASK(bit);
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	*p ^= mask;
+	local_irq_restore(flags);
+}
+
+static inline int
+____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long res;
+	unsigned long mask = BITOP_MASK(bit);
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res | mask;
+	local_irq_restore(flags);
+
+	return (res & mask) != 0;
+}
+
+static inline int
+____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long res;
+	unsigned long mask = BITOP_MASK(bit);
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res & ~mask;
+	local_irq_restore(flags);
+
+	return (res & mask) != 0;
+}
+
+static inline int
+____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long res;
+	unsigned long mask = BITOP_MASK(bit);
+
+	p += BITOP_WORD(bit);
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res ^ mask;
+	local_irq_restore(flags);
+
+	return (res & mask) != 0;
+}
+
+
+
+/*
+ * A note about Endian-ness.
+ * -------------------------
+ *
+ * When the ARM is put into big endian mode via CR15, the processor
+ * merely swaps the order of bytes within words, thus:
+ *
+ *          ------------ physical data bus bits -----------
+ *          D31 ... D24  D23 ... D16  D15 ... D8  D7 ... D0
+ * little   byte 3       byte 2       byte 1      byte 0
+ * big      byte 0       byte 1       byte 2      byte 3
+ *
+ * This means that reading a 32-bit word at address 0 returns the same
+ * value irrespective of the endian mode bit.
+ *
+ * Peripheral devices should be connected with the data bus reversed in
+ * "Big Endian" mode. ARM Application Note 61 is applicable, and is
+ * available from http://www.arm.com/.
+ *
+ * The following assumes that the data bus connectivity for big endian
+ * mode has been followed.
+ *
+ * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
+ */
+
+/*
+ * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
+ */
+extern void _set_bit_le(int nr, volatile unsigned long * p);
+extern void _clear_bit_le(int nr, volatile unsigned long * p);
+extern void _change_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
+
+
+/*
+ * The __* form of bitops are non-atomic and may be reordered.
+ */
+
+#define ATOMIC_BITOP_LE(name,nr,p)	(____atomic_##name(nr, p))
+#define ATOMIC_BITOP_BE(name,nr,p)	(____atomic_##name(nr, p))
+
+
+#define NONATOMIC_BITOP(name,nr,p)	\
+	(____nonatomic_##name(nr, p))
+
+
+/*
+ * These are the little endian, atomic definitions.
+ */ +#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) + +#endif /*__ASSEMBLY__ */ + +#endif /* ASM_BITOPS_H */ diff --git a/so3/arch/arm64/include/asm/byteorder.h b/so3/arch/arm64/include/asm/byteorder.h new file mode 100644 index 000000000..63aa791c7 --- /dev/null +++ b/so3/arch/arm64/include/asm/byteorder.h @@ -0,0 +1,25 @@ +/* + * linux/include/asm-arm/byteorder.h + * + * ARM Endian-ness. In little endian mode, the data bus is connected such + * that byte accesses appear as: + * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31 + * and word accesses (data or instruction) appear as: + * d0...d31 + * + * When in big endian mode, byte accesses appear as: + * 0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7 + * and word accesses (data or instruction) appear as: + * d0...d31 + */ +#ifndef __ASM_ARM_BYTEORDER_H +#define __ASM_ARM_BYTEORDER_H + +#include + +#define __BYTEORDER_HAS_U64__ +#define __SWAB_64_THRU_32__ + +#include + +#endif diff --git a/so3/arch/arm64/include/asm/cacheflush.h b/so3/arch/arm64/include/asm/cacheflush.h new file mode 100644 index 000000000..b33d002a7 --- /dev/null +++ b/so3/arch/arm64/include/asm/cacheflush.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef CACHEFLUSH_H +#define CACHEFLUSH_H + +#include + +void flush_pte_entry(addr_t va, u64 *pte); + +void mmu_page_table_flush(unsigned long start, unsigned long stop); + +void __asm_invalidate_tlb_all(void); +void __asm_invalidate_tlb(addr_t va); +void __asm_dcache_level(int level); +void __asm_invalidate_dcache_range(addr_t start, addr_t end); +void __asm_flush_dcache_range(addr_t start, addr_t end); +void __asm_invalidate_icache_all(void); + +void __asm_flush_dcache_all(int invalidate_only); +void __asm_invalidate_dcache_all(int invalidate_only); +void __flush_tlb_all(void); + +void invalidate_dcache_all(void); +void invalidate_icache_all(void); +inline void flush_dcache_all(void); +void flush_tlb_all(void); + +void cache_enable(uint32_t cache_bit); +void cache_disable(uint32_t cache_bit); +void icache_enable(void); +void icache_disable(void); +int icache_status(void); +void dcache_enable(void); +void dcache_disable(void); +int dcache_status(void); + +#endif /* CACHEFLUSH_H */ diff --git a/so3/arch/arm64/include/asm/cpregs.h b/so3/arch/arm64/include/asm/cpregs.h new file mode 100644 index 000000000..6f358e4a0 --- /dev/null +++ b/so3/arch/arm64/include/asm/cpregs.h @@ -0,0 +1,356 @@ +/* + * Copyright (C) 2016,2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __ASM_ARM_CPREGS_H +#define __ASM_ARM_CPREGS_H + /* + * AArch32 Co-processor registers. 
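+ *
+ * Usage sketch (illustrative): the HSR_CPREG32()/HSR_CPREG64() encodings
+ * defined below are meant to be matched against the ISS pattern of a
+ * trapped MCR/MRC reported in the Hyp. Syndrome Register, along the lines
+ * of (the mask and variable names are hypothetical):
+ *
+ *	switch (hsr_iss & HSR_CP32_REGS_MASK) {
+ *	case HSR_CPREG32(SCTLR):
+ *		... emulate the SCTLR access ...
+ *	}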
+ * + */ + +#define __HSR_CPREG_c0 0 +#define __HSR_CPREG_c1 1 +#define __HSR_CPREG_c2 2 +#define __HSR_CPREG_c3 3 +#define __HSR_CPREG_c4 4 +#define __HSR_CPREG_c5 5 +#define __HSR_CPREG_c6 6 +#define __HSR_CPREG_c7 7 +#define __HSR_CPREG_c8 8 +#define __HSR_CPREG_c9 9 +#define __HSR_CPREG_c10 10 +#define __HSR_CPREG_c11 11 +#define __HSR_CPREG_c12 12 +#define __HSR_CPREG_c13 13 +#define __HSR_CPREG_c14 14 +#define __HSR_CPREG_c15 15 + +#define __HSR_CPREG_0 0 +#define __HSR_CPREG_1 1 +#define __HSR_CPREG_2 2 +#define __HSR_CPREG_3 3 +#define __HSR_CPREG_4 4 +#define __HSR_CPREG_5 5 +#define __HSR_CPREG_6 6 +#define __HSR_CPREG_7 7 + +#define _HSR_CPREG32(cp,op1,crn,crm,op2) \ + ((__HSR_CPREG_##crn) << HSR_CP32_CRN_SHIFT) | \ + ((__HSR_CPREG_##crm) << HSR_CP32_CRM_SHIFT) | \ + ((__HSR_CPREG_##op1) << HSR_CP32_OP1_SHIFT) | \ + ((__HSR_CPREG_##op2) << HSR_CP32_OP2_SHIFT) + +#define _HSR_CPREG64(cp,op1,crm) \ + ((__HSR_CPREG_##crm) << HSR_CP64_CRM_SHIFT) | \ + ((__HSR_CPREG_##op1) << HSR_CP64_OP1_SHIFT) + +/* Encode a register as per HSR ISS pattern */ +#define HSR_CPREG32(X) _HSR_CPREG32(X) +#define HSR_CPREG64(X) _HSR_CPREG64(X) + +/* + * Order registers by Coprocessor-> CRn-> Opcode 1-> CRm-> Opcode 2 + * + * This matches the ordering used in the ARM as well as the groupings + * which the CP registers are allocated in. + * + * This is slightly different to the form of the instruction + * arguments, which are cp,opc1,crn,crm,opc2. + */ + +/* Coprocessor 10 */ + +#define FPSID p10,7,c0,c0,0 /* Floating-Point System ID Register */ +#define FPSCR p10,7,c1,c0,0 /* Floating-Point Status and Control Register */ +#define MVFR0 p10,7,c7,c0,0 /* Media and VFP Feature Register 0 */ +#define FPEXC p10,7,c8,c0,0 /* Floating-Point Exception Control Register */ +#define FPINST p10,7,c9,c0,0 /* Floating-Point Instruction Register */ +#define FPINST2 p10,7,c10,c0,0 /* Floating-point Instruction Register 2 */ + +/* Coprocessor 14 */ + +/* CP14 0: Debug Register interface */ +#define DBGDIDR p14,0,c0,c0,0 /* Debug ID Register */ +#define DBGDSCRINT p14,0,c0,c1,0 /* Debug Status and Control Internal */ +#define DBGDSCREXT p14,0,c0,c2,2 /* Debug Status and Control External */ +#define DBGVCR p14,0,c0,c7,0 /* Vector Catch */ +#define DBGBVR0 p14,0,c0,c0,4 /* Breakpoint Value 0 */ +#define DBGBCR0 p14,0,c0,c0,5 /* Breakpoint Control 0 */ +#define DBGWVR0 p14,0,c0,c0,6 /* Watchpoint Value 0 */ +#define DBGWCR0 p14,0,c0,c0,7 /* Watchpoint Control 0 */ +#define DBGBVR1 p14,0,c0,c1,4 /* Breakpoint Value 1 */ +#define DBGBCR1 p14,0,c0,c1,5 /* Breakpoint Control 1 */ +#define DBGOSLAR p14,0,c1,c0,4 /* OS Lock Access */ +#define DBGOSDLR p14,0,c1,c3,4 /* OS Double Lock */ + +/* CP14 CR0: */ +#define TEECR p14,6,c0,c0,0 /* ThumbEE Configuration Register */ + +/* CP14 CR1: */ +#define TEEHBR p14,6,c1,c0,0 /* ThumbEE Handler Base Register */ +#define JOSCR p14,7,c1,c0,0 /* Jazelle OS Control Register */ + +/* CP14 CR2: */ +#define JMCR p14,7,c2,c0,0 /* Jazelle Main Configuration Register */ + + +/* Coprocessor 15 */ + +/* CP15 CR0: CPUID and Cache Type Registers */ +#define MIDR p15,0,c0,c0,0 /* Main ID Register */ +#define MPIDR p15,0,c0,c0,5 /* Multiprocessor Affinity Register */ +#define ID_PFR0 p15,0,c0,c1,0 /* Processor Feature Register 0 */ +#define ID_PFR1 p15,0,c0,c1,1 /* Processor Feature Register 1 */ +#define ID_DFR0 p15,0,c0,c1,2 /* Debug Feature Register 0 */ +#define ID_AFR0 p15,0,c0,c1,3 /* Auxiliary Feature Register 0 */ +#define ID_MMFR0 p15,0,c0,c1,4 /* Memory Model Feature Register 0 */ +#define 
ID_MMFR1 p15,0,c0,c1,5 /* Memory Model Feature Register 1 */ +#define ID_MMFR2 p15,0,c0,c1,6 /* Memory Model Feature Register 2 */ +#define ID_MMFR3 p15,0,c0,c1,7 /* Memory Model Feature Register 3 */ +#define ID_ISAR0 p15,0,c0,c2,0 /* ISA Feature Register 0 */ +#define ID_ISAR1 p15,0,c0,c2,1 /* ISA Feature Register 1 */ +#define ID_ISAR2 p15,0,c0,c2,2 /* ISA Feature Register 2 */ +#define ID_ISAR3 p15,0,c0,c2,3 /* ISA Feature Register 3 */ +#define ID_ISAR4 p15,0,c0,c2,4 /* ISA Feature Register 4 */ +#define ID_ISAR5 p15,0,c0,c2,5 /* ISA Feature Register 5 */ +#define CCSIDR p15,1,c0,c0,0 /* Cache Size ID Registers */ +#define CLIDR p15,1,c0,c0,1 /* Cache Level ID Register */ +#define CSSELR p15,2,c0,c0,0 /* Cache Size Selection Register */ +#define VPIDR p15,4,c0,c0,0 /* Virtualization Processor ID Register */ +#define VMPIDR p15,4,c0,c0,5 /* Virtualization Multiprocessor ID Register */ + +/* CP15 CR1: System Control Registers */ +#define SCTLR p15,0,c1,c0,0 /* System Control Register */ +#define ACTLR p15,0,c1,c0,1 /* Auxiliary Control Register */ +#define CPACR p15,0,c1,c0,2 /* Coprocessor Access Control Register */ +#define SCR p15,0,c1,c1,0 /* Secure Configuration Register */ +#define NSACR p15,0,c1,c1,2 /* Non-Secure Access Control Register */ +#define HSCTLR p15,4,c1,c0,0 /* Hyp. System Control Register */ +#define HCR p15,4,c1,c1,0 /* Hyp. Configuration Register */ +#define HDCR p15,4,c1,c1,1 /* Hyp. Debug Configuration Register */ +#define HCPTR p15,4,c1,c1,2 /* Hyp. Coprocessor Trap Register */ +#define HSTR p15,4,c1,c1,3 /* Hyp. System Trap Register */ + +/* CP15 CR2: Translation Table Base and Control Registers */ +#define TTBCR p15,0,c2,c0,2 /* Translation Table Base Control Register */ +#define TTBR0 p15,0,c2 /* Translation Table Base Reg. 0 */ +#define TTBR1 p15,1,c2 /* Translation Table Base Reg. 1 */ +#define HTTBR p15,4,c2 /* Hyp. Translation Table Base Register */ +#define TTBR0_32 p15,0,c2,c0,0 /* 32-bit access to TTBR0 */ +#define TTBR1_32 p15,0,c2,c0,1 /* 32-bit access to TTBR1 */ +#define HTCR p15,4,c2,c0,2 /* Hyp. Translation Control Register */ +#define VTCR p15,4,c2,c1,2 /* Virtualization Translation Control Register */ +#define VTTBR p15,6,c2 /* Virtualization Translation Table Base Register */ + +/* CP15 CR3: Domain Access Control Register */ +#define DACR p15,0,c3,c0,0 /* Domain Access Control Register */ + +/* CP15 CR4: */ + +/* CP15 CR5: Fault Status Registers */ +#define DFSR p15,0,c5,c0,0 /* Data Fault Status Register */ +#define IFSR p15,0,c5,c0,1 /* Instruction Fault Status Register */ +#define ADFSR p15,0,c5,c1,0 /* Auxiliary Data Fault Status Register */ +#define AIFSR p15,0,c5,c1,1 /* Auxiliary Instruction Fault Status Register */ +#define HSR p15,4,c5,c2,0 /* Hyp. Syndrome Register */ + +/* CP15 CR6: Fault Address Registers */ +#define DFAR p15,0,c6,c0,0 /* Data Fault Address Register */ +#define IFAR p15,0,c6,c0,2 /* Instruction Fault Address Register */ +#define HDFAR p15,4,c6,c0,0 /* Hyp. Data Fault Address Register */ +#define HIFAR p15,4,c6,c0,2 /* Hyp. Instruction Fault Address Register */ +#define HPFAR p15,4,c6,c0,4 /* Hyp. 
IPA Fault Address Register */ + +/* CP15 CR7: Cache and address translation operations */ +#define PAR p15,0,c7 /* Physical Address Register */ + +#define ICIALLUIS p15,0,c7,c1,0 /* Invalidate all instruction caches to PoU inner shareable */ +#define BPIALLIS p15,0,c7,c1,6 /* Invalidate entire branch predictor array inner shareable */ +#define ICIALLU p15,0,c7,c5,0 /* Invalidate all instruction caches to PoU */ +#define ICIMVAU p15,0,c7,c5,1 /* Invalidate instruction caches by MVA to PoU */ +#define BPIALL p15,0,c7,c5,6 /* Invalidate entire branch predictor array */ +#define BPIMVA p15,0,c7,c5,7 /* Invalidate MVA from branch predictor array */ +#define DCIMVAC p15,0,c7,c6,1 /* Invalidate data cache line by MVA to PoC */ +#define DCISW p15,0,c7,c6,2 /* Invalidate data cache line by set/way */ +#define ATS1CPR p15,0,c7,c8,0 /* Address Translation Stage 1. Non-Secure Kernel Read */ +#define ATS1CPW p15,0,c7,c8,1 /* Address Translation Stage 1. Non-Secure Kernel Write */ +#define ATS1CUR p15,0,c7,c8,2 /* Address Translation Stage 1. Non-Secure User Read */ +#define ATS1CUW p15,0,c7,c8,3 /* Address Translation Stage 1. Non-Secure User Write */ +#define ATS12NSOPR p15,0,c7,c8,4 /* Address Translation Stage 1+2 Non-Secure Kernel Read */ +#define ATS12NSOPW p15,0,c7,c8,5 /* Address Translation Stage 1+2 Non-Secure Kernel Write */ +#define ATS12NSOUR p15,0,c7,c8,6 /* Address Translation Stage 1+2 Non-Secure User Read */ +#define ATS12NSOUW p15,0,c7,c8,7 /* Address Translation Stage 1+2 Non-Secure User Write */ +#define DCCMVAC p15,0,c7,c10,1 /* Clean data or unified cache line by MVA to PoC */ +#define DCCSW p15,0,c7,c10,2 /* Clean data cache line by set/way */ +#define DCCMVAU p15,0,c7,c11,1 /* Clean data cache line by MVA to PoU */ +#define DCCIMVAC p15,0,c7,c14,1 /* Data cache clean and invalidate by MVA */ +#define DCCISW p15,0,c7,c14,2 /* Clean and invalidate data cache line by set/way */ +#define ATS1HR p15,4,c7,c8,0 /* Address Translation Stage 1 Hyp. Read */ +#define ATS1HW p15,4,c7,c8,1 /* Address Translation Stage 1 Hyp. Write */ + +/* CP15 CR8: TLB maintenance operations */ +#define TLBIALLIS p15,0,c8,c3,0 /* Invalidate entire TLB inner shareable */ +#define TLBIMVAIS p15,0,c8,c3,1 /* Invalidate unified TLB entry by MVA inner shareable */ +#define TLBIASIDIS p15,0,c8,c3,2 /* Invalidate unified TLB by ASID match inner shareable */ +#define TLBIMVAAIS p15,0,c8,c3,3 /* Invalidate unified TLB entry by MVA all ASID inner shareable */ +#define ITLBIALL p15,0,c8,c5,0 /* Invalidate instruction TLB */ +#define ITLBIMVA p15,0,c8,c5,1 /* Invalidate instruction TLB entry by MVA */ +#define ITLBIASID p15,0,c8,c5,2 /* Invalidate instruction TLB by ASID match */ +#define DTLBIALL p15,0,c8,c6,0 /* Invalidate data TLB */ +#define DTLBIMVA p15,0,c8,c6,1 /* Invalidate data TLB entry by MVA */ +#define DTLBIASID p15,0,c8,c6,2 /* Invalidate data TLB by ASID match */ +#define TLBIALL p15,0,c8,c7,0 /* invalidate unified TLB */ +#define TLBIMVA p15,0,c8,c7,1 /* invalidate unified TLB entry by MVA */ +#define TLBIASID p15,0,c8,c7,2 /* invalidate unified TLB by ASID match */ +#define TLBIMVAA p15,0,c8,c7,3 /* invalidate unified TLB entries by MVA all ASID */ +#define TLBIALLHIS p15,4,c8,c3,0 /* Invalidate Entire Hyp. Unified TLB inner shareable */ +#define TLBIMVAHIS p15,4,c8,c3,1 /* Invalidate Unified Hyp. TLB by MVA inner shareable */ +#define TLBIALLNSNHIS p15,4,c8,c3,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB inner shareable */ +#define TLBIALLH p15,4,c8,c7,0 /* Invalidate Entire Hyp. 
Unified TLB */ +#define TLBIMVAH p15,4,c8,c7,1 /* Invalidate Unified Hyp. TLB by MVA */ +#define TLBIALLNSNH p15,4,c8,c7,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB */ + +/* CP15 CR9: Performance monitors */ +#define PMCR p15,0,c9,c12,0 /* Perf. Mon. Control Register */ +#define PMCNTENSET p15,0,c9,c12,1 /* Perf. Mon. Count Enable Set register */ +#define PMCNTENCLR p15,0,c9,c12,2 /* Perf. Mon. Count Enable Clear register */ +#define PMOVSR p15,0,c9,c12,3 /* Perf. Mon. Overflow Flag Status Register */ +#define PMSWINC p15,0,c9,c12,4 /* Perf. Mon. Software Increment register */ +#define PMSELR p15,0,c9,c12,5 /* Perf. Mon. Event Counter Selection Register */ +#define PMCEID0 p15,0,c9,c12,6 /* Perf. Mon. Common Event Identification register 0 */ +#define PMCEID1 p15,0,c9,c12,7 /* Perf. Mon. Common Event Identification register 1 */ +#define PMCCNTR p15,0,c9,c13,0 /* Perf. Mon. Cycle Count Register */ +#define PMXEVTYPER p15,0,c9,c13,1 /* Perf. Mon. Event Type Select Register */ +#define PMXEVCNTR p15,0,c9,c13,2 /* Perf. Mon. Event Count Register */ +#define PMUSERENR p15,0,c9,c14,0 /* Perf. Mon. User Enable Register */ +#define PMINTENSET p15,0,c9,c14,1 /* Perf. Mon. Interrupt Enable Set Register */ +#define PMINTENCLR p15,0,c9,c14,2 /* Perf. Mon. Interrupt Enable Clear Register */ +#define PMOVSSET p15,0,c9,c14,3 /* Perf. Mon. Overflow Flag Status Set register */ + +/* CP15 CR10: */ +#define MAIR0 p15,0,c10,c2,0 /* Memory Attribute Indirection Register 0 AKA PRRR */ +#define MAIR1 p15,0,c10,c2,1 /* Memory Attribute Indirection Register 1 AKA NMRR */ +#define HMAIR0 p15,4,c10,c2,0 /* Hyp. Memory Attribute Indirection Register 0 */ +#define HMAIR1 p15,4,c10,c2,1 /* Hyp. Memory Attribute Indirection Register 1 */ +#define AMAIR0 p15,0,c10,c3,0 /* Aux. Memory Attribute Indirection Register 0 */ +#define AMAIR1 p15,0,c10,c3,1 /* Aux. Memory Attribute Indirection Register 1 */ + +/* CP15 CR11: DMA Operations for TCM Access */ + +/* CP15 CR12: */ +#define VBAR p15,0,c12,c0,0 /* Vector Base Address Register */ +#define HVBAR p15,4,c12,c0,0 /* Hyp. Vector Base Address Register */ + +/* CP15 CR13: */ +#define FCSEIDR p15,0,c13,c0,0 /* FCSE Process ID Register */ +#define CONTEXTIDR p15,0,c13,c0,1 /* Context ID Register */ +#define TPIDRURW p15,0,c13,c0,2 /* Software Thread ID, User, R/W */ +#define TPIDRURO p15,0,c13,c0,3 /* Software Thread ID, User, R/O */ +#define TPIDRPRW p15,0,c13,c0,4 /* Software Thread ID, Privileged */ +#define HTPIDR p15,4,c13,c0,2 /* Hyp. Software Thread ID Register */ + +/* CP15 CR14: */ +#define CNTPCT p15,0,c14 /* Time counter value */ +#define CNTFRQ p15,0,c14,c0,0 /* Time counter frequency */ +#define CNTKCTL p15,0,c14,c1,0 /* Time counter kernel control */ +#define CNTP_TVAL p15,0,c14,c2,0 /* Physical Timer value */ +#define CNTP_CTL p15,0,c14,c2,1 /* Physical Timer control register */ +#define CNTVCT p15,1,c14 /* Time counter value + offset */ +#define CNTP_CVAL p15,2,c14 /* Physical Timer comparator */ +#define CNTV_CVAL p15,3,c14 /* Virt. Timer comparator */ +#define CNTVOFF p15,4,c14 /* Time counter offset */ +#define CNTHCTL p15,4,c14,c1,0 /* Time counter hyp. control */ +#define CNTHP_TVAL p15,4,c14,c2,0 /* Hyp. Timer value */ +#define CNTHP_CTL p15,4,c14,c2,1 /* Hyp. Timer control register */ +#define CNTV_TVAL p15,0,c14,c3,0 /* Virt. Timer value */ +#define CNTV_CTL p15,0,c14,c3,1 /* Virt. Timer control register */ +#define CNTHP_CVAL p15,6,c14 /* Hyp. 
Timer comparator */ + +/* CP15 CR15: Implementation Defined Registers */ + +/* Aliases of AArch64 names for use in common code when building for AArch32 */ +//#ifdef CONFIG_ARM_32 +/* Alphabetically... */ +#define ACTLR_EL1 ACTLR +#define AFSR0_EL1 ADFSR +#define AFSR1_EL1 AIFSR +#define CCSIDR_EL1 CCSIDR +#define CLIDR_EL1 CLIDR +#define CNTFRQ_EL0 CNTFRQ +#define CNTHCTL_EL2 CNTHCTL +#define CNTHP_CTL_EL2 CNTHP_CTL +#define CNTHP_CVAL_EL2 CNTHP_CVAL +#define CNTKCTL_EL1 CNTKCTL +#define CNTPCT_EL0 CNTPCT +#define CNTP_CTL_EL0 CNTP_CTL +#define CNTP_CVAL_EL0 CNTP_CVAL +#define CNTVCT_EL0 CNTVCT +#define CNTVOFF_EL2 CNTVOFF +#define CNTV_CTL_EL0 CNTV_CTL +#define CNTV_CVAL_EL0 CNTV_CVAL +#define CONTEXTIDR_EL1 CONTEXTIDR +#define CPACR_EL1 CPACR +#define CPTR_EL2 HCPTR +#define CSSELR_EL1 CSSELR +#define DACR32_EL2 DACR +#define ESR_EL1 DFSR +#define ESR_EL2 HSR +#define FAR_EL1 HIFAR +#define FAR_EL2 HIFAR +#define HCR_EL2 HCR +#define HPFAR_EL2 HPFAR +#define HSTR_EL2 HSTR +#define ID_AFR0_EL1 ID_AFR0 +#define ID_DFR0_EL1 ID_DFR0 +#define ID_ISAR0_EL1 ID_ISAR0 +#define ID_ISAR1_EL1 ID_ISAR1 +#define ID_ISAR2_EL1 ID_ISAR2 +#define ID_ISAR3_EL1 ID_ISAR3 +#define ID_ISAR4_EL1 ID_ISAR4 +#define ID_ISAR5_EL1 ID_ISAR5 +#define ID_MMFR0_EL1 ID_MMFR0 +#define ID_MMFR1_EL1 ID_MMFR1 +#define ID_MMFR2_EL1 ID_MMFR2 +#define ID_MMFR3_EL1 ID_MMFR3 +#define ID_PFR0_EL1 ID_PFR0 +#define ID_PFR1_EL1 ID_PFR1 +#define IFSR32_EL2 IFSR +#define MDCR_EL2 HDCR +#define MIDR_EL1 MIDR +#define MPIDR_EL1 MPIDR +#define PAR_EL1 PAR +#define SCTLR_EL1 SCTLR +#define SCTLR_EL2 HSCTLR +#define TCR_EL1 TTBCR +#define TEECR32_EL1 TEECR +#define TEEHBR32_EL1 TEEHBR +#define TPIDRRO_EL0 TPIDRURO +#define TPIDR_EL0 TPIDRURW +#define TPIDR_EL1 TPIDRPRW +#define TPIDR_EL2 HTPIDR +#define TTBR0_EL1 TTBR0 +#define TTBR0_EL2 HTTBR +#define TTBR1_EL1 TTBR1 +#define VBAR_EL1 VBAR +#define VBAR_EL2 HVBAR +#define VMPIDR_EL2 VMPIDR +#define VPIDR_EL2 VPIDR +#define VTCR_EL2 VTCR +#define VTTBR_EL2 VTTBR + +#endif diff --git a/so3/arch/arm64/include/asm/errno.h b/so3/arch/arm64/include/asm/errno.h new file mode 100644 index 000000000..e4767c811 --- /dev/null +++ b/so3/arch/arm64/include/asm/errno.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include diff --git a/so3/arch/arm64/include/asm/hardware.h b/so3/arch/arm64/include/asm/hardware.h new file mode 100644 index 000000000..be56e220f --- /dev/null +++ b/so3/arch/arm64/include/asm/hardware.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef ASM_HARDWARE_H +#define ASM_HARDWARE_H + +#include + +#endif /* ASM_HARDWARE_H */ diff --git a/so3/arch/arm64/include/asm/image.h b/so3/arch/arm64/include/asm/image.h new file mode 100644 index 000000000..38de3b60f --- /dev/null +++ b/so3/arch/arm64/include/asm/image.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* HEAD flags as follows: 4K kernel page size, 2MB-aligned base as close as possible to the base of DRAM */ +#define __HEAD_FLAGS 0x2 + +#define __HEAD_MAGIC "ARM\x64" + + diff --git a/so3/arch/arm64/include/asm/io.h b/so3/arch/arm64/include/asm/io.h new file mode 100644 index 000000000..2a18c8fd9 --- /dev/null +++ b/so3/arch/arm64/include/asm/io.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include + +#define ioread8(p) (*((volatile uint8_t *) (p))) +#define ioread16(p) (*((volatile uint16_t *) (p))) +#define ioread32(p) (*((volatile uint32_t *) (p))) + +#define iowrite8(p,v) (*((volatile uint8_t *) (p)) = (v)) +#define iowrite16(p,v) (*((volatile uint16_t *) (p)) = (v)) +#define iowrite32(p,v) (*((volatile uint32_t *) (p)) = (v)) diff --git a/so3/arch/arm64/include/asm/mmu.h b/so3/arch/arm64/include/asm/mmu.h new file mode 100644 index 000000000..420a79ce4 --- /dev/null +++ b/so3/arch/arm64/include/asm/mmu.h @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2015-2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef MMU_H +#define MMU_H + +#ifndef __ASSEMBLY__ +#include +#include +#endif + +#include + +#define SZ_256G (256UL * SZ_1G) + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define PAGE_OFFSET UL(0xffff700000000000) +#define L_PAGE_OFFSET UL(0xffff800010000000) + +/* Orders of size which make sense in block mapping */ +#define BLOCK_256G_OFFSET (SZ_256G - 1) +#define BLOCK_1G_OFFSET (SZ_1G - 1) +#define BLOCK_2M_OFFSET (SZ_2M - 1) + +#define BLOCK_256G_MASK (~BLOCK_256G_OFFSET) +#define BLOCK_1G_MASK (~BLOCK_1G_OFFSET) +#define BLOCK_2M_MASK (~BLOCK_2M_OFFSET) + +/* + * We add two helpers for retrieving virt and phys addresses relative to the + * Linux offset according to the memory map (used to access guest memory) + */ +#define __lpa(vaddr) ((vaddr) - L_PAGE_OFFSET + CONFIG_RAM_BASE) +#define __lva(paddr) ((paddr) - CONFIG_RAM_BASE + L_PAGE_OFFSET) + +#define __pa(vaddr) (((addr_t) vaddr) - PAGE_OFFSET + ((addr_t) CONFIG_RAM_BASE)) +#define __va(paddr) (((addr_t) paddr) - ((addr_t) CONFIG_RAM_BASE) + PAGE_OFFSET) + +#define virt_to_phys(x) (__pa(x)) +#define phys_to_virt(x) (__va(x)) + +#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define phys_to_pfn(phys) (((addr_t) phys) >> PAGE_SHIFT) +#define virt_to_pfn(virt) (phys_to_pfn(__pa((addr_t) virt))) +#define pfn_to_virt(pfn) (phys_to_virt(pfn_to_phys(pfn))) + +/* + * With 4k page granule, a virtual address is split into 4 lookup parts + * spanning 9 bits each: + * + * _______________________________________________ + * | | | | | | | + * | 0 | L0 | L1 | L2 | L3 | off | + * |_______|_______|_______|_______|_______|_______| + * 63-48 47-39 38-30 29-21 20-12 11-00 + * + * mask block size + * + * L0: FF8000000000 512GB + * L1: 7FC0000000 1G + * L2: 3FE00000 2M + * L3: 1FF000 4K + * off: FFF + */ + +/* Define the number of entries in each page table */ + +#define TTB_L0_ORDER 9 +#define TTB_L1_ORDER 9 +#define TTB_L2_ORDER 9 +#define TTB_L3_ORDER 9 + +#define TTB_I0_SHIFT 39 +#define TTB_I0_MASK (~((1UL << TTB_I0_SHIFT)-1)) + +#define TTB_I1_SHIFT 30 +#define TTB_I1_MASK (~((1UL << TTB_I1_SHIFT)-1)) + +#define TTB_I2_SHIFT 21 +#define TTB_I2_MASK (~((1UL << TTB_I2_SHIFT)-1)) + +#define TTB_I3_SHIFT 12 +#define TTB_I3_MASK (~((1UL << TTB_I3_SHIFT)-1)) + +#define TTB_L0_ENTRIES (1 << TTB_L0_ORDER) +#define TTB_L1_ENTRIES (1 << TTB_L1_ORDER) +#define TTB_L2_ENTRIES (1 << TTB_L2_ORDER) +#define TTB_L3_ENTRIES (1 << TTB_L3_ORDER) + +/* Size of the page tables */ +#define TTB_L0_SIZE (8 << TTB_L0_ORDER) +#define TTB_L1_SIZE (8 << TTB_L1_ORDER) +#define TTB_L2_SIZE (8 << TTB_L2_ORDER) +#define TTB_L3_SIZE (8 << TTB_L3_ORDER) + +/* + * Memory types + */ +#define MT_DEVICE_NGNRNE 0 +#define MT_DEVICE_NGNRE 1 +#define MT_DEVICE_GRE 2 +#define MT_NORMAL_NC 3 +#define MT_NORMAL 4 + +#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_NGNRNE * 8)) | \ + (0x04 << (MT_DEVICE_NGNRE * 8)) | \ + (0x0c << (MT_DEVICE_GRE * 8)) | \ + (0x44 << (MT_NORMAL_NC * 8)) | \ + (UL(0xff) << (MT_NORMAL * 8))) + +/* + * 
Hardware page table definitions. + * + */ + +#define PTE_TYPE_MASK (3 << 0) +#define PTE_TYPE_FAULT (0 << 0) +#define PTE_TYPE_TABLE (3 << 0) +#define PTE_TYPE_PAGE (3 << 0) +#define PTE_TYPE_BLOCK (1 << 0) +#define PTE_TYPE_VALID (1 << 0) + +#define PTE_TABLE_PXN (1UL << 59) +#define PTE_TABLE_XN (1UL << 60) +#define PTE_TABLE_AP (1UL << 61) +#define PTE_TABLE_NS (1UL << 63) + +/* + * Block + */ +#define PTE_BLOCK_MEMTYPE(x) ((x) << 2) +#define PTE_BLOCK_NS (1 << 5) +#define PTE_BLOCK_AP1 (1 << 6) +#define PTE_BLOCK_AP2 (1 << 7) +#define PTE_BLOCK_NON_SHARE (0 << 8) +#define PTE_BLOCK_OUTER_SHARE (2 << 8) +#define PTE_BLOCK_INNER_SHARE (3 << 8) +#define PTE_BLOCK_AF (1 << 10) +#define PTE_BLOCK_NG (1 << 11) +#define PTE_BLOCK_PXN (UL(1) << 53) +#define PTE_BLOCK_UXN (UL(1) << 54) + +/* + * TCR flags. + */ +#define TCR_T0SZ(x) ((64 - (x)) << 0) +#define TCR_IRGN_NC (0 << 8) +#define TCR_IRGN_WBWA (1 << 8) +#define TCR_IRGN_WT (2 << 8) +#define TCR_IRGN_WBNWA (3 << 8) +#define TCR_IRGN_MASK (3 << 8) +#define TCR_ORGN_NC (0 << 10) +#define TCR_ORGN_WBWA (1 << 10) +#define TCR_ORGN_WT (2 << 10) +#define TCR_ORGN_WBNWA (3 << 10) +#define TCR_ORGN_MASK (3 << 10) +#define TCR_SHARED_NON (0 << 12) +#define TCR_SHARED_OUTER (2 << 12) +#define TCR_SHARED_INNER (3 << 12) +#define TCR_TG0_4K (0 << 14) +#define TCR_TG0_64K (1 << 14) +#define TCR_TG0_16K (2 << 14) +#define TCR_EPD1_DISABLE (1 << 23) + +#define TCR_EL1_RSVD (1 << 31) +#define TCR_EL2_RSVD (1 << 31 | 1 << 23) +#define TCR_EL3_RSVD (1 << 31 | 1 << 23) + +/* Block related */ +#define TTB_L1_BLOCK_ADDR_SHIFT 30 +#define TTB_L1_BLOCK_ADDR_OFFSET (1 << TTB_L1_BLOCK_ADDR_SHIFT) +#define TTB_L1_BLOCK_ADDR_MASK ((~(TTB_L1_BLOCK_ADDR_OFFSET - 1)) & ((1UL << 48) - 1)) + +#define TTB_L2_BLOCK_ADDR_SHIFT 21 +#define TTB_L2_BLOCK_ADDR_OFFSET (1 << TTB_L2_BLOCK_ADDR_SHIFT) +#define TTB_L2_BLOCK_ADDR_MASK ((~(TTB_L2_BLOCK_ADDR_OFFSET - 1)) & ((1UL << 48) - 1)) + +/* Table related */ +#define TTB_L0_TABLE_ADDR_SHIFT 12 +#define TTB_L0_TABLE_ADDR_OFFSET (1 << TTB_L0_TABLE_ADDR_SHIFT) +#define TTB_L0_TABLE_ADDR_MASK ((~(TTB_L0_TABLE_ADDR_OFFSET - 1)) & ((1UL << 48) - 1)) + +#define TTB_L1_TABLE_ADDR_SHIFT TTB_L0_TABLE_ADDR_SHIFT +#define TTB_L1_TABLE_ADDR_OFFSET TTB_L0_TABLE_ADDR_OFFSET +#define TTB_L1_TABLE_ADDR_MASK TTB_L0_TABLE_ADDR_MASK + +#define TTB_L2_TABLE_ADDR_SHIFT TTB_L0_TABLE_ADDR_SHIFT +#define TTB_L2_TABLE_ADDR_OFFSET TTB_L0_TABLE_ADDR_OFFSET +#define TTB_L2_TABLE_ADDR_MASK TTB_L0_TABLE_ADDR_MASK + +#define TTB_L3_PAGE_ADDR_SHIFT 12 +#define TTB_L3_PAGE_ADDR_OFFSET (1 << TTB_L3_PAGE_ADDR_SHIFT) +#define TTB_L3_PAGE_ADDR_MASK ((~(TTB_L3_PAGE_ADDR_OFFSET - 1)) & ((1UL << 48) - 1)) + +/* Given a virtual address, get an entry offset into a page table. 
*/ +#define l0pte_index(a) ((((addr_t) a) >> TTB_I0_SHIFT) & (TTB_L0_ENTRIES - 1)) +#define l1pte_index(a) ((((addr_t) a) >> TTB_I1_SHIFT) & (TTB_L1_ENTRIES - 1)) +#define l2pte_index(a) ((((addr_t) a) >> TTB_I2_SHIFT) & (TTB_L2_ENTRIES - 1)) +#define l3pte_index(a) ((((addr_t) a) >> TTB_I3_SHIFT) & (TTB_L3_ENTRIES - 1)) + +#define pte_index_to_vaddr(i0, i1, i2, i3) (((i0) << TTB_I0_SHIFT) | ((i1) << TTB_I1_SHIFT) | ((i2) << TTB_I2_SHIFT) | ((i3) << TTB_I3_SHIFT)) + +#define l0pte_offset(pgtable, addr) ((u64 *) (pgtable + l0pte_index(addr))) +#define l1pte_offset(l0pte, addr) ((u64 *) (__va(*l0pte & TTB_L0_TABLE_ADDR_MASK)) + l1pte_index(addr)) +#define l2pte_offset(l1pte, addr) ((u64 *) (__va(*l1pte & TTB_L1_TABLE_ADDR_MASK)) + l2pte_index(addr)) +#define l3pte_offset(l2pte, addr) ((u64 *) (__va(*l2pte & TTB_L2_TABLE_ADDR_MASK)) + l3pte_index(addr)) + +#define l1pte_first(l0pte) ((u64 *) __va(*l0pte & TTB_L0_TABLE_ADDR_MASK)) +#define l2pte_first(l1pte) ((u64 *) __va(*l1pte & TTB_L1_TABLE_ADDR_MASK)) +#define l3pte_first(l2pte) ((u64 *) __va(*l2pte & TTB_L2_TABLE_ADDR_MASK)) + +#define l0_addr_end(addr, end) \ + ({ unsigned long __boundary = ((addr) + SZ_256G) & BLOCK_256G_MASK; \ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ + }) + +#define l1_addr_end(addr, end) \ + ({ unsigned long __boundary = ((addr) + SZ_1G) & BLOCK_1G_MASK; \ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ + }) + +#define l2_addr_end(addr, end) \ + ({ unsigned long __boundary = ((addr) + SZ_2M) & BLOCK_2M_MASK; \ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ + }) + + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ + +/* These constants need to be synced to the MT_ types */ +enum dcache_option { + DCACHE_OFF = MT_DEVICE_NGNRNE, + DCACHE_WRITETHROUGH = MT_NORMAL_NC, + DCACHE_WRITEBACK = MT_NORMAL, + DCACHE_WRITEALLOC = MT_NORMAL, +}; + +static inline void set_pte_block(u64 *pte, enum dcache_option option) +{ + u64 attrs = PTE_BLOCK_MEMTYPE(option); + + *pte |= PTE_TYPE_BLOCK | PTE_BLOCK_AF | PTE_BLOCK_INNER_SHARE | PTE_BLOCK_NS; + *pte |= attrs; +} + +static inline void set_pte_table(u64 *pte, enum dcache_option option) +{ + u64 attrs = PTE_TABLE_NS; + + *pte |= PTE_TYPE_TABLE; + *pte |= attrs; +} + +static inline void set_pte_page(u64 *pte, enum dcache_option option) +{ + u64 attrs = PTE_BLOCK_MEMTYPE(option); + + *pte |= PTE_TYPE_PAGE | PTE_BLOCK_AF | PTE_BLOCK_INNER_SHARE | PTE_BLOCK_NS; + *pte |= attrs; +} + +static inline int pte_type(u64 *pte) +{ + return *pte & PTE_TYPE_MASK; +} +
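The index/offset macros above are enough to express a complete software walk of the four-level table. Purely as an illustrative sketch (not taken from the patch, and assuming every intermediate level holds a next-level table descriptor rather than a block), resolving the L3 descriptor for a virtual address could look like this:

	static u64 *l3pte_lookup(u64 *l0pgtable, addr_t vaddr)
	{
		u64 *l0pte, *l1pte, *l2pte;

		l0pte = l0pte_offset(l0pgtable, vaddr);
		if (pte_type(l0pte) != PTE_TYPE_TABLE)
			return NULL;	/* unmapped, or not a table at this level */

		l1pte = l1pte_offset(l0pte, vaddr);
		if (pte_type(l1pte) != PTE_TYPE_TABLE)
			return NULL;	/* could also be a 1 GB block descriptor */

		l2pte = l2pte_offset(l1pte, vaddr);
		if (pte_type(l2pte) != PTE_TYPE_TABLE)
			return NULL;	/* could also be a 2 MB block descriptor */

		/* Pointer to the 4 KB page descriptor covering vaddr. */
		return l3pte_offset(l2pte, vaddr);
	}

Note that PTE_TYPE_TABLE and PTE_TYPE_PAGE share the encoding 3, so the check above correctly rejects block descriptors (type 1) at the intermediate levels.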
+/* + * This structure holds internal fields required to + * manage the MMU configuration regarding address space. + */ +typedef struct { + uint64_t ttbr1[NR_CPUS]; + addr_t pgtable_paddr; + addr_t pgtable_vaddr; +} addrspace_t; + +#define cpu_get_l1pgtable() \ +({ \ + unsigned long ttbr; \ + __asm__("mrs %0, ttbr1_el1" \ + : "=r" (ttbr) : : "cc"); \ + ttbr &= TTBR0_BASE_ADDR_MASK; \ +}) + +#define cpu_get_ttbr0() \ +({ \ + unsigned long ttbr; \ + __asm__("mrs %0, ttbr0_el1" \ + : "=r" (ttbr) : : "cc"); \ + ttbr; \ +}) + +static inline unsigned int get_sctlr(void) +{ + unsigned int val; + + asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc"); + + return val; +} + +static inline void set_sctlr(unsigned int val) +{ + asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc"); + asm volatile("isb"); +} + +extern u64 __sys_l0pgtable[], __sys_idmap_l1pgtable[], __sys_linearmap_l1pgtable[]; + +void set_pte(addr_t *pte, enum dcache_option option); + +extern void __mmu_switch(uint32_t l1pgtable_phys); + +void pgtable_copy_kernel_area(uint32_t *l1pgtable); + +void create_mapping(u64 *l0pgtable, addr_t virt_base, addr_t phys_base, size_t size, bool nocache); +void release_mapping(u64 *pgtable, addr_t virt_base, addr_t size); + +u64 *new_sys_pgtable(void); +void reset_l1pgtable(uint32_t *l1pgtable, bool remove); + +void clear_l1pte(uint32_t *l1pgtable, uint32_t vaddr); + +void mmu_switch(addrspace_t *addrspace); +void dump_pgtable(u64 *l1pgtable); + +void dump_current_pgtable(void); + +void mmu_setup(u64 *pgtable); + +void vectors_init(void); + +void set_current_pgtable(uint64_t *pgtable); +void replace_current_pgtable_with(uint64_t *pgtable); + +#endif + + +#endif /* MMU_H */ + diff --git a/so3/arch/arm64/include/asm/percpu.h b/so3/arch/arm64/include/asm/percpu.h new file mode 100644 index 000000000..bfa244f1b --- /dev/null +++ b/so3/arch/arm64/include/asm/percpu.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2016,2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __ARM_PERCPU +#define __ARM_PERCPU + + +#define PERCPU_SHIFT 13 +#define PERCPU_SIZE (1UL << PERCPU_SHIFT) + +#define __GENERIC_PER_CPU + +void percpu_init_areas(void); +void free_percpu_area(unsigned int cpu); +int init_percpu_area(unsigned int cpu); + +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + * + * Most arches use the __per_cpu_offset array for those offsets but + * some arches have their own ways of determining the offset (x86_64, s390). + */ +#ifndef __per_cpu_offset +extern unsigned long __per_cpu_offset[NR_CPUS]; + +#define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif + +/* var is in discarded region: offset to particular copy we want */ +#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) +#define __get_cpu_var(var) per_cpu(var, smp_processor_id()) +#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id()) + +
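A short usage sketch of the accessors above (the counter name is hypothetical, and its definition is assumed to come from DEFINE_PER_CPU-style machinery built on the __DEFINE_PER_CPU helper declared just below):

	DECLARE_PER_CPU(unsigned long, timer_ticks);	/* hypothetical per-CPU counter */

	static inline void account_tick(void)
	{
		/* Instance belonging to the CPU executing this code. */
		__get_cpu_var(timer_ticks)++;
	}

	static inline unsigned long ticks_of(unsigned int cpu)
	{
		/* Instance of another CPU, reached via __per_cpu_offset[cpu]. */
		return per_cpu(timer_ticks, cpu);
	}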
+/* Separate out the type, so (int[3], foo) works. */ +#define __DEFINE_PER_CPU(type, name, suffix) \ + __attribute__((__section__(".bss.percpu"))) \ + __typeof__(type) per_cpu__##name + +#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name + +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) +#endif diff --git a/so3/arch/arm64/include/asm/posix_types.h b/so3/arch/arm64/include/asm/posix_types.h new file mode 100644 index 000000000..f24de5b28 --- /dev/null +++ b/so3/arch/arm64/include/asm/posix_types.h @@ -0,0 +1,55 @@ +/* + * linux/include/asm-arm/posix_types.h + * + * Copyright (C) 1996-1998 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 27-06-1996 RMK Created + */ +#ifndef __ARCH_ARM_POSIX_TYPES_H +#define __ARCH_ARM_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +#define __FD_SET(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[fd >> 5] |= (1<<(fd & 31))) + +#undef __FD_CLR +#define __FD_CLR(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[fd >> 5] &= ~(1<<(fd & 31))) + +#undef __FD_ISSET +#define __FD_ISSET(fd, fdsetp) \ + ((((fd_set *)fdsetp)->fds_bits[fd >> 5] & (1<<(fd & 31))) != 0) + +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) \ + (memset (fdsetp, 0, sizeof (*(fd_set *)fdsetp))) + +#endif + +#endif diff --git a/so3/arch/arm64/include/asm/processor.h b/so3/arch/arm64/include/asm/processor.h new file mode 100644 index 000000000..e55a5eaf7 --- /dev/null +++ b/so3/arch/arm64/include/asm/processor.h @@ -0,0 +1,1094 @@ +/* + * linux/include/asm-arm/processor.h + * + * Copyright (C) 1995-2002 Russell King + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef PROCESSOR_H +#define PROCESSOR_H + +#include +#include + +#include + +#define BIT(nr) (1UL << (nr)) + +#define sev() asm volatile("sev" : : : "memory") +#define wfe() asm volatile("wfe" : : : "memory") +#define wfi() asm volatile("wfi" : : : "memory") + +#define isb() asm volatile("isb" : : : "memory") +#define dsb(scope) asm volatile("dsb " #scope : : : "memory") +#define dmb(scope) asm volatile("dmb " #scope : : : "memory") + +#define mb() dsb(sy) +#define rmb() dsb(ld) + +#define wmb() dsb(st) + +#define smp_mb() dmb(ish) +#define smp_rmb() dmb(ishld) + +#define smp_wmb() dmb(ishst) + +/* + * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_C (1 << 2) /* Dcache enable */ +#define CR_SA (1 << 3) /* Stack Alignment Check Enable */ +#define CR_I (1 << 12) /* Icache enable */ +#define CR_WXN (1 << 19) /* Write Permission Imply XN */ +#define CR_EE (1 << 25) /* Exception (Big) Endian */ + +/* + * ARMv8 ARM reserves the following encoding for system registers: + * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", + * C5.2, version:ARM DDI 0487A.f) + * [20-19] : Op0 + * [18-16] : Op1 + * [15-12] : CRn + * [11-8] : CRm + * [7-5] : Op2 + */ +#define Op0_shift 19 +#define Op0_mask 0x3 +#define Op1_shift 16 +#define Op1_mask 0x7 +#define CRn_shift 12 +#define CRn_mask 0xf +#define CRm_shift 8 +#define CRm_mask 0xf +#define Op2_shift 5 +#define Op2_mask 0x7 + +#define sys_reg(op0, op1, crn, crm, op2) \ + (((op0) << Op0_shift) | ((op1) << Op1_shift) | \ + ((crn) << CRn_shift) | ((crm) << CRm_shift) | \ + ((op2) << Op2_shift)) + +#define sys_insn sys_reg + +#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask) +#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask) +#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask) +#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask) +#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask) + +#ifdef __ASSEMBLY__ +// The space separator is omitted so that __emit_inst(x) can be parsed as +// either an assembler directive or an assembler macro argument. +#define __emit_inst(x) .inst(x) +#else +#define __emit_inst(x) ".inst " __stringify((x)) "\n\t" +#endif + +/* + * PSR bits + */ +#define PSR_MODE_EL0t 0x00000000 +#define PSR_MODE_EL1t 0x00000004 +#define PSR_MODE_EL1h 0x00000005 +#define PSR_MODE_EL2t 0x00000008 +#define PSR_MODE_EL2h 0x00000009 +#define PSR_MODE_EL3t 0x0000000c +#define PSR_MODE_EL3h 0x0000000d +#define PSR_MODE_MASK 0x0000000f + +/* AArch64 SPSR bits */ +#define PSR_F_BIT 0x00000040 +#define PSR_I_BIT 0x00000080 +#define PSR_A_BIT 0x00000100 +#define PSR_D_BIT 0x00000200 +#define PSR_SSBS_BIT 0x00001000 +#define PSR_PAN_BIT 0x00400000 +#define PSR_UAO_BIT 0x00800000 +#define PSR_DIT_BIT 0x01000000 +#define PSR_V_BIT 0x10000000 +#define PSR_C_BIT 0x20000000 +#define PSR_Z_BIT 0x40000000 +#define PSR_N_BIT 0x80000000 + +/* + * Instructions for modifying PSTATE fields. 
+ * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints, + * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions + * for accessing PSTATE fields have the following encoding: + * Op0 = 0, CRn = 4 + * Op1, Op2 encodes the PSTATE field modified and defines the constraints. + * CRm = Imm4 for the instruction. + * Rt = 0x1f + */ +#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift) +#define PSTATE_Imm_shift CRm_shift + +#define PSTATE_PAN pstate_field(0, 4) +#define PSTATE_UAO pstate_field(0, 3) +#define PSTATE_SSBS pstate_field(3, 1) + +#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) + +#define __SYS_BARRIER_INSN(CRm, op2, Rt) \ + __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) + +#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) + +#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) + +#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) +#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) +#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) +#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2) +#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2) +#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4) +#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5) +#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) +#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) +#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) +#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) +#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) +#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) +#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) +#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6) +#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6) +#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6) +#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0) +#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0) +#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0) +#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0) +#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) + +#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) +#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) +#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) + +#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0) +#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1) +#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2) +#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3) +#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4) +#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5) +#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6) +#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7) + +#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0) +#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1) +#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2) +#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3) +#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) +#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) +#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) + +#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) +#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) +#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) + +#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) +#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) +#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) + +#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) +#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 
5, 1) + +#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) +#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) + +#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) +#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) + +#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) +#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) +#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) + +#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) +#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) +#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) + +#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) + +#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) +#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) +#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) + +#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) +#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1) +#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2) +#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3) + +#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0) +#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1) +#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2) +#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3) + +#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0) +#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1) + +#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0) +#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1) + +#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) + +#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0) +#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1) +#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0) + +#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0) +#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1) +#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0) +#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1) +#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2) +#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) +#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) +#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) + +#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) +#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) + +#define SYS_PAR_EL1_F BIT(0) +#define SYS_PAR_EL1_FST GENMASK(6, 1) + +/*** Statistical Profiling Extension ***/ +/* ID registers */ +#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) +#define SYS_PMSIDR_EL1_FE_SHIFT 0 +#define SYS_PMSIDR_EL1_FT_SHIFT 1 +#define SYS_PMSIDR_EL1_FL_SHIFT 2 +#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 +#define SYS_PMSIDR_EL1_LDS_SHIFT 4 +#define SYS_PMSIDR_EL1_ERND_SHIFT 5 +#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 +#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL +#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL +#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL + +#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) +#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 +#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU +#define SYS_PMBIDR_EL1_P_SHIFT 4 +#define SYS_PMBIDR_EL1_F_SHIFT 5 + +/* Sampling controls */ +#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) +#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 +#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 +#define SYS_PMSCR_EL1_CX_SHIFT 3 +#define SYS_PMSCR_EL1_PA_SHIFT 4 +#define SYS_PMSCR_EL1_TS_SHIFT 5 +#define SYS_PMSCR_EL1_PCT_SHIFT 6 + +#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) +#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 +#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 +#define SYS_PMSCR_EL2_CX_SHIFT 3 +#define SYS_PMSCR_EL2_PA_SHIFT 4 +#define SYS_PMSCR_EL2_TS_SHIFT 5 +#define SYS_PMSCR_EL2_PCT_SHIFT 6 + +#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) + +#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) +#define SYS_PMSIRR_EL1_RND_SHIFT 0 +#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 +#define 
SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL + +/* Filtering controls */ +#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) +#define SYS_PMSFCR_EL1_FE_SHIFT 0 +#define SYS_PMSFCR_EL1_FT_SHIFT 1 +#define SYS_PMSFCR_EL1_FL_SHIFT 2 +#define SYS_PMSFCR_EL1_B_SHIFT 16 +#define SYS_PMSFCR_EL1_LD_SHIFT 17 +#define SYS_PMSFCR_EL1_ST_SHIFT 18 + +#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) +#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL + +#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) +#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 + +/* Buffer controls */ +#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) +#define SYS_PMBLIMITR_EL1_E_SHIFT 0 +#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 +#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL +#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) + +#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) + +/* Buffer error reporting */ +#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) +#define SYS_PMBSR_EL1_COLL_SHIFT 16 +#define SYS_PMBSR_EL1_S_SHIFT 17 +#define SYS_PMBSR_EL1_EA_SHIFT 18 +#define SYS_PMBSR_EL1_DL_SHIFT 19 +#define SYS_PMBSR_EL1_EC_SHIFT 26 +#define SYS_PMBSR_EL1_EC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) + +#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 +#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 +#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) + +/*** End of Statistical Profiling Extension ***/ + +#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) +#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) + +#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) +#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) + +#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0) +#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1) +#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2) +#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3) +#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7) + +#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) +#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1) + +#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0) +#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1) +#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2) +#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3) +#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n) +#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0) +#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1) +#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2) +#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3) +#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n) +#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0) +#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1) +#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2) +#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3) +#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) +#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3) +#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) +#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6) +#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7) +#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) +#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2) +#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3) +#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) +#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) +#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) +#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) + +#define 
SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1) +#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4) + +#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) + +#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) +#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) +#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) + +#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) + +#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) +#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) + +#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) +#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) +#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) +#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) +#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) +#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) +#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) +#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) +#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0) +#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1) +#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2) +#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0) +#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3) + +#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) +#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) + +#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) + +#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) +#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) +#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) + +#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1) +#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2) + +#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) +#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) + +#define __PMEV_op2(n) ((n) & 0x7) +#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) +#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n)) +#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3)) +#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n)) + +#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) + +#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) +#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) +#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) +#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) +#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) +#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) +#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) +#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) +#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) + +#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) +#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) +#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0) +#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1) +#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2) +#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3) + +#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) +#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0) +#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1) +#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2) +#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3) + +#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) +#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) +#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) +#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) +#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) +#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) +#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5) +#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) + +#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x) +#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0) +#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1) +#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2) +#define SYS_ICH_LR3_EL2 
__SYS__LR0_EL2(3) +#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4) +#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5) +#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6) +#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7) + +#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x) +#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0) +#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1) +#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2) +#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3) +#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4) +#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5) +#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) +#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) + +/* VHE encodings for architectural EL0/1 system registers */ +#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) +#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) +#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) +#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) +#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) +#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) +#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) +#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) +#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) +#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) +#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) +#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) +#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) +#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) +#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) +#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) +#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2) +#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0) +#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1) +#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) + +/* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_DSSBS (BIT(44)) +#define SCTLR_ELx_ENIA (BIT(31)) +#define SCTLR_ELx_ENIB (BIT(30)) +#define SCTLR_ELx_ENDA (BIT(27)) +#define SCTLR_ELx_EE (BIT(25)) +#define SCTLR_ELx_IESB (BIT(21)) +#define SCTLR_ELx_WXN (BIT(19)) +#define SCTLR_ELx_ENDB (BIT(13)) +#define SCTLR_ELx_I (BIT(12)) +#define SCTLR_ELx_SA (BIT(3)) +#define SCTLR_ELx_C (BIT(2)) +#define SCTLR_ELx_A (BIT(1)) +#define SCTLR_ELx_M (BIT(0)) + +#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ + SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB) + +/* SCTLR_EL2 specific flags. */ +#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ + (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ + (BIT(29))) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ENDIAN_SET_EL2 SCTLR_ELx_EE +#else +#define ENDIAN_SET_EL2 0 +#endif + +/* SCTLR_EL1 specific flags. 
*/ +#define SCTLR_EL1_UCI (BIT(26)) +#define SCTLR_EL1_E0E (BIT(24)) +#define SCTLR_EL1_SPAN (BIT(23)) +#define SCTLR_EL1_NTWE (BIT(18)) +#define SCTLR_EL1_NTWI (BIT(16)) +#define SCTLR_EL1_UCT (BIT(15)) +#define SCTLR_EL1_DZE (BIT(14)) +#define SCTLR_EL1_UMA (BIT(9)) +#define SCTLR_EL1_SED (BIT(8)) +#define SCTLR_EL1_ITD (BIT(7)) +#define SCTLR_EL1_CP15BEN (BIT(5)) +#define SCTLR_EL1_SA0 (BIT(4)) + +#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | (BIT(29))) + +#define ENDIAN_SET_EL1 0 + +#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ + SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\ + SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ + SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ + ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) + +/* id_aa64isar0 */ +#define ID_AA64ISAR0_TS_SHIFT 52 +#define ID_AA64ISAR0_FHM_SHIFT 48 +#define ID_AA64ISAR0_DP_SHIFT 44 +#define ID_AA64ISAR0_SM4_SHIFT 40 +#define ID_AA64ISAR0_SM3_SHIFT 36 +#define ID_AA64ISAR0_SHA3_SHIFT 32 +#define ID_AA64ISAR0_RDM_SHIFT 28 +#define ID_AA64ISAR0_ATOMICS_SHIFT 20 +#define ID_AA64ISAR0_CRC32_SHIFT 16 +#define ID_AA64ISAR0_SHA2_SHIFT 12 +#define ID_AA64ISAR0_SHA1_SHIFT 8 +#define ID_AA64ISAR0_AES_SHIFT 4 + +/* id_aa64isar1 */ +#define ID_AA64ISAR1_SB_SHIFT 36 +#define ID_AA64ISAR1_FRINTTS_SHIFT 32 +#define ID_AA64ISAR1_GPI_SHIFT 28 +#define ID_AA64ISAR1_GPA_SHIFT 24 +#define ID_AA64ISAR1_LRCPC_SHIFT 20 +#define ID_AA64ISAR1_FCMA_SHIFT 16 +#define ID_AA64ISAR1_JSCVT_SHIFT 12 +#define ID_AA64ISAR1_API_SHIFT 8 +#define ID_AA64ISAR1_APA_SHIFT 4 +#define ID_AA64ISAR1_DPB_SHIFT 0 + +#define ID_AA64ISAR1_APA_NI 0x0 +#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_API_NI 0x0 +#define ID_AA64ISAR1_API_IMP_DEF 0x1 +#define ID_AA64ISAR1_GPA_NI 0x0 +#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_GPI_NI 0x0 +#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 + +/* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 +#define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_SVE_SHIFT 32 +#define ID_AA64PFR0_RAS_SHIFT 28 +#define ID_AA64PFR0_GIC_SHIFT 24 +#define ID_AA64PFR0_ASIMD_SHIFT 20 +#define ID_AA64PFR0_FP_SHIFT 16 +#define ID_AA64PFR0_EL3_SHIFT 12 +#define ID_AA64PFR0_EL2_SHIFT 8 +#define ID_AA64PFR0_EL1_SHIFT 4 +#define ID_AA64PFR0_EL0_SHIFT 0 + +#define ID_AA64PFR0_SVE 0x1 +#define ID_AA64PFR0_RAS_V1 0x1 +#define ID_AA64PFR0_FP_NI 0xf +#define ID_AA64PFR0_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_ASIMD_NI 0xf +#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2 + +/* id_aa64pfr1 */ +#define ID_AA64PFR1_SSBS_SHIFT 4 + +#define ID_AA64PFR1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 + +/* id_aa64zfr0 */ +#define ID_AA64ZFR0_SM4_SHIFT 40 +#define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BITPERM_SHIFT 16 +#define ID_AA64ZFR0_AES_SHIFT 4 +#define ID_AA64ZFR0_SVEVER_SHIFT 0 + +#define ID_AA64ZFR0_SM4 0x1 +#define ID_AA64ZFR0_SHA3 0x1 +#define ID_AA64ZFR0_BITPERM 0x1 +#define ID_AA64ZFR0_AES 0x1 +#define ID_AA64ZFR0_AES_PMULL 0x2 +#define ID_AA64ZFR0_SVEVER_SVE2 0x1 + +/* id_aa64mmfr0 */ +#define ID_AA64MMFR0_TGRAN4_SHIFT 28 +#define ID_AA64MMFR0_TGRAN64_SHIFT 24 +#define ID_AA64MMFR0_TGRAN16_SHIFT 20 +#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 +#define ID_AA64MMFR0_SNSMEM_SHIFT 12 +#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_ASID_SHIFT 4 +#define ID_AA64MMFR0_PARANGE_SHIFT 0 + 
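These layouts are consumed by shifting and masking the 4-bit ID fields. As a minimal sketch (illustrative only, relying on the read_sysreg() helper defined further down in processor.h and the *_SUPPORTED values just below), probing 4 KiB translation-granule support could look like:

	static inline bool cpu_has_4k_granule(void)
	{
		u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);

		/* Each ID field is 4 bits wide; 0x0 means the 4 KiB granule is implemented. */
		return ((mmfr0 >> ID_AA64MMFR0_TGRAN4_SHIFT) & 0xf) == ID_AA64MMFR0_TGRAN4_SUPPORTED;
	}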
+#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 +#define ID_AA64MMFR0_PARANGE_48 0x5 +#define ID_AA64MMFR0_PARANGE_52 0x6 + +#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 + +/* id_aa64mmfr1 */ +#define ID_AA64MMFR1_PAN_SHIFT 20 +#define ID_AA64MMFR1_LOR_SHIFT 16 +#define ID_AA64MMFR1_HPD_SHIFT 12 +#define ID_AA64MMFR1_VHE_SHIFT 8 +#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 +#define ID_AA64MMFR1_HADBS_SHIFT 0 + +#define ID_AA64MMFR1_VMIDBITS_8 0 +#define ID_AA64MMFR1_VMIDBITS_16 2 + +/* id_aa64mmfr2 */ +#define ID_AA64MMFR2_FWB_SHIFT 40 +#define ID_AA64MMFR2_AT_SHIFT 32 +#define ID_AA64MMFR2_LVA_SHIFT 16 +#define ID_AA64MMFR2_IESB_SHIFT 12 +#define ID_AA64MMFR2_LSM_SHIFT 8 +#define ID_AA64MMFR2_UAO_SHIFT 4 +#define ID_AA64MMFR2_CNP_SHIFT 0 + +/* id_aa64dfr0 */ +#define ID_AA64DFR0_PMSVER_SHIFT 32 +#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 +#define ID_AA64DFR0_WRPS_SHIFT 20 +#define ID_AA64DFR0_BRPS_SHIFT 12 +#define ID_AA64DFR0_PMUVER_SHIFT 8 +#define ID_AA64DFR0_TRACEVER_SHIFT 4 +#define ID_AA64DFR0_DEBUGVER_SHIFT 0 + +#define ID_ISAR5_RDM_SHIFT 24 +#define ID_ISAR5_CRC32_SHIFT 16 +#define ID_ISAR5_SHA2_SHIFT 12 +#define ID_ISAR5_SHA1_SHIFT 8 +#define ID_ISAR5_AES_SHIFT 4 +#define ID_ISAR5_SEVL_SHIFT 0 + +#define MVFR0_FPROUND_SHIFT 28 +#define MVFR0_FPSHVEC_SHIFT 24 +#define MVFR0_FPSQRT_SHIFT 20 +#define MVFR0_FPDIVIDE_SHIFT 16 +#define MVFR0_FPTRAP_SHIFT 12 +#define MVFR0_FPDP_SHIFT 8 +#define MVFR0_FPSP_SHIFT 4 +#define MVFR0_SIMD_SHIFT 0 + +#define MVFR1_SIMDFMAC_SHIFT 28 +#define MVFR1_FPHP_SHIFT 24 +#define MVFR1_SIMDHP_SHIFT 20 +#define MVFR1_SIMDSP_SHIFT 16 +#define MVFR1_SIMDINT_SHIFT 12 +#define MVFR1_SIMDLS_SHIFT 8 +#define MVFR1_FPDNAN_SHIFT 4 +#define MVFR1_FPFTZ_SHIFT 0 + + +#define ID_AA64MMFR0_TGRAN4_SHIFT 28 +#define ID_AA64MMFR0_TGRAN64_SHIFT 24 +#define ID_AA64MMFR0_TGRAN16_SHIFT 20 + +#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 + +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED + +/* + * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which + * are reserved by the SVE architecture for future expansion of the LEN + * field, with compatible semantics. 
+ */ +#define ZCR_ELx_LEN_SHIFT 0 +#define ZCR_ELx_LEN_SIZE 9 +#define ZCR_ELx_LEN_MASK 0x1ff + +#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ +#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ +#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) + + +/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ +#define SYS_MPIDR_SAFE_VAL (BIT(31)) + +#define S_FRAME_SIZE 0x110 + +/* 34 registers */ + +#define S_PSTATE 0x108 +#define S_PC 0x100 +#define S_SP 0xf8 +#define S_LR 0xf0 +#define S_X29 0xe8 +#define S_X28 0xe0 +#define S_X27 0xd8 +#define S_X26 0xd0 +#define S_X25 0xc8 +#define S_X24 0xc0 +#define S_X23 0xb8 +#define S_X22 0xb0 +#define S_X21 0xa8 +#define S_X20 0xa0 +#define S_X19 0x98 +#define S_X18 0x90 +#define S_X17 0x88 +#define S_X16 0x80 +#define S_X15 0x78 +#define S_X14 0x70 +#define S_X13 0x68 +#define S_X12 0x60 +#define S_X11 0x58 +#define S_X10 0x50 +#define S_X9 0x48 +#define S_X8 0x40 +#define S_X7 0x38 +#define S_X6 0x30 +#define S_X5 0x28 +#define S_X4 0x20 +#define S_X3 0x18 +#define S_X2 0x10 +#define S_X1 0x8 +#define S_X0 0x0 + +#ifdef __ASSEMBLY__ + +.macro current_cpu reg + mrs \reg, mpidr_el1 // read Multiprocessor ID register reg + and \reg, \reg, #0x3 // mask on CPU ID bits +.endm + +.macro curdom rd, tmp + + // Compute the address of the stack bottom where cpu_info is located. + ldr \rd, =(~(STACK_SIZE - 1)) + mov \tmp, sp + and \rd, \tmp, \rd + + // Get the address of the domain descriptor + ldr \rd, [\rd] +.endm + +.macro disable_irq + msr daifset, #2 // arch_local_irq_disable + nop +.endm + +.macro enable_irq + msr daifclr, #2 // arch_local_irq_enable + nop +.endm + + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + .equ .L__reg_num_x\num, \num + .endr + .equ .L__reg_num_xzr, 31 + + .macro mrs_s, rt, sreg + __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt)) + .endm + + .macro msr_s, sreg, rt + __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt)) + .endm + +#else + +static inline int cpu_mode(void) +{ + uint32_t el; + + asm volatile( + "mrs %0, CurrentEL" : "=r" (el) : : "memory", "cc"); + + return el; +} + +#define user_mode() (cpu_mode() == 0) + +#define __DEFINE_MRS_MSR_S_REGNUM \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__reg_num_x\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__reg_num_xzr, 31\n" + +#define DEFINE_MRS_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro mrs_s, rt, sreg\n" \ + __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ +" .endm\n" + +#define DEFINE_MSR_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro msr_s, sreg, rt\n" \ + __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ +" .endm\n" + +#define UNDEFINE_MRS_S \ +" .purgem mrs_s\n" + +#define UNDEFINE_MSR_S \ +" .purgem msr_s\n" + +#define __mrs_s(v, r) \ + DEFINE_MRS_S \ +" mrs_s " v ", " __stringify(r) "\n" \ + UNDEFINE_MRS_S + +#define __msr_s(r, v) \ + DEFINE_MSR_S \ +" msr_s " __stringify(r) ", " v "\n" \ + UNDEFINE_MSR_S + +/* + * Unlike read_cpuid, calls to read_sysreg are never expected to be + * optimized away or replaced with synthetic values. + */ +#define read_sysreg(r) ({ \ + u64 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +/* + * The "Z" constraint normally means a zero immediate, but when combined with + * the "%x0" template means XZR. 
+ */
+#define write_sysreg(v, r) do {					\
+	u64 __val = (u64)(v);					\
+	asm volatile("msr " __stringify(r) ", %x0"		\
+		     : : "rZ" (__val));				\
+} while (0)
+
+/*
+ * For registers without architectural names, or simply unsupported by
+ * GAS.
+ */
+#define read_sysreg_s(r) ({					\
+	u64 __val;						\
+	asm volatile(__mrs_s("%0", r) : "=r" (__val));		\
+	__val;							\
+})
+
+#define write_sysreg_s(v, r) do {				\
+	u64 __val = (u64)(v);					\
+	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));	\
+} while (0)
+
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {		\
+	u64 __scs_val = read_sysreg(sysreg);			\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
+	if (__scs_new != __scs_val)				\
+		write_sysreg(__scs_new, sysreg);		\
+} while (0)
+
+/*
+ * PMR values used to mask/unmask interrupts.
+ *
+ * GIC priority masking works as follows: if an IRQ's priority is a higher value
+ * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
+ * means masking more IRQs (or at least that the same IRQs remain masked).
+ *
+ * To mask interrupts, we clear the most significant bit of PMR.
+ *
+ * Some code sections either automatically switch back to PSR.I or explicitly
+ * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
+ * in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+#define GIC_PRIO_IRQON		0xe0
+#define GIC_PRIO_IRQOFF		(GIC_PRIO_IRQON & ~0x80)
+#define GIC_PRIO_PSR_I_SET	(1 << 4)
+
+static inline int smp_processor_id(void) {
+	u64 cpu;
+
+	/* Read the Multiprocessor ID register (64-bit, so use a u64 operand) */
+	asm volatile ("mrs %0, mpidr_el1": "=r" (cpu));
+
+	/* Mask out all but CPU ID bits */
+	return (int) (cpu & 0x3);
+}
+
+static inline void local_irq_enable(void)
+{
+	asm volatile(
+		"msr	daifclr, #2	// arch_local_irq_enable\n"
+		"nop"
+		::: "memory");
+}
+
+static inline void local_irq_disable(void)
+{
+	asm volatile(
+		"msr	daifset, #2	// arch_local_irq_disable\n"
+		"nop"
+		::: "memory");
+}
+
+/* %x0: daif must be accessed through an x register */
+#define local_irq_save(x) \
+	({ \
+		__asm__ __volatile__( \
+		"mrs	%x0, daif" \
+		: "=r" (x) : : "memory", "cc"); \
+	})
+
+static inline void local_irq_restore(uint32_t flags)
+{
+	asm volatile(
+		"msr	daif, %x0"
+		:: "r" (flags) : "memory", "cc");
+}
+
+/*
+ * Save the current interrupt enable state.
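+ *
+ * A minimal usage sketch of the helpers above: note that local_irq_save()
+ * only records the DAIF flags, so masking must be requested explicitly:
+ *
+ *	uint32_t flags;
+ *
+ *	local_irq_save(flags);
+ *	local_irq_disable();
+ *	... code that must run with IRQs masked ...
+ *	local_irq_restore(flags);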
+ */
+static inline uint32_t local_save_flags(void)
+{
+	uint32_t flags;
+
+	asm volatile(
+		"mrs	%x0, daif"
+		: "=r" (flags) : : "memory", "cc");
+
+	return flags;
+}
+
+
+#define local_irq_is_enabled() \
+	({ unsigned long flags; \
+	   flags = local_save_flags(); \
+	   !(flags & PSR_I_BIT); \
+})
+
+#define local_irq_is_disabled() \
+	(!local_irq_is_enabled())
+
+static inline void cpu_relax(void)
+{
+	asm volatile("yield" ::: "memory");
+}
+
+typedef struct cpu_regs {
+	u64	x0;
+	u64	x1;
+	u64	x2;
+	u64	x3;
+	u64	x4;
+	u64	x5;
+	u64	x6;
+	u64	x7;
+	u64	x8;
+	u64	x9;
+	u64	x10;
+	u64	x11;
+	u64	x12;
+	u64	x13;
+	u64	x14;
+	u64	x15;
+	u64	x16;
+	u64	x17;
+	u64	x18;
+	u64	x19;
+	u64	x20;
+	u64	x21;
+	u64	x22;
+	u64	x23;
+	u64	x24;
+	u64	x25;
+	u64	x26;
+	u64	x27;
+	u64	x28;
+	u64	fp;
+	u64	lr;
+	u64	sp;
+	u64	pc;
+	u64	pstate;
+} cpu_regs_t;
+
+typedef struct cpu_sys_regs {
+	u64	vksp;
+	u64	vusp;
+} cpu_sys_regs_t;
+
+
+struct vcpu_guest_context;
+struct domain;
+
+void __switch_to(struct domain *prev, struct domain *next);
+void ret_to_user(void);
+void pre_ret_to_user(void);
+
+void cpu_do_idle(void);
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* PROCESSOR_H */
diff --git a/so3/arch/arm64/include/asm/setup.h b/so3/arch/arm64/include/asm/setup.h
new file mode 100644
index 000000000..c1acccd02
--- /dev/null
+++ b/so3/arch/arm64/include/asm/setup.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef ASM_SETUP_H
+#define ASM_SETUP_H
+
+#define L_TEXT_OFFSET	0x80000
+
+/*
+ * Memory map description
+ */
+#define NR_BANKS	8
+
+struct meminfo {
+	int nr_banks;
+	unsigned long end;
+	struct {
+		unsigned long start;
+		unsigned long size;
+		int node;
+	} bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+extern void *cpu1_stack, *cpu2_stack, *cpu3_stack;
+
+void setup_arch(void);
+void cpu_init(void);
+
+#endif /* ASM_SETUP_H */
diff --git a/so3/arch/arm64/include/asm/spinlock.h b/so3/arch/arm64/include/asm/spinlock.h
new file mode 100644
index 000000000..1f23bee2d
--- /dev/null
+++ b/so3/arch/arm64/include/asm/spinlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef ASM_SPINLOCK_H
+#define ASM_SPINLOCK_H
+
+#include
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock __attribute__((__packed__));
+} raw_rwlock_t;
+
+
+/*
+ * Spin-locking, inherited from the ARMv6 implementation.
+ *
+ * We exclusively read the old value. If it is zero, we may have
+ * won the lock, so we try exclusively storing it. A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * the CPUs are assumed to have weakly ordered memory.
+ *
+ * Unlocked value: 0
+ * Locked value: 1
+ */
+
+#define _raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (_raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define __raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+
+extern void _raw_spin_lock(raw_spinlock_t *lock);
+extern void _raw_spin_unlock(raw_spinlock_t *lock);
+extern int _raw_spin_trylock(raw_spinlock_t *lock);
+
+#if 0
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+#endif
+
+#endif /* ASM_SPINLOCK_H */
diff --git a/so3/arch/arm64/include/asm/syscall.h b/so3/arch/arm64/include/asm/syscall.h
new file mode 100644
index 000000000..5c4e7c5a4
--- /dev/null
+++ b/so3/arch/arm64/include/asm/syscall.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ * Copyright (C) 2017 Xavier Ruppen
+ * Copyright (C) 2017 Alexandre Malki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef ASM_ARM_SYSCALL_H +#define ASM_ARM_SYSCALL_H + +#define SYSINFO_DUMP_HEAP 0 +#define SYSINFO_DUMP_SCHED 1 +#define SYSINFO_TEST_MALLOC 2 +#define SYSINFO_PRINTK 3 +#define SYSINFO_DUMP_PROC 4 + +/* + * Syscall number definition + */ + +#define SYSCALL_EXIT 1 +#define SYSCALL_EXECVE 2 +#define SYSCALL_WAITPID 3 +#define SYSCALL_READ 4 +#define SYSCALL_WRITE 5 +#define SYSCALL_FORK 7 +#define SYSCALL_PTRACE 8 +#define SYSCALL_READDIR 9 +#define SYSCALL_OPEN 14 +#define SYSCALL_CLOSE 15 +#define SYSCALL_THREAD_CREATE 16 +#define SYSCALL_THREAD_JOIN 17 +#define SYSCALL_THREAD_EXIT 18 +#define SYSCALL_PIPE 19 +#define SYSCALL_IOCTL 20 +#define SYSCALL_FCNTL 21 +#define SYSCALL_DUP 22 +#define SYSCALL_DUP2 23 + +#define SYSCALL_STAT 34 +#define SYSCALL_GETPID 37 + +#define SYSCALL_THREAD_YIELD 40 + +#define SYSCALL_SBRK 45 +#define SYSCALL_SIGACTION 46 +#define SYSCALL_KILL 47 +#define SYSCALL_SIGRETURN 48 + +#define SYSCALL_LSEEK 50 + +#define SYSCALL_MUTEX_LOCK 60 +#define SYSCALL_MUTEX_UNLOCK 61 + +#define SYSCALL_NANOSLEEP 70 + +#define SYSCALL_SYSINFO 99 + +#ifndef __ASSEMBLY__ + +#include +#include + +int syscall_handle(uint32_t, uint32_t, uint32_t, uint32_t); + +void set_errno(uint32_t val); +#endif /* __ASSEMBLY__ */ + +#endif /* ASM_ARM_SYSCALL_H */ diff --git a/so3/arch/arm64/include/asm/types.h b/so3/arch/arm64/include/asm/types.h new file mode 100644 index 000000000..0018bd5a4 --- /dev/null +++ b/so3/arch/arm64/include/asm/types.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __ASM_ARM_TYPES_H +#define __ASM_ARM_TYPES_H + +typedef unsigned short umode_t; + +#define BITS_PER_INT 32 + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +typedef __signed__ long __s64; +typedef unsigned long __u64; + + +#endif diff --git a/so3/arch/arm64/include/asm/utils.h b/so3/arch/arm64/include/asm/utils.h new file mode 100644 index 000000000..828b86cb3 --- /dev/null +++ b/so3/arch/arm64/include/asm/utils.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, + * Aneesh V + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef _UTILS_H_
+#define _UTILS_H_
+
+static inline s32 log_2_n_round_up(u32 n)
+{
+	s32 log2n = -1;
+	u32 temp = n;
+
+	while (temp) {
+		log2n++;
+		temp >>= 1;
+	}
+
+	if (n & (n - 1))
+		return log2n + 1; /* not power of 2 - round up */
+	else
+		return log2n; /* power of 2 */
+}
+
+static inline s32 log_2_n_round_down(u32 n)
+{
+	s32 log2n = -1;
+	u32 temp = n;
+
+	while (temp) {
+		log2n++;
+		temp >>= 1;
+	}
+
+	return log2n;
+}
+
+#endif
diff --git a/so3/arch/arm64/include/asm/vfp.h b/so3/arch/arm64/include/asm/vfp.h
new file mode 100644
index 000000000..33bc3a15d
--- /dev/null
+++ b/so3/arch/arm64/include/asm/vfp.h
@@ -0,0 +1,50 @@
+/*
+ * linux/include/asm-arm/vfp.h
+ *
+ * VFP register definitions.
+ * First, the standard VFP set.
+ */
+
+#ifndef VFP_H
+#define VFP_H
+
+#define FPEXC_EX	(1u << 31)
+#define FPEXC_EN	(1u << 30)
+#define FPEXC_FP2V	(1u << 28)
+
+#define MVFR0_A_SIMD_MASK	(0xf << 0)
+
+#define FPSID_IMPLEMENTER_BIT	(24)
+#define FPSID_IMPLEMENTER_MASK	(0xff << FPSID_IMPLEMENTER_BIT)
+#define FPSID_ARCH_BIT		(16)
+#define FPSID_ARCH_MASK		(0xf << FPSID_ARCH_BIT)
+#define FPSID_PART_BIT		(8)
+#define FPSID_PART_MASK		(0xff << FPSID_PART_BIT)
+#define FPSID_VARIANT_BIT	(4)
+#define FPSID_VARIANT_MASK	(0xf << FPSID_VARIANT_BIT)
+#define FPSID_REV_BIT		(0)
+#define FPSID_REV_MASK		(0xf << FPSID_REV_BIT)
+
+#ifndef __ASSEMBLY__
+struct vfp_state {
+	uint64_t fpregs1[16];	/* {d0-d15} */
+	uint64_t fpregs2[16];	/* {d16-d31} */
+	uint32_t fpexc;
+	uint32_t fpscr;
+
+	/* VFP implementation specific state */
+	uint32_t fpinst;
+	uint32_t fpinst2;
+};
+
+/* The save/restore helpers operate on a domain, hence this forward declaration. */
+struct domain;
+void vfp_save_state(struct domain *v);
+void vfp_restore_state(struct domain *v);
+void vfp_enable(void);
+
+
+#endif /* __ASSEMBLY__ */
+
+
+
+#endif /* VFP_H */
diff --git a/so3/arch/arm64/include/asm/virt.h b/so3/arch/arm64/include/asm/virt.h
new file mode 100644
index 000000000..84ba6c4f0
--- /dev/null
+++ b/so3/arch/arm64/include/asm/virt.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include
+
+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall
+ * number. A value less than HVC_STUB_HCALL_NR indicates a special
+ * hcall, such as set vector. Any other value is handled in a
+ * hypervisor specific way.
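+ *
+ * For example (sketch), installing a new EL2 vector table comes down to an
+ * "hvc #0" with x0 = HVC_SET_VECTORS and x1 = the physical address of the
+ * new table; this is what the __hyp_set_vectors() helper declared below does.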
+ *
+ * The hypercall is allowed to clobber any of the caller-saved
+ * registers (x0-x18), so it is advisable to use it through the
+ * indirection of a function call (as implemented in hyp-stub.S).
+ */
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 0
+
+/*
+ * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
+ */
+#define HVC_SOFT_RESTART 1
+
+/*
+ * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
+ */
+#define HVC_RESET_VECTORS 2
+
+/* Max number of HYP stub hypercalls */
+#define HVC_STUB_HCALL_NR 3
+
+/* Error returned when an invalid stub number is passed into x0 */
+#define HVC_STUB_ERR	0xbadca11
+
+#define BOOT_CPU_MODE_EL1	(0xe11)
+#define BOOT_CPU_MODE_EL2	(0xe12)
+
+/* Current Exception Level values, as contained in CurrentEL */
+#define CurrentEL_EL1	(1 << 2)
+#define CurrentEL_EL2	(2 << 2)
+
+/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TPMS		(1 << 14)
+#define MDCR_EL2_E2PB_MASK	(UL(0x3))
+#define MDCR_EL2_E2PB_SHIFT	(UL(12))
+#define MDCR_EL2_TDRA		(1 << 11)
+#define MDCR_EL2_TDOSA		(1 << 10)
+#define MDCR_EL2_TDA		(1 << 9)
+#define MDCR_EL2_TDE		(1 << 8)
+#define MDCR_EL2_HPME		(1 << 7)
+#define MDCR_EL2_TPM		(1 << 6)
+#define MDCR_EL2_TPMCR		(1 << 5)
+#define MDCR_EL2_HPMN_MASK	(0x1F)
+
+/* Hyp Coprocessor Trap Register */
+#define CPTR_EL2_TCPAC	(1 << 31)
+#define CPTR_EL2_TTA	(1 << 20)
+#define CPTR_EL2_TFP_SHIFT	10	/* CPTR_EL2.TFP is bit 10 */
+#define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_TZ	(1 << 8)
+#define CPTR_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 */
+#define CPTR_EL2_DEFAULT	CPTR_EL2_RES1
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_FWB		(UL(1) << 46)
+#define HCR_API		(UL(1) << 41)
+#define HCR_APK		(UL(1) << 40)
+#define HCR_TEA		(UL(1) << 37)
+#define HCR_TERR	(UL(1) << 36)
+#define HCR_TLOR	(UL(1) << 35)
+#define HCR_E2H		(UL(1) << 34)
+#define HCR_ID		(UL(1) << 33)
+#define HCR_CD		(UL(1) << 32)
+#define HCR_RW_SHIFT	31
+#define HCR_RW		(UL(1) << HCR_RW_SHIFT)
+#define HCR_TRVM	(UL(1) << 30)
+#define HCR_HCD		(UL(1) << 29)
+#define HCR_TDZ		(UL(1) << 28)
+#define HCR_TGE		(UL(1) << 27)
+#define HCR_TVM		(UL(1) << 26)
+#define HCR_TTLB	(UL(1) << 25)
+#define HCR_TPU		(UL(1) << 24)
+#define HCR_TPC		(UL(1) << 23)
+#define HCR_TSW		(UL(1) << 22)
+#define HCR_TAC		(UL(1) << 21)
+#define HCR_TIDCP	(UL(1) << 20)
+#define HCR_TSC		(UL(1) << 19)
+#define HCR_TID3	(UL(1) << 18)
+#define HCR_TID2	(UL(1) << 17)
+#define HCR_TID1	(UL(1) << 16)
+#define HCR_TID0	(UL(1) << 15)
+#define HCR_TWE		(UL(1) << 14)
+#define HCR_TWI		(UL(1) << 13)
+#define HCR_DC		(UL(1) << 12)
+#define HCR_BSU		(3 << 10)
+#define HCR_BSU_IS	(UL(1) << 10)
+#define HCR_FB		(UL(1) << 9)
+#define HCR_VSE		(UL(1) << 8)
+#define HCR_VI		(UL(1) << 7)
+#define HCR_VF		(UL(1) << 6)
+#define HCR_AMO		(UL(1) << 5)
+#define HCR_IMO		(UL(1) << 4)
+#define HCR_FMO		(UL(1) << 3)
+#define HCR_PTW		(UL(1) << 2)
+#define HCR_SWIO	(UL(1) << 1)
+#define HCR_VM		(UL(1) << 0)
+
+/*
+ * The bits we set in HCR:
+ * TLOR:	Trap LORegion register accesses
+ * RW:		64bit by default, can be overridden for 32bit VMs
+ * TAC:		Trap ACTLR
+ * TSC:		Trap SMC
+ * TVM:		Trap VM ops (until M+C set in SCTLR_EL1)
+ * TSW:		Trap cache operations by set/way
+ * TWE:		Trap WFE
+ * TWI:		Trap WFI
+ * TIDCP:	Trap L2CTLR/L2ECTLR
+ * BSU_IS:	Upgrade barriers to the inner shareable domain
+ * FB:		Force broadcast of all maintenance operations
+ * AMO:		Override CPSR.A and enable signaling with VA
+ * IMO:		Override CPSR.I and enable
signaling with VI + * FMO: Override CPSR.F and enable signaling with VF + * SWIO: Turn set/way invalidates into set/way clean+invalidate + */ +#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ + HCR_FMO | HCR_IMO) +#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) +#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) + + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * __boot_cpu_mode records what mode CPUs were booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * In this case, both 32bit halves of __boot_cpu_mode will contain the + * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2). + * + * Should the bootloader fail to do this, the two values will be different. + * This allows the kernel to flag an error when the secondaries have come up. + */ +extern u32 __boot_cpu_mode[2]; + +void __hyp_set_vectors(phys_addr_t phys_vector_base); +void __hyp_reset_vectors(void); + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ + return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && + __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ + return __boot_cpu_mode[0] != __boot_cpu_mode[1]; +} + +static inline bool is_kernel_in_hyp_mode(void) +{ + return read_sysreg(CurrentEL) == CurrentEL_EL2; +} + +static inline bool has_vhe(void) +{ + if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) + return true; + + return false; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* ! VIRT_H */ diff --git a/so3/arch/arm64/lib/Makefile b/so3/arch/arm64/lib/Makefile new file mode 100644 index 000000000..d59463ef5 --- /dev/null +++ b/so3/arch/arm64/lib/Makefile @@ -0,0 +1,3 @@ + +obj-y += strchr.o + diff --git a/so3/arch/arm64/lib/strchr.S b/so3/arch/arm64/lib/strchr.S new file mode 100644 index 000000000..fd772f2d3 --- /dev/null +++ b/so3/arch/arm64/lib/strchr.S @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Based on arch/arm/lib/strchr.S + * + * Copyright (C) 1995-2000 Russell King + * Copyright (C) 2013 ARM Ltd. + */ + +#include + +.global strchr + +/* + * Find the first occurrence of a character in a string. + * + * Parameters: + * x0 - str + * x1 - c + * Returns: + * x0 - address of first occurrence of 'c' or 0 + */ +strchr: + and w1, w1, #0xff +1: ldrb w2, [x0], #1 + cmp w2, w1 + ccmp w2, wzr, #4, ne + b.ne 1b + sub x0, x0, #1 + cmp w2, w1 + csel x0, x0, xzr, eq + ret diff --git a/so3/arch/arm64/mmu.c b/so3/arch/arm64/mmu.c new file mode 100644 index 000000000..dfa13c190 --- /dev/null +++ b/so3/arch/arm64/mmu.c @@ -0,0 +1,582 @@ +/* + * Copyright (C) 2014-2019 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#if 0
+#define DEBUG
+#endif
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include
+#include
+
+#include
+
+
+void get_current_addrspace(addrspace_t *addrspace) {
+	int cpu;
+
+	cpu = smp_processor_id();
+#if 0
+	/* Get the current state of MMU */
+	addrspace->ttbr0[cpu] = READ_CP32(TTBR0_32);
+	addrspace->pgtable_paddr = addrspace->ttbr0[cpu] & TTBR0_BASE_ADDR_MASK;
+#endif
+}
+
+/*
+ * Check if two address spaces are identical regarding the MMU configuration.
+ */
+bool is_addrspace_equal(addrspace_t *addrspace1, addrspace_t *addrspace2) {
+	return (addrspace1->pgtable_paddr == addrspace2->pgtable_paddr);
+}
+
+static void alloc_init_l3(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys, bool nocache)
+{
+	u64 *l0pte, *l1pte, *l2pte, *l3pte;
+	u64 *l3pgtable;
+
+	/* These PTEs must exist. */
+	l0pte = l0pte_offset(l0pgtable, addr);
+	BUG_ON(!*l0pte);
+
+	l1pte = l1pte_offset(l0pte, addr);
+	BUG_ON(!*l1pte);
+
+	do {
+		l2pte = l2pte_offset(l1pte, addr);
+
+		/* Does the L3 page table already exist? */
+		if (!*l2pte) {
+
+			/* An L3 page table must be created */
+			l3pgtable = (u64 *) memalign(TTB_L3_SIZE, PAGE_SIZE);
+			BUG_ON(!l3pgtable);
+
+			memset(l3pgtable, 0, TTB_L3_SIZE);
+
+			/* Attach the new L3 page table to the L2 PTE */
+			*l2pte = __pa((addr_t) l3pgtable) & TTB_L2_TABLE_ADDR_MASK;
+
+			set_pte_table(l2pte, DCACHE_WRITEALLOC);
+
+			DBG("Allocating a L3 page table at %p in l2pte: %p with contents: %lx\n", l3pgtable, l2pte, *l2pte);
+		}
+
+		l3pte = l3pte_offset(l2pte, addr);
+
+		*l3pte = phys & TTB_L3_PAGE_ADDR_MASK;
+
+		set_pte_page(l3pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
+
+		DBG("Allocating a 4 KB page at l3pte: %p content: %lx\n", l3pte, *l3pte);
+
+		flush_pte_entry(addr, l3pte);
+
+		phys += PAGE_SIZE;
+		addr += PAGE_SIZE;
+
+	} while (addr != end);
+
+}
+
+static void alloc_init_l2(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys, bool nocache)
+{
+	u64 *l0pte, *l1pte, *l2pte;
+	u64 *l2pgtable;
+	addr_t next;
+
+	/* We are sure this pte exists */
+	l0pte = l0pte_offset(l0pgtable, addr);
+	BUG_ON(!*l0pte);
+
+	do {
+		l1pte = l1pte_offset(l0pte, addr);
+
+		/* Does the L2 page table already exist? */
+		if (!*l1pte) {
+
+			/* An L2 page table must be created */
+			l2pgtable = (u64 *) memalign(TTB_L2_SIZE, PAGE_SIZE);
+			BUG_ON(!l2pgtable);
+
+			memset(l2pgtable, 0, TTB_L2_SIZE);
+
+			/* Attach the new L2 page table to the L1 PTE */
+			*l1pte = __pa((addr_t) l2pgtable) & TTB_L1_TABLE_ADDR_MASK;
+
+			set_pte_table(l1pte, DCACHE_WRITEALLOC);
+
+			DBG("Allocating a L2 page table at %p in l1pte: %p with contents: %lx\n", l2pgtable, l1pte, *l1pte);
+		}
+
+		l2pte = l2pte_offset(l1pte, addr);
+
+		/* Get the next address up to the boundary of a 2 MB block;
+		 * it remains within the 1 GB covered by the current L1 entry.
+		 */
+		next = l2_addr_end(addr, end);
+
+		if (((addr | next | phys) & ~BLOCK_2M_MASK) == 0) {
+
+			*l2pte = phys & TTB_L2_BLOCK_ADDR_MASK;
+
+			set_pte_block(l2pte, (nocache ?
DCACHE_OFF : DCACHE_WRITEALLOC));
+
+			DBG("Allocating a 2 MB block at l2pte: %p content: %lx\n", l2pte, *l2pte);
+
+			flush_pte_entry(addr, l2pte);
+
+			phys += SZ_2M;
+			addr += SZ_2M;
+
+		} else {
+			alloc_init_l3(l0pgtable, addr, next, phys, nocache);
+			phys += next - addr;
+			addr = next;
+		}
+
+	} while (addr != end);
+
+}
+
+static void alloc_init_l1(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys, bool nocache)
+{
+	u64 *l0pte, *l1pte;
+	u64 *l1pgtable;
+	addr_t next;
+
+	do {
+		l0pte = l0pte_offset(l0pgtable, addr);
+
+		/* Does the L1 page table already exist? */
+		if (!*l0pte) {
+
+			/* An L1 page table must be created */
+			l1pgtable = (u64 *) memalign(TTB_L1_SIZE, PAGE_SIZE);
+			BUG_ON(!l1pgtable);
+
+			memset(l1pgtable, 0, TTB_L1_SIZE);
+
+			/* Attach the new L1 page table to the L0 PTE */
+			*l0pte = __pa((addr_t) l1pgtable) & TTB_L0_TABLE_ADDR_MASK;
+
+			set_pte_table(l0pte, DCACHE_WRITEALLOC);
+
+			DBG("Allocating a L1 page table at %p in l0pte: %p with contents: %lx\n", l1pgtable, l0pte, *l0pte);
+		}
+
+		l1pte = l1pte_offset(l0pte, addr);
+
+		/* Get the next address up to the boundary of a 1 GB block;
+		 * it remains within the range covered by the current L0 entry.
+		 */
+		next = l1_addr_end(addr, end);
+
+		if (((addr | next | phys) & ~BLOCK_1G_MASK) == 0) {
+
+			*l1pte = phys & TTB_L1_BLOCK_ADDR_MASK;
+
+			set_pte_block(l1pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
+
+			DBG("Allocating a 1 GB block at l1pte: %p content: %lx\n", l1pte, *l1pte);
+
+			flush_pte_entry(addr, l1pte);
+
+			phys += SZ_1G;
+			addr += SZ_1G;
+
+		} else {
+			alloc_init_l2(l0pgtable, addr, next, phys, nocache);
+			phys += next - addr;
+			addr = next;
+		}
+
+	} while (addr != end);
+
+}
+
+/*
+ * Create a static mapping between a virtual range and a physical range
+ *
+ * @l0pgtable refers to the level 0 page table - if NULL, the system page table is used
+ * @virt_base is the virtual address considered for this mapping
+ * @phys_base is the physical address to be mapped
+ * @size is the number of bytes to be mapped
+ * @nocache is true if the region must be mapped without caching (typically for I/O)
+ *
+ * This function builds the mapping with as few page tables as possible, depending
+ * on the granularity of the mapping: it tries 1 GB blocks first, then 2 MB blocks,
+ * and finally 4 KB pages.
+ * Mapping of blocks at L0 level is not allowed with a 4 KB granule (AArch64).
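+ *
+ * Usage sketch (hypothetical addresses): mapping one uncached 2 MB block for
+ * an I/O region into the system page table could look like
+ *
+ *	create_mapping(NULL, io_vaddr, io_paddr, SZ_2M, true);
+ *
+ * where io_vaddr/io_paddr are 2 MB-aligned addresses chosen by the caller.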
+ *
+ */
+void create_mapping(u64 *l0pgtable, addr_t virt_base, addr_t phys_base, size_t size, bool nocache) {
+	addr_t addr, end, length, next;
+
+	/* If l0pgtable is NULL, we consider the system page table */
+	if (l0pgtable == NULL)
+		l0pgtable = __sys_l0pgtable;
+
+	BUG_ON(!size);
+
+	addr = virt_base & PAGE_MASK;
+	length = ALIGN_UP(size + (virt_base & ~PAGE_MASK), PAGE_SIZE);
+
+	end = addr + length;
+
+	do {
+		next = l0_addr_end(addr, end);
+
+		alloc_init_l1(l0pgtable, addr, next, phys_base, nocache);
+
+		phys_base += next - addr;
+		addr = next;
+
+	} while (addr != end);
+
+	mmu_page_table_flush((addr_t) l0pgtable, (addr_t) (l0pgtable + TTB_L0_ENTRIES));
+}
+
+void release_mapping(u64 *pgtable, addr_t virt_base, addr_t size) {
+
+#if 0
+	uint32_t addr, end, length, next;
+	uint32_t *l1pte;
+
+	/* If l1pgtable is NULL, we consider the system page table */
+	if (pgtable == NULL)
+		pgtable = __sys_l1pgtable;
+
+	addr = virt_base & PAGE_MASK;
+	length = ALIGN_UP(size + (virt_base & ~PAGE_MASK), PAGE_SIZE);
+
+	l1pte = l1pte_offset(pgtable, addr);
+
+	end = addr + length;
+
+	do {
+		next = l1sect_addr_end(addr, end);
+
+		free_l1_mapping(l1pte, addr, next);
+
+		addr = next;
+
+	} while (l1pte++, addr != end);
+#endif
+}
+
+/*
+ * Allocate a new L0 page table; the function panics if the allocation fails.
+ * The page table must be page-aligned.
+ */
+u64 *new_sys_pgtable(void) {
+	u64 *pgtable;
+
+	pgtable = memalign(TTB_L0_SIZE, PAGE_SIZE);
+	if (!pgtable) {
+		printk("%s: heap overflow...\n", __func__);
+		kernel_panic();
+	}
+
+	/* Empty the page table */
+	memset(pgtable, 0, TTB_L0_SIZE);
+
+	return pgtable;
+}
+
+void set_current_pgtable(uint64_t *pgtable) {
+	addrspace_t __addrspace;
+
+	__addrspace.ttbr1[smp_processor_id()] = __pa(pgtable);
+	mmu_switch(&__addrspace);
+}
+
+/**
+ * Replace the current page table with a new one. This is used
+ * typically during the initialization to get a finer-grained
+ * memory mapping.
+ *
+ * @param pgtable
+ */
+void replace_current_pgtable_with(uint64_t *pgtable) {
+	addrspace_t __addrspace;
+
+	/*
+	 * Switch to the temporary page table in order to re-configure the original system page table.
+	 * Warning !! After the switch, we do not have any mapped I/O until the driver core gets initialized.
+	 */
+
+	set_current_pgtable(pgtable);
+
+	__addrspace.ttbr1[smp_processor_id()] = __pa(pgtable);
+	mmu_switch(&__addrspace);
+
+	/* Re-configuring the original system page table */
+	memcpy((void *) __sys_l0pgtable, (unsigned char *) pgtable, TTB_L1_SIZE);
+
+	/* Finally, switch back to the original location of the system page table */
+	set_current_pgtable(__sys_l0pgtable);
+}
+
+
+/*
+ * Initial configuration of system page table
+ */
+void mmu_configure(addr_t fdt_addr) {
+
+	icache_disable();
+	dcache_disable();
+
+	/* The initial page table is only set by CPU #0 (AGENCY_CPU).
+	 * The secondary CPUs use the same page table.
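+	 *
+	 * The boot-time layout built below is: one L0 entry pointing to
+	 * __sys_idmap_l1pgtable (a 1 GB identity mapping of the RAM, plus the
+	 * UART block), and one L0 entry pointing to __sys_linearmap_l1pgtable
+	 * (the linear mapping of the RAM at CONFIG_HYPERVISOR_VIRT_ADDR).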
+	 */
+
+	if (smp_processor_id() == AGENCY_CPU) {
+
+		/* Empty the page table */
+		memset((void *) __sys_l0pgtable, 0, TTB_L0_SIZE);
+		memset((void *) __sys_idmap_l1pgtable, 0, TTB_L1_SIZE);
+		memset((void *) __sys_linearmap_l1pgtable, 0, TTB_L1_SIZE);
+
+		/* Create a 1 GB identity mapping of the running kernel so that the code can carry on right after the MMU is turned on */
+		__sys_l0pgtable[l0pte_index(CONFIG_RAM_BASE)] = (u64) __sys_idmap_l1pgtable & TTB_L0_TABLE_ADDR_MASK;
+		set_pte_table(&__sys_l0pgtable[l0pte_index(CONFIG_RAM_BASE)], DCACHE_WRITEALLOC);
+
+		__sys_idmap_l1pgtable[l1pte_index(CONFIG_RAM_BASE)] = CONFIG_RAM_BASE & TTB_L1_BLOCK_ADDR_MASK;
+		set_pte_block(&__sys_idmap_l1pgtable[l1pte_index(CONFIG_RAM_BASE)], DCACHE_WRITEALLOC);
+
+		/* Create the mapping of the hypervisor code area. */
+
+		__sys_l0pgtable[l0pte_index(CONFIG_HYPERVISOR_VIRT_ADDR)] = (u64) __sys_linearmap_l1pgtable & TTB_L0_TABLE_ADDR_MASK;
+		set_pte_table(&__sys_l0pgtable[l0pte_index(CONFIG_HYPERVISOR_VIRT_ADDR)], DCACHE_WRITEALLOC);
+
+		__sys_linearmap_l1pgtable[l1pte_index(CONFIG_HYPERVISOR_VIRT_ADDR)] = CONFIG_RAM_BASE & TTB_L1_BLOCK_ADDR_MASK;
+		set_pte_block(&__sys_linearmap_l1pgtable[l1pte_index(CONFIG_HYPERVISOR_VIRT_ADDR)], DCACHE_WRITEALLOC);
+
+		/* Early I/O mapping for the UART. The UART is assumed to live in a different L1 entry than the RAM. */
+
+		__sys_idmap_l1pgtable[l1pte_index(UART_BASE)] = UART_BASE & TTB_L1_BLOCK_ADDR_MASK;
+		set_pte_block(&__sys_idmap_l1pgtable[l1pte_index(UART_BASE)], DCACHE_OFF);
+	}
+
+	mmu_setup(__sys_l0pgtable);
+
+	dcache_enable();
+	icache_enable();
+
+	if (smp_processor_id() == AGENCY_CPU) {
+		__fdt_addr = (addr_t *) fdt_addr;
+
+		/* The device tree is visible in the L_PAGE_OFFSET area */
+		fdt_vaddr = (addr_t *) __lva(fdt_addr);
+	}
+}
+
+#if 0
+/*
+ * Clear the L1 PTE used for mapping of a specific virtual address.
+ */
+void clear_l1pte(uint32_t *l1pgtable, uint32_t vaddr) {
+	uint32_t *l1pte;
+
+	/* If l1pgtable is NULL, we consider the system page table */
+	if (l1pgtable == NULL)
+		l1pgtable = __sys_l1pgtable;
+
+	l1pte = l1pte_offset(l1pgtable, vaddr);
+
+	*l1pte = 0;
+
+	flush_pte_entry(l1pte);
+}
+
+#endif
+
+/*
+ * Switch the MMU to a L0 page table.
+ * Only ttbr1 is used when dealing with our hypervisor, which lives in a kernel space
+ * area, i.e. at addresses starting with 0xffff.... ttbr0 is therefore unused as soon
+ * as the identity mapping of the RAM is not necessary anymore.
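+ *
+ * Concretely (assuming the usual TTBR split), an access to an address such
+ * as 0xffff000000001000 is translated via ttbr1, whereas a low address such
+ * as 0x0000000080001000 would be translated via ttbr0.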
+ */
+void mmu_switch(addrspace_t *aspace) {
+	flush_dcache_all();
+
+	__mmu_switch(aspace->ttbr1[smp_processor_id()]);
+
+	invalidate_icache_all();
+	__asm_invalidate_tlb_all();
+
+}
+
+void dump_pgtable(u64 *l0pgtable) {
+
+	u64 i, j, k, l;
+	u64 *l0pte, *l1pte, *l2pte, *l3pte;
+
+	lprintk(" ***** Page table dump *****\n");
+
+	for (i = 0; i < TTB_L0_ENTRIES; i++) {
+		l0pte = l0pgtable + i;
+		if ((i != 0xe0) && *l0pte) {
+
+			lprintk(" - L0 pte@%lx (idx %x) mapping %lx content: %lx\n", l0pgtable+i, i, i << TTB_I0_SHIFT, *l0pte);
+			BUG_ON(pte_type(l0pte) != PTE_TYPE_TABLE);
+
+			/* Walking through the blocks/table entries */
+			for (j = 0; j < TTB_L1_ENTRIES; j++) {
+				l1pte = ((u64 *) __va(*l0pte & TTB_L0_TABLE_ADDR_MASK)) + j;
+				if (*l1pte) {
+					if (pte_type(l1pte) == PTE_TYPE_TABLE) {
+						lprintk("   (TABLE) L1 pte@%lx (idx %x) mapping %lx content: %lx\n", l1pte, j,
+							(i << TTB_I0_SHIFT) + (j << TTB_I1_SHIFT), *l1pte);
+
+						for (k = 0; k < TTB_L2_ENTRIES; k++) {
+							l2pte = ((u64 *) __va(*l1pte & TTB_L1_TABLE_ADDR_MASK)) + k;
+							if (*l2pte) {
+								if (pte_type(l2pte) == PTE_TYPE_TABLE) {
+									lprintk("      (TABLE) L2 pte@%lx (idx %x) mapping %lx content: %lx\n", l2pte, k,
+										(i << TTB_I0_SHIFT) + (j << TTB_I1_SHIFT) + (k << TTB_I2_SHIFT), *l2pte);
+
+									for (l = 0; l < TTB_L3_ENTRIES; l++) {
+										l3pte = ((u64 *) __va(*l2pte & TTB_L2_TABLE_ADDR_MASK)) + l;
+										if (*l3pte)
+											lprintk("         (PAGE) L3 pte@%lx (idx %x) mapping %lx content: %lx\n", l3pte, l,
+												(i << TTB_I0_SHIFT) + (j << TTB_I1_SHIFT) + (k << TTB_I2_SHIFT) + (l << TTB_I3_SHIFT), *l3pte);
+									}
+								} else {
+									/* Necessarily of BLOCK type */
+									BUG_ON(pte_type(l2pte) != PTE_TYPE_BLOCK);
+									lprintk("      (BLOCK) L2 pte@%lx (idx %x) mapping %lx content: %lx\n", l2pte, k,
+										(i << TTB_I0_SHIFT) + (j << TTB_I1_SHIFT) + (k << TTB_I2_SHIFT), *l2pte);
+								}
+							}
+						}
+					} else {
+						/* Necessarily of BLOCK type */
+						BUG_ON(pte_type(l1pte) != PTE_TYPE_BLOCK);
+
+						lprintk("   (BLOCK) L1 pte@%lx (idx %x) mapping %lx content: %lx\n", l1pte, j, (i << TTB_I0_SHIFT) + (j << TTB_I1_SHIFT), *l1pte);
+					}
+				}
+			}
+
+		}
+	}
+}
+
+#if 0
+
+/* Duplicate the kernel area by doing a copy of L1 PTEs from the system page table */
+void pgtable_copy_kernel_area(uint32_t *l1pgtable) {
+	int i1;
+
+	for (i1 = l1pte_index(L_PAGE_OFFSET); i1 < TTB_L1_ENTRIES; i1++)
+		l1pgtable[i1] = __sys_l1pgtable[i1];
+
+	mmu_page_table_flush((uint32_t) l1pgtable, (uint32_t) (l1pgtable + TTB_L1_ENTRIES));
+}
+
+void dump_pgtable(uint32_t *l1pgtable) {
+
+	int i, j;
+	uint32_t *l1pte, *l2pte;
+
+	lprintk(" ***** Page table dump *****\n");
+
+	for (i = 0; i < TTB_L1_ENTRIES; i++) {
+		l1pte = l1pgtable + i;
+		if (*l1pte) {
+			if (l1pte_is_sect(*l1pte))
+				lprintk(" - L1 pte@%p (idx %x) mapping %x is section type content: %x\n", l1pgtable+i, i, i << (32 - TTB_L1_ORDER), *l1pte);
+			else
+				lprintk(" - L1 pte@%p (idx %x) is PT type content: %x\n", l1pgtable+i, i, *l1pte);
+
+			if (!l1pte_is_sect(*l1pte)) {
+
+				for (j = 0; j < 256; j++) {
+					l2pte = ((uint32_t *) __va(*l1pte & TTB_L1_PAGE_ADDR_MASK)) + j;
+					if (*l2pte)
+						lprintk("   - L2 pte@%p (i2=%x) mapping %x content: %x\n", l2pte, j, pte_index_to_vaddr(i, j), *l2pte);
+				}
+			}
+		}
+	}
+}
+
+void dump_current_pgtable(void) {
+	dump_pgtable((uint32_t *) cpu_get_l1pgtable());
+}
+
+/*
+ * Get the physical address from a virtual address (valid for any virt. address).
+ * The function reads the page table(s).
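+ *
+ * e.g. (sketch, section case): a vaddr such as 0xc0123456 mapped by a 1 MB
+ * section resolves to (*l1pte & TTB_L1_SECT_ADDR_MASK) | 0x23456, i.e. the
+ * section base plus the 20-bit offset within the section.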
+ */
+uint32_t virt_to_phys_pt(uint32_t vaddr) {
+	uint32_t *l1pte, *l2pte;
+	uint32_t offset;
+	addrspace_t current_addrspace;
+
+	get_current_addrspace(&current_addrspace);
+
+	/* Get the L1 PTE. */
+	l1pte = l1pte_offset((uint32_t *) current_addrspace.pgtable_vaddr, vaddr);
+
+	BUG_ON(!*l1pte);
+	if (l1pte_is_sect(*l1pte)) {
+
+		/* For a 1 MB section, the offset is the low 20 bits of vaddr. */
+		return (*l1pte & TTB_L1_SECT_ADDR_MASK) | (vaddr & ~TTB_L1_SECT_ADDR_MASK);
+
+	} else {
+
+		/* For a 4 KB page, the offset is the low 12 bits of vaddr. */
+		offset = vaddr & ~PAGE_MASK;
+
+		l2pte = l2pte_offset(l1pte, vaddr);
+
+		return (*l2pte & TTB_L2_ADDR_MASK) | offset;
+	}
+
+}
+
+void vectors_init(void) {
+
+	extern char __stubs_start[], __stubs_end[];
+	extern char __vectors_start[], __vectors_end[];
+	void *vectors_page;
+
+	memset(&pseudo_usr_mode, 0, NR_CPUS * sizeof(unsigned int));
+
+	/* Allocate a page for the vectors page */
+	vectors_page = alloc_heap_page();
+	BUG_ON(!vectors_page);
+
+	create_mapping(NULL, VECTORS_BASE, virt_to_phys(vectors_page), PAGE_SIZE, false);
+
+	memcpy(vectors_page, __vectors_start, __vectors_end - __vectors_start);
+	memcpy(vectors_page + 0x200, __stubs_start, __stubs_end - __stubs_start);
+
+	flush_dcache_range((unsigned long) vectors_page, (unsigned long) vectors_page + PAGE_SIZE);
+
+}
+
+#endif
diff --git a/so3/arch/arm64/rpi4_64/Kconfig b/so3/arch/arm64/rpi4_64/Kconfig
new file mode 100644
index 000000000..47592aaef
--- /dev/null
+++ b/so3/arch/arm64/rpi4_64/Kconfig
@@ -0,0 +1,10 @@
+config RAM_BASE
+	depends on RPI4_64
+	hex "RAM Memory physical base address"
+	default 0x00000000
+
+config RAM_SIZE
+	depends on RPI4_64
+	hex "RAM memory size (Up to the hypervisor)"
+	default 0x3f000000
+
diff --git a/so3/arch/arm64/rpi4_64/Makefile b/so3/arch/arm64/rpi4_64/Makefile
new file mode 100644
index 000000000..b73d934f5
--- /dev/null
+++ b/so3/arch/arm64/rpi4_64/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile
+#
+
+obj-y += platsmp.o
+
diff --git a/so3/arch/arm64/rpi4_64/include/mach/gic.h b/so3/arch/arm64/rpi4_64/include/mach/gic.h
new file mode 100644
index 000000000..a2955e255
--- /dev/null
+++ b/so3/arch/arm64/rpi4_64/include/mach/gic.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MACH_GIC_H
+#define MACH_GIC_H
+
+#define GIC_DIST_PHYS	0xff841000
+#define GIC_DIST_SIZE	0x1000
+
+#define GIC_CPU_PHYS	0xff842000
+#define GIC_CPU_SIZE	0x1000
+
+
+#endif /* MACH_GIC_H */
+
diff --git a/so3/arch/arm64/rpi4_64/include/mach/rpi4.h b/so3/arch/arm64/rpi4_64/include/mach/rpi4.h
new file mode 100644
index 000000000..b54b589e4
--- /dev/null
+++ b/so3/arch/arm64/rpi4_64/include/mach/rpi4.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016,2017 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef RPI4_H +#define RPI4_H + +#define LOCAL_INTC_PHYS 0xff800000 +#define LOCAL_INTC_SIZE 0x100 + +/* + * From BCM2835 ARM peripherals documentation. + * + * Mini UART register offsets. + */ + +#define MU_IO_REG 0x00 +#define MU_IER_REG 0x04 +#define MU_IIR_REG 0x08 +#define MU_LCR_REG 0x0C +#define MU_MCR_REG 0x10 +#define MU_LSR_REG 0x14 +#define MU_MSR_REG 0x18 +#define MU_SCRATCH 0x1C +#define MU_CNTL_REG 0x20 +#define MU_STAT_REG 0x24 +#define MU_BAUD_REG 0x28 + +#define MU_IO_DATA (0xFF << 0) +#define MU_STAT_SP_AVAIL (1 << 0) +#define MU_STAT_RX_FIFO_FILL (0xF << 16) +#define MU_LSR_TX_EMPTY (1 << 5) +#define MU_LSR_DATA_READY (1 << 0) +/* TODO : all other bit offsets */ + + +/* + * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and + * the next 4 bits are the CPU's per-mailbox FIQ enables (which + * override the IRQ bits). + */ +#define LOCAL_MAILBOX_INT_CONTROL0 0x050 + +/* + * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and + * these bits are organized by mailbox number and then CPU number. We + * use mailbox 0 for IPIs. The mailbox's interrupt is raised while + * any bit is set. + */ +#define LOCAL_MAILBOX0_SET0 0x080 +#define LOCAL_MAILBOX3_SET0 0x08c + +/* Mailbox write-to-clear bits. */ +#define LOCAL_MAILBOX0_CLR0 0x0c0 +#define LOCAL_MAILBOX3_CLR0 0x0cc + + +#endif /* RPI4_H */ diff --git a/so3/arch/arm64/rpi4_64/include/mach/uart.h b/so3/arch/arm64/rpi4_64/include/mach/uart.h new file mode 100644 index 000000000..6eff940f8 --- /dev/null +++ b/so3/arch/arm64/rpi4_64/include/mach/uart.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2020 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef MACH_UART_H +#define MACH_UART_H + +#define UART_BASE 0xfe215040 + +#endif /* MACH_UART_H */ + diff --git a/so3/arch/arm64/rpi4_64/platsmp.c b/so3/arch/arm64/rpi4_64/platsmp.c new file mode 100644 index 000000000..9a439372d --- /dev/null +++ b/so3/arch/arm64/rpi4_64/platsmp.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016,2017 Daniel Rossier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+#include
+
+extern void secondary_startup(void);
+
+
+void smp_prepare_cpus(unsigned int max_cpus)
+{
+	/* Nothing to do for Rpi4 */
+}
+
+void smp_boot_secondary(unsigned int cpu)
+{
+	unsigned long secondary_startup_phys = (unsigned long) virt_to_phys((void *) secondary_startup);
+	void *intc_vaddr;
+
+	printk("%s: booting CPU: %d...\n", __func__, cpu);
+
+	intc_vaddr = ioremap(LOCAL_INTC_PHYS, LOCAL_INTC_SIZE);
+
+	writel(secondary_startup_phys, intc_vaddr + LOCAL_MAILBOX3_SET0 + 16 * cpu);
+
+	dsb(sy);
+	sev();
+}
+
+void smp_secondary_init(unsigned int cpu) {
+	/* Nothing to do for Rpi4 */
+}
+
diff --git a/so3/arch/arm64/setup.c b/so3/arch/arm64/setup.c
new file mode 100644
index 000000000..db3108ac6
--- /dev/null
+++ b/so3/arch/arm64/setup.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2014-2019 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/*
+ * Low-level ARM-specific setup
+ */
+
+#include
+
+#if 0
+extern unsigned char __irq_stack_start[];
+
+/* Keep the original CPU ID so that we can detect
+ * undesired activities running on another CPU.
+ */
+uint32_t origin_cpu;
+
+/*
+ * Only 3 32-bit fields are sufficient (see exception.S)
+ */
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+	u32 fiq[3];
+};
+
+struct stack stacks[NR_CPUS];
+
+/*
+ * Set up exception stacks for all modes except SVC and USR
+ */
+void setup_exception_stacks(void) {
+	struct stack *stk = &stacks[smp_processor_id()];
+
+	/* Need to set the CPU in the different modes and back to SVC at the end */
+	__asm__ (
+		"msr	cpsr_c, %1\n\t"
+		"add	r14, %0, %2\n\t"
+		"mov	sp, r14\n\t"
+		"msr	cpsr_c, %3\n\t"
+		"add	r14, %0, %4\n\t"
+		"mov	sp, r14\n\t"
+		"msr	cpsr_c, %5\n\t"
+		"add	r14, %0, %6\n\t"
+		"mov	sp, r14\n\t"
+		"msr	cpsr_c, %7\n\t"
+		"add	r14, %0, %8\n\t"
+		"mov	sp, r14\n\t"
+		"msr	cpsr_c, %9"
+		:
+		: "r" (stk),
+		  "I" (PSR_F_BIT | PSR_I_BIT | PSR_MODE_IRQ), "I" (offsetof(struct stack, irq[0])),
+		  "I" (PSR_F_BIT | PSR_I_BIT | PSR_MODE_ABT), "I" (offsetof(struct stack, abt[0])),
+		  "I" (PSR_F_BIT | PSR_I_BIT | PSR_MODE_UND), "I" (offsetof(struct stack, und[0])),
+		  "I" (PSR_F_BIT | PSR_I_BIT | PSR_MODE_FIQ), "I" (offsetof(struct stack, fiq[0])),
+		  "I" (PSR_F_BIT | PSR_I_BIT | PSR_MODE_SVC)
+		: "r14");
+
+}
+
+void arm_init_domains(void)
+{
+	u32 reg;
+
+	reg = get_dacr();
+	/*
+	 * Set DOMAIN to manager access; with this setting, accesses are
+	 * not permission-checked against the page tables by the MMU.
+	 */
+	reg &= ~DOMAIN_MASK;
+	reg |= DOMAIN_MANAGER;
+
+	set_dacr(reg);
+}
+
+void cpu_init(void) {
+	/* Original boot CPU identification to prevent undesired activities on another CPU. */
+	origin_cpu = smp_processor_id();
+
+	/* Set up the different stacks according to CPU mode */
+	setup_exception_stacks();
+}
+#endif
+
+/*
+ * Low-level initialization before the main bootstrap process.
+ */
+void setup_arch(void) {
+#if 0
+	cpu_init();
+
+	vfp_enable();
+
+	/* A low-level UART should be initialized here so that subsystems initialization (like MMC) can already print out logs ... */
+
+	vectors_init();
+
+	/*
+	 * Finally flush the caches and TLB to ensure that we're in a
+	 * consistent state wrt the writebuffer. This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back. After this point, we can start to touch devices again.
+	 */
+	flush_dcache_all();
+#endif
+}
diff --git a/so3/arch/arm64/so3.lds b/so3/arch/arm64/so3.lds
new file mode 100644
index 000000000..c4c5856bf
--- /dev/null
+++ b/so3/arch/arm64/so3.lds
@@ -0,0 +1,99 @@
+/*
+ * Linker script for the so3 kernel
+ */
+
+OUTPUT_ARCH(aarch64)
+ENTRY(__start)
+
+/* The sizes of the stacks used by the application. */
+
+/*
+ * The SVC stack is used for kernel activities *and* for each newly created thread.
+ * Indeed, a user thread (running in user space) will also involve kernel activities
+ * through syscalls and the like.
+ */
+SVC_STACK_SIZE = 32 * 1024;	/* Initial system stack */
+HEAP_SIZE = 2 * 1024 * 1024;	/* 2 MB dedicated to the kernel heap */
+
+SECTIONS
+{
+	/* We start at the same entry point as Linux */
+	/* to avoid issues on boards like the Raspberry Pi 4 */
+	/* where some firmware code is placed at 0x00000000 with a size of 0x1000 */
+
+	. = ENTRY_ADDR + 0x80000;
+
+	.head.text :
+	{
+		*(.head.text);
+	}
+
+	.text :
+	{
+		*(.text*)
+	}
+
+	. = ALIGN(4096);
+	.data :
+	{
+		*(.data*)
+	}
+
+	. = ALIGN(4096);
+
+	.bss :
+	{
+		__bss_start = .;
+
+		*(.bss)
+
+		. = ALIGN(4096);
+
+		__per_cpu_start = .;
+
+		*(.bss.percpu)
+
+		. = ALIGN(128);
+		*(.bss.percpu.read_mostly)
+
+		. = ALIGN(128);
+
+		__per_cpu_data_end = .;
+
+		. = ALIGN(128);
+
+		__per_cpu_end = .;
+
+		__bss_end = .;
+	}
+
+	/* Here is the L0 system page table */
+	. = ALIGN(4096);
+
+	__sys_l0pgtable = .;
+
+	/* The size of this page table is 4 KB */
+
+	. += 4096;
+
+	/* Followed by the L1 system page table required for identity mapping */
+	__sys_idmap_l1pgtable = .;
+
+	. += 4096;
+
+	/* Another L1 system page table required for the (first) linear mapping */
+	__sys_linearmap_l1pgtable = .;
+
+	. += 4096;
+
+	.heap :
+	{
+		__heap_base_addr = . ;
+
+		. += HEAP_SIZE;
+	}
+
+	. = ALIGN(4096);
+
+	__end = .;
+}
diff --git a/so3/arch/arm64/spinlock.S b/so3/arch/arm64/spinlock.S
new file mode 100644
index 000000000..73f6c3577
--- /dev/null
+++ b/so3/arch/arm64/spinlock.S
@@ -0,0 +1,44 @@
+/* Borrowed from trusted-firmware-a */
+
+.global	_raw_spin_lock
+.global	_raw_spin_unlock
+.global	_raw_spin_trylock
+
+/*
+ * Acquire the lock with load-acquire exclusive / store exclusive.
+ *
+ * We exclusively read the lock word with acquire semantics; if it is free,
+ * we try to exclusively store 1 into it. If the lock is held, the address
+ * stays exclusively monitored and we wait in WFE until it is released.
+ *
+ * void spin_lock(spinlock_t *lock);
+ */
+_raw_spin_lock:
+	mov	w2, #1
+	sevl
+l1:	wfe
+l2:	ldaxr	w1, [x0]
+	cbnz	w1, l1
+	stxr	w1, w2, [x0]
+	cbnz	w1, l2
+	ret
+
+/*
+ * Release a lock previously acquired by spin_lock.
+ *
+ * Use store-release to unconditionally clear the spinlock variable.
+ * The store operation generates an event for all cores waiting in WFE
+ * when the address is monitored by the global monitor.
+ *
+ * void spin_unlock(spinlock_t *lock);
+ */
+_raw_spin_unlock:
+	stlr	wzr, [x0]
+	ret
+
+/*
+ * Try to acquire the lock once, without waiting. This is a minimal sketch
+ * of the usual exclusive-access sequence: it returns 1 when the lock has
+ * been acquired and 0 when the lock is already held (or the exclusive
+ * store failed).
+ *
+ * int spin_trylock(spinlock_t *lock);
+ */
+_raw_spin_trylock:
+	mov	w2, #1
+	ldaxr	w1, [x0]	// acquire-exclusive read of the lock word
+	cbnz	w1, 1f		// already held: fail
+	stxr	w1, w2, [x0]	// try to store 1; w1 = 0 on success
+	cbnz	w1, 1f		// exclusive store failed: fail
+	mov	w0, #1
+	ret
+1:	clrex			// clear the exclusive monitor
+	mov	w0, #0
+	ret
+
diff --git a/so3/arch/arm64/virt64/Kconfig b/so3/arch/arm64/virt64/Kconfig
new file mode 100644
index 000000000..2ab1d916c
--- /dev/null
+++ b/so3/arch/arm64/virt64/Kconfig
@@ -0,0 +1,12 @@
+
+config RAM_BASE
+	depends on VIRT64
+	hex "RAM Memory physical base address"
+	default 0x00000000
+
+config RAM_SIZE
+	depends on VIRT64
+	hex "RAM memory size (Up to the hypervisor)"
+	default 0x3f000000
+
+
diff --git a/so3/arch/arm64/virt64/include/mach/gic.h b/so3/arch/arm64/virt64/include/mach/gic.h
new file mode 100644
index 000000000..0ac862aec
--- /dev/null
+++ b/so3/arch/arm64/virt64/include/mach/gic.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MACH_GIC_H
+#define MACH_GIC_H
+
+#define GIC_DIST_PHYS	0x08000000
+#define GIC_DIST_SIZE	0x10000
+
+#define GIC_CPU_PHYS	0x08010000
+#define GIC_CPU_SIZE	0x10000
+
+
+#endif /* MACH_GIC_H */
+
diff --git a/so3/arch/arm64/virt64/include/mach/uart.h b/so3/arch/arm64/virt64/include/mach/uart.h
new file mode 100644
index 000000000..46c889d7c
--- /dev/null
+++ b/so3/arch/arm64/virt64/include/mach/uart.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2020 Daniel Rossier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MACH_UART_H
+#define MACH_UART_H
+
+#define UART_BASE	0x9000000
+
+#endif /* MACH_UART_H */
+
+
diff --git a/so3/configs/rpi4_defconfig b/so3/configs/rpi4_defconfig
index 5a87d0bb8..2ac259060 100644
--- a/so3/configs/rpi4_defconfig
+++ b/so3/configs/rpi4_defconfig
@@ -1,17 +1,16 @@
 #
-# Automatically generated make config: don't edit
+# Automatically generated file; DO NOT EDIT.
# SO3 Configuration # - -# -# General -# -CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" +CONFIG_STANDALONE=y +CONFIG_ARCH_ARM32=y +# CONFIG_ARCH_ARM64 is not set +CONFIG_ARCH="arm32" +CONFIG_CROSS_COMPILE="arm-none-linux-gnueabihf-" # # Platform # -CONFIG_STANDALONE=y # CONFIG_VEXPRESS is not set CONFIG_RPI4=y CONFIG_RAM_BASE=0x00000000 @@ -72,10 +71,3 @@ CONFIG_IPC_PIPE=y # CONFIG_RTOS is not set CONFIG_MMU=y CONFIG_DEBUG_PRINTK=y - -# -# Generated files -# -CONFIG_ELF=y -CONFIG_BIN=y -# end of Generated files diff --git a/so3/configs/vexpress_fb_defconfig b/so3/configs/vexpress_fb_defconfig index b7bb72da0..454fcb675 100644 --- a/so3/configs/vexpress_fb_defconfig +++ b/so3/configs/vexpress_fb_defconfig @@ -2,17 +2,15 @@ # Automatically generated file; DO NOT EDIT. # SO3 Configuration # - -# -# General -# -CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" -# end of General +CONFIG_STANDALONE=y +CONFIG_ARCH_ARM32=y +# CONFIG_ARCH_ARM64 is not set +CONFIG_ARCH="arm32" +CONFIG_CROSS_COMPILE="arm-none-linux-gnueabihf-" # # Platform # -CONFIG_STANDALONE=y CONFIG_VEXPRESS=y # CONFIG_RPI4 is not set CONFIG_RAM_BASE=0x80000000 @@ -75,10 +73,3 @@ CONFIG_IPC_PIPE=y # CONFIG_RTOS is not set CONFIG_MMU=y CONFIG_DEBUG_PRINTK=y - -# -# Generated files -# -CONFIG_ELF=y -CONFIG_BIN=y -# end of Generated files diff --git a/so3/configs/vexpress_full_defconfig b/so3/configs/vexpress_full_defconfig index afe451127..4bcc01c14 100644 --- a/so3/configs/vexpress_full_defconfig +++ b/so3/configs/vexpress_full_defconfig @@ -1,20 +1,21 @@ # -# Automatically generated make config: don't edit +# Automatically generated file; DO NOT EDIT. # SO3 Configuration # - -# -# General -# -CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" +CONFIG_STANDALONE=y +CONFIG_ARCH_ARM32=y +# CONFIG_ARCH_ARM64 is not set +CONFIG_ARCH="arm32" +CONFIG_CROSS_COMPILE="arm-none-linux-gnueabihf-" # # Platform # -CONFIG_STANDALONE=y CONFIG_VEXPRESS=y # CONFIG_RPI4 is not set CONFIG_RAM_BASE=0x80000000 +# end of Platform + # CONFIG_THREAD_ENV is not set CONFIG_PROC_ENV=y @@ -24,6 +25,7 @@ CONFIG_PROC_ENV=y CONFIG_SCHED_RR=y # CONFIG_SCHED_PRIO is not set CONFIG_SCHED_FREQ_PREEMPTION=y +# end of SO3 Scheduling configuration # # Drivers @@ -43,11 +45,14 @@ CONFIG_GIC=y CONFIG_PL111_CLCD=y CONFIG_PL050_KMI=y CONFIG_SMC911X=y +# end of Drivers # # SO3 Applications # # CONFIG_APP_SAMPLE is not set +# end of SO3 Applications + CONFIG_KERNEL_VIRT_ADDR=0xc0000000 # @@ -57,18 +62,15 @@ CONFIG_FS_FAT=y # CONFIG_ROOTFS_NONE is not set # CONFIG_ROOTFS_MMC is not set CONFIG_ROOTFS_RAMDEV=y +# end of Filesystems # # IPC # CONFIG_IPC_SIGNAL=y CONFIG_IPC_PIPE=y +# end of IPC + # CONFIG_RTOS is not set CONFIG_MMU=y CONFIG_DEBUG_PRINTK=y - -# -# Generated files -# -CONFIG_ELF=y -CONFIG_BIN=y diff --git a/so3/configs/vexpress_mmc_defconfig b/so3/configs/vexpress_mmc_defconfig index 0aa815c10..1f2845cc1 100644 --- a/so3/configs/vexpress_mmc_defconfig +++ b/so3/configs/vexpress_mmc_defconfig @@ -1,20 +1,21 @@ # -# Automatically generated make config: don't edit +# Automatically generated file; DO NOT EDIT. 
# SO3 Configuration # - -# -# General -# -CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" +CONFIG_STANDALONE=y +CONFIG_ARCH_ARM32=y +# CONFIG_ARCH_ARM64 is not set +CONFIG_ARCH="arm32" +CONFIG_CROSS_COMPILE="arm-none-linux-gnueabihf-" # # Platform # -CONFIG_STANDALONE=y CONFIG_VEXPRESS=y # CONFIG_RPI4 is not set CONFIG_RAM_BASE=0x80000000 +# end of Platform + # CONFIG_THREAD_ENV is not set CONFIG_PROC_ENV=y @@ -24,6 +25,7 @@ CONFIG_PROC_ENV=y CONFIG_SCHED_RR=y # CONFIG_SCHED_PRIO is not set CONFIG_SCHED_FREQ_PREEMPTION=y +# end of SO3 Scheduling configuration # # Drivers @@ -42,11 +44,14 @@ CONFIG_SP804=y CONFIG_GIC=y # CONFIG_PL111_CLCD is not set # CONFIG_PL050_KMI is not set +# end of Drivers # # SO3 Applications # # CONFIG_APP_SAMPLE is not set +# end of SO3 Applications + CONFIG_KERNEL_VIRT_ADDR=0xc0000000 # @@ -56,18 +61,15 @@ CONFIG_FS_FAT=y # CONFIG_ROOTFS_NONE is not set CONFIG_ROOTFS_MMC=y # CONFIG_ROOTFS_RAMDEV is not set +# end of Filesystems # # IPC # CONFIG_IPC_SIGNAL=y CONFIG_IPC_PIPE=y +# end of IPC + # CONFIG_RTOS is not set CONFIG_MMU=y CONFIG_DEBUG_PRINTK=y - -# -# Generated files -# -CONFIG_ELF=y -CONFIG_BIN=y diff --git a/so3/configs/vexpress_net_defconfig b/so3/configs/vexpress_net_defconfig index edf71bab6..e8683b875 100644 --- a/so3/configs/vexpress_net_defconfig +++ b/so3/configs/vexpress_net_defconfig @@ -1,20 +1,21 @@ # -# Automatically generated make config: don't edit +# Automatically generated file; DO NOT EDIT. # SO3 Configuration # - -# -# General -# -CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" +CONFIG_STANDALONE=y +CONFIG_ARCH_ARM32=y +# CONFIG_ARCH_ARM64 is not set +CONFIG_ARCH="arm32" +CONFIG_CROSS_COMPILE="arm-none-linux-gnueabihf-" # # Platform # -CONFIG_STANDALONE=y CONFIG_VEXPRESS=y # CONFIG_RPI4 is not set CONFIG_RAM_BASE=0x80000000 +# end of Platform + # CONFIG_THREAD_ENV is not set CONFIG_PROC_ENV=y @@ -24,6 +25,7 @@ CONFIG_PROC_ENV=y CONFIG_SCHED_RR=y # CONFIG_SCHED_PRIO is not set CONFIG_SCHED_FREQ_PREEMPTION=y +# end of SO3 Scheduling configuration # # Drivers @@ -43,11 +45,14 @@ CONFIG_GIC=y # CONFIG_PL111_CLCD is not set # CONFIG_PL050_KMI is not set CONFIG_SMC911X=y +# end of Drivers # # SO3 Applications # # CONFIG_APP_SAMPLE is not set +# end of SO3 Applications + CONFIG_KERNEL_VIRT_ADDR=0xc0000000 # @@ -57,18 +62,15 @@ CONFIG_FS_FAT=y # CONFIG_ROOTFS_NONE is not set # CONFIG_ROOTFS_MMC is not set CONFIG_ROOTFS_RAMDEV=y +# end of Filesystems # # IPC # CONFIG_IPC_SIGNAL=y CONFIG_IPC_PIPE=y +# end of IPC + # CONFIG_RTOS is not set CONFIG_MMU=y CONFIG_DEBUG_PRINTK=y - -# -# Generated files -# -CONFIG_ELF=y -CONFIG_BIN=y diff --git a/so3/include/device/net.h b/so3/include/device/net.h index d99d68caa..d053f1ba3 100644 --- a/so3/include/device/net.h +++ b/so3/include/device/net.h @@ -34,7 +34,7 @@ struct eth_dev { char name[ETH_NAME_LEN]; unsigned char enetaddr[ARP_HLEN]; - phys_addr_t iobase; + addr_t iobase; int state; int (*init)(struct eth_dev *); diff --git a/so3/include/memory.h b/so3/include/memory.h index bfb7a47ec..4ec31fda1 100644 --- a/so3/include/memory.h +++ b/so3/include/memory.h @@ -121,9 +121,9 @@ uint32_t get_kernel_size(void); uint32_t *current_pgtable(void); void init_io_mapping(void); -uint32_t io_map(uint32_t phys, size_t size); -void io_unmap(uint32_t vaddr); -io_map_t *find_io_map_by_paddr(uint32_t paddr); +uint32_t io_map(addr_t phys, size_t size); +void io_unmap(addr_t vaddr); +io_map_t *find_io_map_by_paddr(addr_t paddr); void readjust_io_map(unsigned pfn_offset); void dump_io_maplist(void); diff --git a/so3/include/types.h 
b/so3/include/types.h
index f408d8416..768ca31cb 100644
--- a/so3/include/types.h
+++ b/so3/include/types.h
@@ -192,7 +192,7 @@ typedef unsigned char bool;
 #define ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
 #define ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
 
-typedef uint32_t phys_addr_t;
+typedef unsigned long addr_t;
 
 /*
  * Fast implementation of tolower() for internal usage. Do not use in your
diff --git a/so3/lib/vsprintf.c b/so3/lib/vsprintf.c
index fa7a171bd..b49caca4c 100644
--- a/so3/lib/vsprintf.c
+++ b/so3/lib/vsprintf.c
@@ -964,8 +964,8 @@ static char *address_val(char *buf, char *end, const void *addr,
 	switch (fmt[1]) {
 	case 'p':
 	default:
-		num = *(const phys_addr_t *)addr;
-		spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+		num = *(const addr_t *)addr;
+		spec.field_width = sizeof(addr_t) * 2 + 2;
 		break;
 	}
diff --git a/so3/mm/memory.c b/so3/mm/memory.c
index 1566c3008..2dbfae839 100644
--- a/so3/mm/memory.c
+++ b/so3/mm/memory.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2019 Daniel Rossier
+ * Copyright (C) 2014-2021 Daniel Rossier
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -283,7 +283,7 @@ void dump_io_maplist(void) {
 }
 
 /* Map a I/O address range to its physical range */
-uint32_t io_map(uint32_t phys, size_t size) {
+uint32_t io_map(addr_t phys, size_t size) {
 	io_map_t *io_map;
 	struct list_head *pos;
 	io_map_t *cur = NULL;
@@ -348,9 +348,9 @@ uint32_t io_map(uint32_t phys, size_t size) {
 }
 
 /*
- * Try to find an io_map entry corresponding to a specific pvaddr .
+ * Try to find an io_map entry corresponding to a specific paddr.
 */
-io_map_t *find_io_map_by_paddr(uint32_t paddr) {
+io_map_t *find_io_map_by_paddr(addr_t paddr) {
 	struct list_head *pos;
 	io_map_t *io_map;
 
@@ -366,7 +366,7 @@ io_map_t *find_io_map_by_paddr(uint32_t paddr) {
 
 /*
  * Remove a mapping.
  */
-void io_unmap(uint32_t vaddr) {
+void io_unmap(addr_t vaddr) {
 	io_map_t *cur = NULL;
 	struct list_head *pos, *q;
--
GitLab