Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
131 changes: 82 additions & 49 deletions driver/hvisor.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,58 +64,91 @@ static int hvisor_finish_req(void) {
return 0;
}

// Clean the data cache for an already-mapped kernel virtual range so bytes
// just written through `vaddr` reach main memory (Point of Coherency) and
// become visible to observers that bypass this CPU's caches.
//
// vaddr: kernel virtual address of the mapping (need not be line-aligned;
//        the loop aligns down to a cache-line boundary itself).
// size:  length in bytes; rounded up to a whole number of pages.
//
// Returns 0. NOTE(review): on architectures without an implementation this
// logs an error but still returns 0, so callers cannot detect that no flush
// actually happened — confirm this is the intended policy.
static int flush_cache_mapped(void __iomem *vaddr, __u64 size) {
    unsigned long start, end, addr, line_size;
    size = PAGE_ALIGN(size);
    start = (unsigned long)vaddr;
    end = start + size;

#ifdef ARM64
    // CTR_EL0.DminLine (bits [19:16]) encodes the smallest D-cache line as
    // log2(words), so the line size in bytes is 4 << DminLine.
    asm volatile("mrs %0, ctr_el0" : "=r"(line_size));
    line_size = 4 << ((line_size >> 16) & 0xf);
    // Clean and Invalidate Data Cache to Point of Coherency (PoC),
    // one line at a time over the whole mapped range.
    addr = start & ~(line_size - 1);
    while (addr < end) {
        asm volatile("dc civac, %0" : : "r"(addr) : "memory");
        addr += line_size;
    }
    // Barrier: confirm maintenance operations completed and other cores /
    // observers can see the changes.
    asm volatile("dsb sy" : : : "memory");
#elif defined(RISCV64)
    // TODO: implement riscv64 flush operation
#elif defined(LOONGARCH64)
    // TODO: implement loongarch64 flush operation
#elif defined(X86_64)
    // TODO: implement x86_64 flush operation
#else
    pr_err("hvisor.ko: unsupported architecture\n");
#endif
    return 0;
}

// // flush I-cache(ARM64 平台中 flush_icache_range 是对 D/I 的处理)
// flush_icache_range((unsigned long)vaddr, (unsigned long)vaddr + size);
// Copy a user-space image buffer into guest physical memory.
//
// arg: user pointer to struct hvisor_load_image_args carrying the source
//      user buffer address, its size in bytes, and the destination guest
//      physical address.
//
// Returns 0 on success, -EFAULT on a faulting user access, -EINVAL on bad
// arguments or arithmetic overflow, -ENOMEM if the range cannot be mapped.
static int hvisor_load_image(struct hvisor_load_image_args __user *arg) {
    struct hvisor_load_image_args kargs;
    void __iomem *vaddr = NULL;
    __u64 map_phys;
    __u64 page_offs;
    __u64 map_size;
    void __iomem *dst;
    int ret = 0;

    if (copy_from_user(&kargs, arg, sizeof(kargs)))
        return -EFAULT;

    if (!kargs.user_buffer || !kargs.size)
        return -EINVAL;

    // Map from the enclosing page boundary and remember the in-page offset
    // so the copy lands at the exact physical destination.
    map_phys = kargs.load_paddr & PAGE_MASK;
    page_offs = kargs.load_paddr - map_phys;
    if (kargs.size > U64_MAX - page_offs)
        return -EINVAL;
    map_size = PAGE_ALIGN(kargs.size + page_offs);
    if (map_size < kargs.size)
        return -EINVAL;

    // Cached mapping so copy_from_user() can store through it directly;
    // caches are cleaned explicitly below before the guest sees the data.
    vaddr = ioremap_cache(map_phys, map_size);
    if (!vaddr) {
        return -ENOMEM;
    }

    dst = (void __iomem *)((char __iomem *)vaddr + page_offs);
    if (copy_from_user((void __force *)dst, u64_to_user_ptr(kargs.user_buffer),
                       kargs.size)) {
        ret = -EFAULT;
        goto out;
    }

    // Clean D-cache to PoC first so new contents are visible globally.
    ret = flush_cache_mapped(vaddr, map_size);
    if (ret)
        goto out;

    // Then invalidate I-cache for the written image range.
    flush_icache_range((unsigned long)dst, (unsigned long)dst + kargs.size);

out:
    iounmap(vaddr);
    return ret;
}

static int hvisor_zone_start(zone_config_t __user *arg) {
int err = 0;
int i = 0;

zone_config_t *zone_config = kmalloc(sizeof(zone_config_t), GFP_KERNEL);

if (zone_config == NULL) {
Expand All @@ -128,9 +161,6 @@ static int hvisor_zone_start(zone_config_t __user *arg) {
return -EFAULT;
}

// flush_cache(zone_config->kernel_load_paddr, zone_config->kernel_size);
// flush_cache(zone_config->dtb_load_paddr, zone_config->dtb_size);

pr_info("hvisor.ko: invoking hypercall to start the zone\n");

err = hvisor_call(HVISOR_HC_START_ZONE, __pa(zone_config),
Expand Down Expand Up @@ -244,6 +274,9 @@ static long hvisor_ioctl(struct file *file, unsigned int ioctl,
}
break;
}
case HVISOR_LOAD_IMAGE:
err = hvisor_load_image((struct hvisor_load_image_args __user *)arg);
break;
#ifdef LOONGARCH64
case HVISOR_CLEAR_INJECT_IRQ:
err = hvisor_call(HVISOR_HC_CLEAR_INJECT_IRQ, 0, 0);
Expand Down
8 changes: 8 additions & 0 deletions include/hvisor.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,14 @@ typedef struct ioctl_zone_list_args zone_list_args_t;
#define HVISOR_CONFIG_CHECK _IOR(1, 6, __u64 *)
#define HVISOR_SET_EVENTFD _IOW(1, 7, int)

/* Copy user buffer to [load_paddr, load_paddr + size) in kernel. */
struct hvisor_load_image_args {
    __u64 user_buffer; /* user-space address of the source buffer */
    __u64 size;        /* number of bytes to copy */
    __u64 load_paddr;  /* destination guest physical address */
};
#define HVISOR_LOAD_IMAGE _IOW(1, 8, struct hvisor_load_image_args)

#define HVISOR_HC_INIT_VIRTIO 0
#define HVISOR_HC_FINISH_REQ 1
#define HVISOR_HC_START_ZONE 2
Expand Down
2 changes: 1 addition & 1 deletion tools/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ ifndef CROSS_COMPILE
else ifeq ($(ARCH), x86_64)
CROSS_COMPILE := x86_64-linux-$(LIBC)-
else
$(error "Unsupported architecture $(ARCH)")
$(error "Unsupported architecture $(ARCH)")
endif
endif

Expand Down
102 changes: 40 additions & 62 deletions tools/hvisor.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
Expand Down Expand Up @@ -123,74 +123,51 @@ static size_t parse_json_size(const cJSON *const json_str) {
return strtoull(json_str->valuestring, NULL, 16);
}

// Load an in-memory buffer into guest physical memory via the hvisor
// driver's HVISOR_LOAD_IMAGE ioctl (replaces the old /dev/mem mmap path).
//
// buf:        source buffer (not modified; may be freed by the caller after
//             this returns).
// size:       number of bytes to copy; 0 is a no-op returning 0.
// load_paddr: destination guest physical address.
//
// Returns the page-aligned number of bytes covered by the load, matching
// the value the previous mmap-based implementation reported to callers.
// Exits the process on ioctl failure.
static __u64 load_buffer_to_memory(const void *buf, __u64 size,
                                   __u64 load_paddr) {
    int fd;
    long page_size;
    __u64 map_size;
    struct hvisor_load_image_args args;

    if (size == 0) {
        return 0;
    }

    page_size = sysconf(_SC_PAGESIZE);
    // Round up to a whole number of pages; cast keeps the mask 64-bit wide.
    map_size = (size + page_size - 1) & ~((__u64)page_size - 1);

    fd = open_dev();
    args.user_buffer = (__u64)(uintptr_t)buf;
    args.size = size;
    args.load_paddr = load_paddr;
    if (ioctl(fd, HVISOR_LOAD_IMAGE, &args) != 0) {
        perror("load_buffer_to_memory: HVISOR_LOAD_IMAGE failed");
        close(fd);
        exit(1);
    }

    close(fd);

    return map_size;
}

// Copy a NUL-terminated string into guest physical memory.
// The terminating '\0' is included so the guest side can safely parse the
// cmdline without an out-of-band length.
static __u64 load_str_to_memory(const char *str, __u64 load_paddr) {
    return load_buffer_to_memory(str, (__u64)(strlen(str) + 1), load_paddr);
}

// Load an image file into guest physical memory.
//
// path:       filesystem path of the image; the literal string "null" is
//             the config's way of saying "nothing to load" and returns 0.
// load_paddr: destination guest physical address.
//
// Returns the page-aligned number of bytes loaded (0 for "null").
// NOTE(review): read_file() allocates the buffer and presumably exits on
// I/O error — confirm; ownership transfers here and is released below.
static __u64 load_image_to_memory(const char *path, __u64 load_paddr) {
    __u64 size;
    __u64 map_size;
    void *image_content;

    if (strcmp(path, "null") == 0) {
        return 0;
    }

    image_content = read_file((char *)path, (uint64_t *)&size);
    map_size = load_buffer_to_memory(image_content, size, load_paddr);
    free(image_content);

    return map_size;
}

Expand Down Expand Up @@ -651,6 +628,7 @@ static int zone_start_from_json(const char *json_config_path,
char *buffer = malloc(file_size + 1);
if (fread(buffer, 1, file_size, file) == 0) {
log_error("Error reading json file: %s", json_config_path);
fclose(file);
goto err_out;
}
fclose(file);
Expand Down Expand Up @@ -713,6 +691,15 @@ static int zone_start_from_json(const char *json_config_path,
cJSON *region = SAFE_CJSON_GET_ARRAY_ITEM(memory_regions_json, i);
memory_region_t *mem_region = &config->memory_regions[i];

mem_region->physical_start = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "physical_start")->valuestring,
NULL, 16);
mem_region->virtual_start = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "virtual_start")->valuestring,
NULL, 16);
mem_region->size = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "size")->valuestring, NULL, 16);

const char *type_str =
SAFE_CJSON_GET_OBJECT_ITEM(region, "type")->valuestring;
if (strcmp(type_str, "ram") == 0) {
Expand All @@ -728,15 +715,6 @@ static int zone_start_from_json(const char *json_config_path,
goto err_out;
}

mem_region->physical_start = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "physical_start")->valuestring,
NULL, 16);
mem_region->virtual_start = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "virtual_start")->valuestring,
NULL, 16);
mem_region->size = strtoull(
SAFE_CJSON_GET_OBJECT_ITEM(region, "size")->valuestring, NULL, 16);

log_debug("memory_region %d: type %d, physical_start %llx, "
"virtual_start %llx, size %llx",
i, mem_region->type, mem_region->physical_start,
Expand Down
Loading