When I use rtsm_ve-cortex_a15x2.dtb (two CPUs) to boot the guest OS, the guest fails to boot.
The resources I used are as follows:
kvm-arm: git://github.com/virtualopensystems/linux-kvm-arm.git, branch: kvm-arm-v12-vgic-timers
qemu: git://github.com/virtualopensystems/qemu.git, branch: kvm-arm
guide: http://www.virtualopensystems.com/media/pdf/kvm-arm-guide.pdf
It is worth mentioning that the guest kernel is the same image as the host kernel, just as the guide suggests.
Following the guide step by step, QEMU boots the guest successfully with the following command line:
./qemu-system-arm \
-enable-kvm \
-kernel zImage \
-sd guest.cramfs \
-dtb ./rtsm_ve-cortex_a15x1.dtb \
-m 512 -M vexpress-a15 -cpu cortex-a15 -nographic \
-append "console=ttyAMA0 mem=512M root=/dev/mmcblk0 rw"
However, when I change the dtb file to rtsm_ve-cortex_a15x2.dtb, which describes two CPUs, something goes
wrong: the guest does not boot, and there is no output at all to help me locate the problem.
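For reference, the failing invocation is the same command line as above with only the dtb argument changed (nothing else was modified):
./qemu-system-arm \
-enable-kvm \
-kernel zImage \
-sd guest.cramfs \
-dtb ./rtsm_ve-cortex_a15x2.dtb \
-m 512 -M vexpress-a15 -cpu cortex-a15 -nographic \
-append "console=ttyAMA0 mem=512M root=/dev/mmcblk0 rw"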
I found that the system enters an endless loop, depicted as follows.

While the host boots:
init_hyp_mode ==> kvm_timer_hyp_init: this function enables the physical virtual-timer PPI and registers
the IRQ handler kvm_arch_timer_handler.
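The handler is essentially a no-op. The sketch below is paraphrased rather than copied from the branch, so the exact comment and surrounding code may differ, but the key point is that it does no real work:

/*
 * Per-cpu handler registered by kvm_timer_hyp_init() for the physical
 * virtual-timer PPI (sketch, paraphrased from arch/arm/kvm/timer.c).
 */
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	/*
	 * The world switch disables the guest's virtual timer on exit and
	 * kvm_timer_sync_from_cpu() injects the interrupt into the guest,
	 * so there is nothing left to do when this PPI fires on the host.
	 */
	return IRQ_HANDLED;
}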
While the guest boots, something causes a guest exit:
||===> VM_EXIT
||       ||
||       || save-context path:
||   |--------------------------------------------------------------------|
||   | save_timer_state        (save the hardware virtual timer registers |
||   |     ||                   CNTV_CTL and CNTV_CVAL into the vcpu and  |
||   |     ||                   disable the virtual timer)                |
||   |     ||                                                             |
||   | kvm_timer_sync_from_cpu (software decides whether or not to inject |
||   |     ||                   the virtual virtual-timer PPI into the    |
||   |     ||                   guest)                                    |
||   |-----||------------------------------------------------------------|
||       ||
||       ||
||   ioctl(KVM_RUN)
||       ||
||       || restore-context path:
||   |--------------------------------------------------------------------|
||   | restore_timer_state     (restore the hardware virtual timer        |
||   |     ||                   registers CNTV_CTL and CNTV_CVAL from     |
||   |     ||                   the vcpu)                                 |
||   |     ||                                                             |
||   | GICD_ISPENDING          (because the hardware virtual timer has    |
||   |     ||                   been re-enabled and the host OS has       |
||   |     ||                   enabled the physical virtual-timer PPI,   |
||   |     ||                   a physical virtual-timer PPI is pending   |
||   |     ||                   for the host; the host handles it with    |
||   |     ||                   kvm_arch_timer_handler, which does        |
||   |     ||                   nothing, so the next time we reach this   |
||   |     ||                   point GICD_ISPENDING still shows the      |
||   |     ||                   physical virtual-timer PPI pending)       |
||   |     ||                                                             |
||   | HCR register setting                                               |
||   |     ||                                                             |
||   | ERET                    (we want to execute guest instructions so  |
||   |     ||                   that the guest can handle the virtual     |
||   |     ||                   virtual-timer PPI and program the virtual |
||   |     ||                   timer registers for its next event; only  |
||   |     ||                   the guest programs those registers; but   |
||   |     ||                   GICD_ISPENDING has a pending IRQ, so we   |
||   |     ||                   immediately trap to hyp_irq, which is     |
||   |     ||                   another VM_EXIT)                          |
||   |-----||------------------------------------------------------------|
||       ||
||<======||
The reasons are as follows:
(1) When the context is saved, kvm_timer_sync_from_cpu decides whether or not to inject the virtual
virtual-timer PPI into the guest (see the sketch after this list).
(2) The physical virtual-timer PPI has been enabled by the host OS, so when the context is restored there
is always a physical virtual-timer PPI pending, and "eret" immediately traps back to hyp_irq. This is the
endless loop.
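To make point (1) concrete, here is a simplified sketch of the decision kvm_timer_sync_from_cpu makes on each exit. This is not the code from the branch: read_guest_virtual_count() is an illustrative helper of my own, the field and constant names are only assumptions, and the real function also arms a soft timer when the deadline has not yet passed.

/*
 * Simplified sketch of the decision in kvm_timer_sync_from_cpu().
 * Names below are illustrative, not the exact identifiers in timer.c.
 */
static void timer_sync_sketch(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/* The guest left its virtual timer disabled or masked: nothing to do. */
	if (!(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
	    (timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK))
		return;

	/*
	 * If CNTV_CVAL has already been reached, the guest's timer interrupt
	 * is due: inject the virtual virtual-timer PPI into the guest.
	 */
	if (read_guest_virtual_count(vcpu) >= timer->cntv_cval)
		kvm_timer_inject_irq(vcpu);
}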
My proposed solution: I do not think it is necessary to enable the physical virtual-timer PPI in
kvm_timer_hyp_init when the host OS initializes Hyp mode.
Signed-off-by: Wanghaibin <wanghaibin202@xxxxxxxxx>
---
arch/arm/kvm/timer.c | 27 ---------------------------
1 files changed, 0 insertions(+), 27 deletions(-)
diff --git a/arch/arm/kvm/timer.c b/arch/arm/kvm/timer.c
index a241298..a10ef3d 100644
--- a/arch/arm/kvm/timer.c
+++ b/arch/arm/kvm/timer.c
@@ -145,43 +145,16 @@ static const struct of_device_id arch_timer_of_match[] = {
int kvm_timer_hyp_init(void)
{
- struct device_node *np;
- unsigned int ppi;
- int err;
timecounter = arch_timer_get_timecounter();
if (!timecounter)
return -ENODEV;
- np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- kvm_err("kvm_arch_timer: can't find DT node\n");
- return -ENODEV;
- }
-
- ppi = irq_of_parse_and_map(np, 2);
- if (!ppi) {
- kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
- return -EINVAL;
- }
-
- err = request_percpu_irq(ppi, kvm_arch_timer_handler,
- "kvm guest timer", kvm_get_running_vcpus());
- if (err) {
- kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
- ppi, err);
- return err;
- }
-
wqueue = create_singlethread_workqueue("kvm_arch_timer");
if (!wqueue) {
- free_percpu_irq(ppi, kvm_get_running_vcpus());
return -ENOMEM;
}
- kvm_info("%s IRQ%d\n", np->name, ppi);
- on_each_cpu(kvm_timer_init_interrupt, &ppi, 1);
-
return 0;
}
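To make the effect of the diff easier to read, this is what kvm_timer_hyp_init() reduces to once the removals above are applied (reconstructed from the context lines of the hunk, so whitespace may differ from the actual file):

int kvm_timer_hyp_init(void)
{
	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		return -ENOMEM;
	}

	return 0;
}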