@@ -628,12 +628,12 @@ void kvm_riscv_cove_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_cove_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	int rc;
-	struct kvm *kvm;
 	struct kvm_cove_tvm_vcpu_context *tvcpuc;
 	struct kvm_cove_tvm_context *tvmc;
-	struct page *vcpus_page;
 	unsigned long vcpus_phys_addr;
+	struct page *vcpus_page;
+	struct kvm *kvm;
+	int rc;
 
 	if (!vcpu)
 		return -EINVAL;
@@ -654,36 +654,38 @@ int kvm_riscv_cove_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!tvcpuc)
 		return -ENOMEM;
 
-	vcpus_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
-				 get_order_num_pages(tinfo.tvcpu_pages_needed));
-	if (!vcpus_page) {
-		rc = -ENOMEM;
-		goto alloc_page_failed;
-	}
-
 	tvcpuc->vcpu = vcpu;
 	tvcpuc->vcpu_state.npages = tinfo.tvcpu_pages_needed;
-	tvcpuc->vcpu_state.page = vcpus_page;
-	vcpus_phys_addr = page_to_phys(vcpus_page);
 
-	rc = cove_convert_pages(vcpus_phys_addr, tvcpuc->vcpu_state.npages, true);
-	if (rc)
-		goto convert_failed;
+	if (tinfo.tvcpu_pages_needed > 0) {
+		vcpus_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order_num_pages(tinfo.tvcpu_pages_needed));
+		if (!vcpus_page) {
+			rc = -ENOMEM;
+			goto alloc_page_failed;
+		}
+		tvcpuc->vcpu_state.page = vcpus_page;
+		vcpus_phys_addr = page_to_phys(vcpus_page);
 
-	rc = sbi_covh_create_tvm_vcpu(tvmc->tvm_guest_id, vcpu->vcpu_idx, vcpus_phys_addr);
-	if (rc)
-		goto vcpu_create_failed;
+		rc = cove_convert_pages(vcpus_phys_addr, tvcpuc->vcpu_state.npages, true);
+		if (rc)
+			goto convert_failed;
 
+		rc = sbi_covh_create_tvm_vcpu(tvmc->tvm_guest_id, vcpu->vcpu_idx, vcpus_phys_addr);
+		if (rc)
+			goto vcpu_create_failed;
+	}
 	vcpu->arch.tc = tvcpuc;
 
 	return 0;
 
 vcpu_create_failed:
 	/* Reclaim all the pages or return to the confidential page pool */
-	sbi_covh_tsm_reclaim_pages(vcpus_phys_addr, tvcpuc->vcpu_state.npages);
+	if (tinfo.tvcpu_pages_needed > 0)
+		sbi_covh_tsm_reclaim_pages(vcpus_phys_addr, tvcpuc->vcpu_state.npages);
 
 convert_failed:
-	__free_pages(vcpus_page, get_order_num_pages(tinfo.tvcpu_pages_needed));
+	if (tinfo.tvcpu_pages_needed > 0)
+		__free_pages(vcpus_page, get_order_num_pages(tinfo.tvcpu_pages_needed));
 
 alloc_page_failed:
 	kfree(tvcpuc);
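A note on the allocation sizing used above: both alloc_pages() and the __free_pages() call in the unwind path take an allocation order derived from tinfo.tvcpu_pages_needed through get_order_num_pages(). That helper is not part of this hunk; a minimal sketch of what it is assumed to do (turn a page count into a power-of-two allocation order) is:

static inline unsigned int get_order_num_pages(unsigned long npages)
{
	/* size in bytes for npages pages, rounded up to an allocation order */
	return get_order(npages << PAGE_SHIFT);
}

Under that assumption, a TSM that reports three vCPU state pages gets an order-2 (four page) allocation, and the error path frees with that same order, while sbi_covh_tsm_reclaim_pages() is still passed the raw npages count.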
@@ -877,7 +879,7 @@ void kvm_riscv_cove_vm_destroy(struct kvm *kvm)
 		kvm_err("Memory reclaim failed with rc %d\n", rc);
 }
 
-int kvm_riscv_cove_vm_init(struct kvm *kvm)
+int kvm_riscv_cove_vm_multi_step_init(struct kvm *kvm)
 {
 	struct kvm_cove_tvm_context *tvmc;
 	struct page *tvms_page, *pgt_page;
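The rename above implies a matching change to the function's declaration. Assuming the CoVE header used elsewhere in this series (not shown in this diff), the prototypes would now read roughly:

int kvm_riscv_cove_vm_multi_step_init(struct kvm *kvm);
int kvm_riscv_cove_vm_single_step_init(struct kvm_vcpu *vcpu, unsigned long fdt_address,
				       unsigned long tap_addr);

where the single-step variant is the new entry point added in the following hunk.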
@@ -980,6 +982,64 @@ int kvm_riscv_cove_vm_init(struct kvm *kvm)
 	return rc;
 }
 
+int kvm_riscv_cove_vm_single_step_init(struct kvm_vcpu *vcpu, unsigned long fdt_address,
+				       unsigned long tap_addr)
+{
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long tvm_gid, target_vcpuid;
+	struct kvm_cove_tvm_context *tvmc;
+	struct kvm_vcpu *target_vcpu;
+	struct kvm *kvm = vcpu->kvm;
+	void *nshmem = nacl_shmem();
+	int rc = 0, gpr_id, offset;
+
+	tvmc = kzalloc(sizeof(*tvmc), GFP_KERNEL);
+	if (!tvmc)
+		return -ENOMEM;
+
+	for (gpr_id = 1; gpr_id < 32; gpr_id++) {
+		offset = KVM_ARCH_GUEST_ZERO + gpr_id * sizeof(unsigned long);
+		nacl_shmem_gpr_write_cove(nshmem, offset,
+					  ((unsigned long *)cp)[gpr_id]);
+	}
+	kvm_arch_vcpu_load(vcpu, smp_processor_id());
+	rc = sbi_covh_tsm_promote_to_tvm(fdt_address, tap_addr, cp->sepc, &tvm_gid);
+	if (rc)
+		goto done;
+
+	INIT_LIST_HEAD(&tvmc->measured_pages);
+	INIT_LIST_HEAD(&tvmc->zero_pages);
+	INIT_LIST_HEAD(&tvmc->shared_pages);
+	INIT_LIST_HEAD(&tvmc->reclaim_pending_pages);
+
+	tvmc->tvm_guest_id = tvm_gid;
+	tvmc->kvm = kvm;
+	kvm->arch.tvmc = tvmc;
+
+	kvm_for_each_vcpu(target_vcpuid, target_vcpu, kvm) {
+		rc = kvm_riscv_cove_vcpu_init(target_vcpu);
+		if (rc)
+			goto vcpus_allocated;
+
+		target_vcpu->requests = 0;
+		if (target_vcpu->vcpu_idx != 0)
+			kvm_riscv_vcpu_power_off(target_vcpu);
+	}
+
+	tvmc->finalized_done = true;
+	kvm_info("Guest VM creation successful with guest id %lx\n", tvm_gid);
+	return 0;
+
+vcpus_allocated:
+	kvm_for_each_vcpu(target_vcpuid, target_vcpu, kvm)
+		if (target_vcpu->arch.tc)
+			kfree(target_vcpu->arch.tc);
+
+done:
+	kfree(tvmc);
+	return rc;
+}
+
 int kvm_riscv_cove_init(void)
 {
 	int rc;
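The new single-step path promotes an already-running, non-confidential guest in place: it copies the guest GPRs into the NACL shared memory, calls sbi_covh_tsm_promote_to_tvm() with the guest-supplied FDT and TVM attestation payload (TAP) addresses plus the resume PC (cp->sepc), and then runs kvm_riscv_cove_vcpu_init() for every vCPU of the freshly created TVM. A hypothetical caller sketch (the dispatch site and register convention are assumptions, not part of this diff):

/*
 * Hypothetical dispatch sketch: forward a guest "promote to TVM" request to
 * the single-step init path, taking the FDT and TAP addresses from the
 * guest's a0/a1 argument registers (an assumed calling convention).
 */
static int cove_promote_to_tvm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	return kvm_riscv_cove_vm_single_step_init(vcpu, cp->a0, cp->a1);
}

On success the TVM is marked finalized right away, and every vCPU except vcpu_idx 0 is powered off so that only the boot vCPU resumes execution inside the TVM.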