KVM: PPC: Add skip_page_out parameter to uvmem functions
Add a 'skip_page_out' parameter to kvmppc_uvmem_drop_pages() so the callers can specify whether or not to skip paging out pages. This will be needed in a follow-on patch that implements the H_SVM_INIT_ABORT hcall. Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
This commit is contained in:
parent
e1bd0a7e24
commit
ce477a7a1c
|
@@ -20,7 +20,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
|
||||||
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
|
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
|
||||||
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
|
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
|
||||||
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
||||||
struct kvm *kvm);
|
struct kvm *kvm, bool skip_page_out);
|
||||||
#else
|
#else
|
||||||
static inline int kvmppc_uvmem_init(void)
|
static inline int kvmppc_uvmem_init(void)
|
||||||
{
|
{
|
||||||
|
@@ -69,6 +69,6 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
||||||
struct kvm *kvm) { }
|
struct kvm *kvm, bool skip_page_out) { }
|
||||||
#endif /* CONFIG_PPC_UV */
|
#endif /* CONFIG_PPC_UV */
|
||||||
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
|
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
|
||||||
|
|
|
@@ -1102,7 +1102,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
|
||||||
unsigned int shift;
|
unsigned int shift;
|
||||||
|
|
||||||
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
|
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
|
||||||
kvmppc_uvmem_drop_pages(memslot, kvm);
|
kvmppc_uvmem_drop_pages(memslot, kvm, true);
|
||||||
|
|
||||||
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
|
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
|
||||||
return;
|
return;
|
||||||
|
|
|
@@ -5477,7 +5477,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
kvm_for_each_memslot(memslot, slots) {
|
kvm_for_each_memslot(memslot, slots) {
|
||||||
kvmppc_uvmem_drop_pages(memslot, kvm);
|
kvmppc_uvmem_drop_pages(memslot, kvm, true);
|
||||||
uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
|
uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -258,7 +258,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
|
||||||
* QEMU page table with normal PTEs from newly allocated pages.
|
* QEMU page table with normal PTEs from newly allocated pages.
|
||||||
*/
|
*/
|
||||||
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
||||||
struct kvm *kvm)
|
struct kvm *kvm, bool skip_page_out)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
struct kvmppc_uvmem_page_pvt *pvt;
|
struct kvmppc_uvmem_page_pvt *pvt;
|
||||||
|
@@ -276,7 +276,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
||||||
|
|
||||||
uvmem_page = pfn_to_page(uvmem_pfn);
|
uvmem_page = pfn_to_page(uvmem_pfn);
|
||||||
pvt = uvmem_page->zone_device_data;
|
pvt = uvmem_page->zone_device_data;
|
||||||
pvt->skip_page_out = true;
|
pvt->skip_page_out = skip_page_out;
|
||||||
mutex_unlock(&kvm->arch.uvmem_lock);
|
mutex_unlock(&kvm->arch.uvmem_lock);
|
||||||
|
|
||||||
pfn = gfn_to_pfn(kvm, gfn);
|
pfn = gfn_to_pfn(kvm, gfn);
|
||||||
|
|
Loading…
Reference in a new issue