
xen: branch for v5.12-rc1

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCYCu8dgAKCRCAXGG7T9hj
 vuxTAP0S1iJ6DR5Y2pdSy2dfxn/gItNqUlR7vbFdxgf/mBSNxAD/fxbtVWM1GuTs
 3Fwz0T60BcxsHZXhDcPAA2cjoqORbQs=
 =2b0M
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "A series of Xen related security fixes, all related to limited error
  handling in Xen backend drivers"

* tag 'for-linus-5.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-blkback: fix error handling in xen_blkbk_map()
  xen-scsiback: don't "handle" error by BUG()
  xen-netback: don't "handle" error by BUG()
  xen-blkback: don't "handle" error by BUG()
  xen/arm: don't ignore return errors from set_phys_to_machine
  Xen/gntdev: correct error checking in gntdev_map_grant_pages()
  Xen/gntdev: correct dev_bus_addr handling in gntdev_map_grant_pages()
  Xen/x86: also check kernel mapping in set_foreign_p2m_mapping()
  Xen/x86: don't bail early from clear_foreign_p2m_mapping()
Branch: master
Linus Torvalds 2021-02-21 13:06:08 -08:00
commit 4a037ad5d1
7 changed files with 53 additions and 46 deletions
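
The common thread in these fixes is that a failing grant-map operation used to be "handled" by BUG(), i.e. by crashing the backend, or its result was silently dropped. Below is a minimal userspace sketch, not kernel code, of the pattern the series converges on; batch_map(), struct map_op and GNTST_OKAY_FAKE are invented stand-ins for gnttab_map_refs() and struct gnttab_map_grant_ref. The point is to inspect every entry's status, remember the first error, and keep going so that successfully mapped entries can still be torn down later.

/*
 * Illustrative only -- not kernel code.  batch_map(), struct map_op and
 * GNTST_OKAY_FAKE are invented stand-ins for gnttab_map_refs() and
 * struct gnttab_map_grant_ref.
 */
#include <stdio.h>

#define GNTST_OKAY_FAKE 0              /* zero means success, like GNTST_okay */
#define INVALID_HANDLE  (-1)

struct map_op {
        int status;                    /* per-entry result */
        int handle;                    /* valid only when status is okay */
};

/* Pretend batch call: entry 2 fails, the call itself still "succeeds". */
static int batch_map(struct map_op *ops, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++) {
                ops[i].status = (i == 2) ? -1 : GNTST_OKAY_FAKE;
                ops[i].handle = (ops[i].status == GNTST_OKAY_FAKE) ? (int)i
                                                                   : INVALID_HANDLE;
        }
        return 0;
}

int main(void)
{
        struct map_op ops[4];
        int err = batch_map(ops, 4);

        /* Do not BUG()/abort() here: inspect every entry instead. */
        for (unsigned int i = 0; i < 4; i++) {
                if (ops[i].status == GNTST_OKAY_FAKE)
                        printf("entry %u mapped, handle %d\n", i, ops[i].handle);
                else if (!err)
                        err = -1;      /* remember the first failure, keep going */
        }
        return err ? 1 : 0;
}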


@@ -95,8 +95,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
         for (i = 0; i < count; i++) {
                 if (map_ops[i].status)
                         continue;
-                set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
-                                    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
+                if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+                                    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
+                        return -ENOMEM;
+                }
         }
 
         return 0;
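
For context on the hunk above: the Arm set_phys_to_machine() returns a bool and can fail (its bookkeeping may need memory), so dropping that result loses an out-of-memory condition. A minimal sketch of the corrected pattern, with a hypothetical update_mapping() standing in for set_phys_to_machine():

/*
 * Sketch only: update_mapping() is an invented stand-in for a bool-returning
 * helper such as set_phys_to_machine(); none of this is a real kernel API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool update_mapping(unsigned long pfn, unsigned long mfn)
{
        (void)pfn;
        return mfn != 42;              /* pretend one frame cannot be entered */
}

static int record_mappings(const unsigned long *pfns, const unsigned long *mfns,
                           unsigned int count)
{
        for (unsigned int i = 0; i < count; i++) {
                /* Old pattern: call and drop the result.  New pattern: */
                if (!update_mapping(pfns[i], mfns[i]))
                        return -ENOMEM;   /* surface the failure to the caller */
        }
        return 0;
}

int main(void)
{
        unsigned long pfns[] = { 1, 2, 3 };
        unsigned long mfns[] = { 10, 42, 30 };

        printf("record_mappings: %d\n", record_mappings(pfns, mfns, 3));
        return 0;
}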


@@ -712,7 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                 unsigned long mfn, pfn;
 
                 /* Do not add to override if the map failed. */
-                if (map_ops[i].status)
+                if (map_ops[i].status != GNTST_okay ||
+                    (kmap_ops && kmap_ops[i].status != GNTST_okay))
                         continue;
 
                 if (map_ops[i].flags & GNTMAP_contains_pte) {
@@ -750,17 +751,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
                 unsigned long pfn = page_to_pfn(pages[i]);
 
-                if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+                if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
+                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+                else
                         ret = -EINVAL;
-                        goto out;
-                }
-
-                set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
         }
         if (kunmap_ops)
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-                                                kunmap_ops, count);
-out:
+                                                kunmap_ops, count) ?: ret;
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
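
Two things in the clear_foreign_p2m_mapping() hunk are worth spelling out: the loop no longer bails out at the first bad entry, so the remaining entries still get cleaned up, and the final hypercall's result is folded in with the GNU C "x ?: y" extension, which evaluates to x when x is non-zero and to y otherwise. A rough sketch of the same flow under those assumptions; teardown_one() and finish_batch() are invented helpers, not kernel functions, and ?: requires gcc or clang.

/*
 * Sketch of the "don't bail early from teardown" pattern; teardown_one() and
 * finish_batch() are invented stand-ins, not kernel functions.  The "x ?: y"
 * form is a GNU C extension (gcc/clang).
 */
#include <errno.h>
#include <stdio.h>

static int teardown_one(unsigned int i)
{
        return (i == 1) ? -EINVAL : 0;   /* pretend entry 1 is bogus */
}

static int finish_batch(void)
{
        return 0;                        /* pretend the final batched call succeeds */
}

static int teardown_all(unsigned int count)
{
        int ret = 0;

        for (unsigned int i = 0; i < count; i++) {
                /* Keep going on error so the remaining entries still get cleaned up. */
                if (teardown_one(i))
                        ret = -EINVAL;
        }
        /*
         * "x ?: y" yields x when x != 0, else y: an error from finish_batch()
         * takes precedence, otherwise the error remembered above survives.
         */
        ret = finish_batch() ?: ret;
        return ret;
}

int main(void)
{
        printf("teardown_all: %d\n", teardown_all(3));   /* -EINVAL */
        return 0;
}

The ?: form also evaluates its first operand only once, which is why it reads better than repeating the call in a plain conditional expression.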


@@ -794,8 +794,13 @@ again:
                         pages[i]->persistent_gnt = persistent_gnt;
                 } else {
                         if (gnttab_page_cache_get(&ring->free_pages,
-                                                  &pages[i]->page))
-                                goto out_of_memory;
+                                                  &pages[i]->page)) {
+                                gnttab_page_cache_put(&ring->free_pages,
+                                                      pages_to_gnt,
+                                                      segs_to_map);
+                                ret = -ENOMEM;
+                                goto out;
+                        }
                         addr = vaddr(pages[i]->page);
                         pages_to_gnt[segs_to_map] = pages[i]->page;
                         pages[i]->persistent_gnt = NULL;
@@ -811,10 +816,8 @@ again:
                         break;
         }
 
-        if (segs_to_map) {
+        if (segs_to_map)
                 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
-                BUG_ON(ret);
-        }
 
         /*
          * Now swizzle the MFN in our domain with the MFN from the other domain
@@ -830,7 +833,7 @@ again:
                                 gnttab_page_cache_put(&ring->free_pages,
                                                       &pages[seg_idx]->page, 1);
                                 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
-                                ret |= 1;
+                                ret |= !ret;
                                 goto next;
                         }
                         pages[seg_idx]->handle = map[new_map_idx].handle;
@@ -882,17 +885,18 @@ next:
         }
         segs_to_map = 0;
         last_map = map_until;
-        if (map_until != num)
+        if (!ret && map_until != num)
                 goto again;
 
-        return ret;
-
-out_of_memory:
-        pr_alert("%s: out of memory\n", __func__);
-        gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
-        for (i = last_map; i < num; i++)
+out:
+        for (i = last_map; i < num; i++) {
+                /* Don't zap current batch's valid persistent grants. */
+                if(i >= last_map + segs_to_map)
+                        pages[i]->persistent_gnt = NULL;
                 pages[i]->handle = BLKBACK_INVALID_HANDLE;
-        return -ENOMEM;
+        }
+
+        return ret;
 }
 
 static int xen_blkbk_map_seg(struct pending_req *pending_req)
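
Two details of the xen_blkbk_map() hunks deserve a note: the mapping loop is only restarted while nothing has failed yet ("!ret && map_until != num"), and a failed remap is recorded with "ret |= !ret", which forces ret to become non-zero without clobbering an error code that may already be stored there. A rough, self-contained sketch of both idioms; process_batch() and entry_failed() are invented stand-ins, not blkback functions.

/*
 * Sketch of two idioms from the xen_blkbk_map() hunks above; process_batch()
 * and entry_failed() are made-up stand-ins, not blkback functions.
 */
#include <errno.h>
#include <stdio.h>

static int process_batch(unsigned int start, unsigned int *done)
{
        *done = start + 2;                   /* pretend two entries per pass */
        return (start == 0) ? 0 : -ENOMEM;   /* a later pass may fail outright */
}

static int entry_failed(unsigned int i)
{
        return i == 1;                       /* pretend entry 1 did not remap */
}

static int map_all(unsigned int num)
{
        unsigned int done = 0;
        int ret = 0;

        do {
                unsigned int start = done;

                ret = process_batch(start, &done);
                for (unsigned int i = start; i < done; i++) {
                        if (entry_failed(i)) {
                                /*
                                 * Mark failure, but never clobber an errno
                                 * already in ret: !ret is 1 only when ret == 0.
                                 */
                                ret |= !ret;
                        }
                }
                /* Only keep going while nothing has failed so far. */
        } while (!ret && done < num);

        return ret;
}

int main(void)
{
        printf("map_all: %d\n", map_all(6));    /* 1: entry 1 failed to remap */
        return 0;
}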


@@ -1343,13 +1343,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
                 return 0;
 
         gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
-        if (nr_mops != 0) {
+        if (nr_mops != 0)
                 ret = gnttab_map_refs(queue->tx_map_ops,
                                       NULL,
                                       queue->pages_to_map,
                                       nr_mops);
-                BUG_ON(ret);
-        }
 
         work_done = xenvif_tx_submit(queue);
 


@@ -309,44 +309,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
                  * to the kernel linear addresses of the struct pages.
                  * These ptes are completely different from the user ptes dealt
                  * with find_grant_ptes.
+                 * Note that GNTMAP_device_map isn't needed here: The
+                 * dev_bus_addr output field gets consumed only from ->map_ops,
+                 * and by not requesting it when mapping we also avoid needing
+                 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
+                 * reference to the page in the hypervisor).
                  */
+                unsigned int flags = (map->flags & ~GNTMAP_device_map) |
+                                     GNTMAP_host_map;
+
                 for (i = 0; i < map->count; i++) {
                         unsigned long address = (unsigned long)
                                 pfn_to_kaddr(page_to_pfn(map->pages[i]));
                         BUG_ON(PageHighMem(map->pages[i]));
 
-                        gnttab_set_map_op(&map->kmap_ops[i], address,
-                                map->flags | GNTMAP_host_map,
+                        gnttab_set_map_op(&map->kmap_ops[i], address, flags,
                                 map->grants[i].ref,
                                 map->grants[i].domid);
                         gnttab_set_unmap_op(&map->kunmap_ops[i], address,
-                                map->flags | GNTMAP_host_map, -1);
+                                flags, -1);
                 }
         }
 
         pr_debug("map %d+%d\n", map->index, map->count);
         err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
                         map->pages, map->count);
-        if (err)
-                return err;
 
         for (i = 0; i < map->count; i++) {
-                if (map->map_ops[i].status) {
+                if (map->map_ops[i].status == GNTST_okay)
+                        map->unmap_ops[i].handle = map->map_ops[i].handle;
+                else if (!err)
                         err = -EINVAL;
-                        continue;
-                }
 
-                map->unmap_ops[i].handle = map->map_ops[i].handle;
-                if (use_ptemod)
-                        map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-                else if (map->dma_vaddr) {
-                        unsigned long bfn;
+                if (map->flags & GNTMAP_device_map)
+                        map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 
-                        bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
-                        map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+                if (use_ptemod) {
+                        if (map->kmap_ops[i].status == GNTST_okay)
+                                map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+                        else if (!err)
+                                err = -EINVAL;
                 }
-#endif
         }
         return err;
 }
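
Two independent corrections meet in the gntdev hunk above: a handle is recorded only for entries whose status came back GNTST_okay, so teardown never tries to unmap something that was never mapped, and when GNTMAP_device_map was requested the dev_bus_addr produced by the map operation is mirrored into the unmap operation instead of being recomputed. A rough sketch of that mirroring; fake_map_op, fake_unmap_op and FAKE_GNTMAP_device_map only mimic the shape of the real interface and are not the actual ABI.

/*
 * Illustrative structures only; they mimic the shape of struct
 * gnttab_map_grant_ref / gnttab_unmap_grant_ref but are not the real ABI,
 * and FAKE_GNTMAP_device_map is likewise a stand-in flag.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_GNTMAP_device_map (1u << 1)

struct fake_map_op {
        uint32_t flags;
        int16_t  status;        /* output: 0 on success */
        uint32_t handle;        /* output: needed later for the unmap */
        uint64_t dev_bus_addr;  /* output: only meaningful for device_map */
};

struct fake_unmap_op {
        uint32_t handle;
        uint64_t dev_bus_addr;  /* must mirror the map op's output */
};

/* Carry the outputs of a successful map into the matching unmap op. */
static void fill_unmap(const struct fake_map_op *map, struct fake_unmap_op *unmap)
{
        if (map->status != 0)
                return;                 /* never mapped: nothing to mirror */
        unmap->handle = map->handle;
        if (map->flags & FAKE_GNTMAP_device_map)
                unmap->dev_bus_addr = map->dev_bus_addr;
}

int main(void)
{
        struct fake_map_op m = {
                .flags = FAKE_GNTMAP_device_map, .status = 0,
                .handle = 7, .dev_bus_addr = 0x1234000,
        };
        struct fake_unmap_op u = { 0 };

        fill_unmap(&m, &u);
        printf("handle=%u dev_bus_addr=%#llx\n", (unsigned)u.handle,
               (unsigned long long)u.dev_bus_addr);
        return 0;
}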


@@ -386,12 +386,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
                 return 0;
 
         err = gnttab_map_refs(map, NULL, pg, cnt);
-        BUG_ON(err);
         for (i = 0; i < cnt; i++) {
                 if (unlikely(map[i].status != GNTST_okay)) {
                         pr_err("invalid buffer -- could not remap it\n");
                         map[i].handle = SCSIBACK_INVALID_HANDLE;
-                        err = -ENOMEM;
+                        if (!err)
+                                err = -ENOMEM;
                 } else {
                         get_page(pg[i]);
                 }


@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
         map->flags = flags;
         map->ref = ref;
         map->dom = domid;
+        map->status = 1; /* arbitrary positive value */
 }
 
 static inline void
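
A note on the one-line gnttab_set_map_op() change above: GNTST_okay is 0, so if a batched map call fails before the hypervisor processes a given entry, a zero-initialized status would make that entry look successfully mapped to the per-entry checks added elsewhere in this series. Presetting status to a positive value guarantees such an entry fails the status == GNTST_okay test. A tiny sketch of the idea; fake_op, set_op() and FAKE_GNTST_OKAY are illustrative, not the real definitions.

#include <stdio.h>

#define FAKE_GNTST_OKAY 0       /* like GNTST_okay: zero means success */

struct fake_op {
        int status;
};

static void set_op(struct fake_op *op)
{
        /*
         * Preset to an arbitrary positive value, as in the hunk above, so an
         * entry the "hypervisor" never touches cannot be mistaken for success.
         */
        op->status = 1;
}

/* A batch call that bails out before processing any entry. */
static int failing_batch(struct fake_op *ops, unsigned int count)
{
        (void)ops;
        (void)count;
        return -1;
}

int main(void)
{
        struct fake_op op;

        set_op(&op);
        (void)failing_batch(&op, 1);    /* op.status is never written */
        if (op.status != FAKE_GNTST_OKAY)
                printf("unprocessed entry correctly treated as a failure\n");
        return 0;
}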