virtio: last minute bugfixes

Minor bugfixes all over the place.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAl3U6E4PHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpi/oIAJwjsmGhheWTRa+vL/otHu1PjOQ4hziYwu/A
 n80gp3pmPMa4yV5CDZt72qL4jH8llXPpH4gErvXvSyKfrvmelkJBILB53mAa04pq
 BZWhkbcqldhMo35y3Ac+WcRk9zlkGq0NPuUcCb959h+pRXuZWtKWxQ2miwi413e1
 1ecBRO64SvXSphflFfMMCB730aIXC/dsZAtBXqs8v+i4cIz9g8Z4fZS6c38nUA23
 wBleNWylaxHn6UDRZAGRY12JFxFe3QxCP0RKTjwo0eF3HG7IZIGQKHnztjlqD4ko
 ZOQmxPYW2+96XnS8n7y+gA517u10OIVZBocD9FX10c3wvS+Lpxg=
 =KAuZ
 -----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull last minute virtio bugfixes from Michael Tsirkin:
 "Minor bugfixes all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_balloon: fix shrinker count
  virtio_balloon: fix shrinker scan number of pages
  virtio_console: allocate inbufs in add_port() only if it is needed
  virtio_ring: fix return code on DMA mapping fails
Linus Torvalds 2019-11-23 13:02:18 -08:00
commit 6b8a794678
3 changed files with 28 additions and 24 deletions

drivers/char/virtio_console.c

@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
 	port->cons.ws.ws_col = cols;
 }
 
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 {
 	struct port_buffer *buf;
-	unsigned int nr_added_bufs;
+	int nr_added_bufs;
 	int ret;
 
 	nr_added_bufs = 0;
 	do {
 		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
 		if (!buf)
-			break;
+			return -ENOMEM;
 
 		spin_lock_irq(lock);
 		ret = add_inbuf(vq, buf);
 		if (ret < 0) {
 			spin_unlock_irq(lock);
 			free_buf(buf, true);
-			break;
+			return ret;
 		}
 		nr_added_bufs++;
 		spin_unlock_irq(lock);
@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 	char debugfs_name[16];
 	struct port *port;
 	dev_t devt;
-	unsigned int nr_added_bufs;
 	int err;
 
 	port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
 	spin_lock_init(&port->outvq_lock);
 	init_waitqueue_head(&port->waitqueue);
 
-	/* Fill the in_vq with buffers so the host can send us data. */
-	nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
-	if (!nr_added_bufs) {
+	/* We can safely ignore ENOSPC because it means
+	 * the queue already has buffers. Buffers are removed
+	 * only by virtcons_remove(), not by unplug_port()
+	 */
+	err = fill_queue(port->in_vq, &port->inbuf_lock);
+	if (err < 0 && err != -ENOSPC) {
 		dev_err(port->dev, "Error allocating inbufs\n");
-		err = -ENOMEM;
 		goto free_device;
 	}
 
@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
 	INIT_WORK(&portdev->control_work, &control_work_handler);
 
 	if (multiport) {
-		unsigned int nr_added_bufs;
-
 		spin_lock_init(&portdev->c_ivq_lock);
 		spin_lock_init(&portdev->c_ovq_lock);
 
-		nr_added_bufs = fill_queue(portdev->c_ivq,
-					   &portdev->c_ivq_lock);
-		if (!nr_added_bufs) {
+		err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+		if (err < 0) {
 			dev_err(&vdev->dev,
 				"Error allocating buffers for control queue\n");
 			/*
@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
 					   VIRTIO_CONSOLE_DEVICE_READY, 0);
 			/* Device was functional: we need full cleanup. */
 			virtcons_remove(vdev);
-			return -ENOMEM;
+			return err;
 		}
 	} else {
 		/*
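
The new comment in add_port() carries the reasoning behind the virtio_console fix: fill_queue() now reports failure as a negative errno instead of a zero count, and -ENOSPC is benign there because it only means the in_vq kept its buffers from a previous plug (only virtcons_remove() ever drains them, not unplug_port()). A minimal user-space sketch of that return contract follows; the ring, its capacity, and the *_model helpers are invented for illustration, only the error handling mirrors the driver.

/* Sketch only: models the new fill_queue() contract, not the driver itself. */
#include <errno.h>
#include <stdio.h>

#define RING_SIZE 4
static int ring_used = RING_SIZE;	/* pretend the in_vq is already full */

static int add_inbuf_model(void)
{
	if (ring_used == RING_SIZE)
		return -ENOSPC;		/* no room: buffers already queued */
	ring_used++;
	return RING_SIZE - ring_used;	/* remaining free slots */
}

/* New-style fill_queue(): negative errno on failure, buffer count otherwise. */
static int fill_queue_model(void)
{
	int nr_added_bufs = 0, ret;

	do {
		ret = add_inbuf_model();
		if (ret < 0)
			return ret;
		nr_added_bufs++;
	} while (ret > 0);

	return nr_added_bufs;
}

int main(void)
{
	int err = fill_queue_model();

	/* add_port() can ignore -ENOSPC: the queue already has buffers. */
	if (err < 0 && err != -ENOSPC)
		fprintf(stderr, "Error allocating inbufs\n");
	else
		printf("port usable (err=%d)\n", err);
	return 0;
}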

drivers/virtio/virtio_balloon.c

@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
 	return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
 }
 
+static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
+					unsigned long pages_to_free)
+{
+	return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
+		VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
 static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
 					  unsigned long pages_to_free)
 {
@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
 	 * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
 	 * multiple times to deflate pages till reaching pages_to_free.
 	 */
-	while (vb->num_pages && pages_to_free) {
-		pages_freed += leak_balloon(vb, pages_to_free) /
-					VIRTIO_BALLOON_PAGES_PER_PAGE;
-		pages_to_free -= pages_freed;
-	}
+	while (vb->num_pages && pages_freed < pages_to_free)
+		pages_freed += leak_balloon_pages(vb,
+						  pages_to_free - pages_freed);
+
 	update_balloon_size(vb);
 
 	return pages_freed;
@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
 	struct virtio_balloon *vb = container_of(shrinker,
 					struct virtio_balloon, shrinker);
 
-	pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+	pages_to_free = sc->nr_to_scan;
 
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
 		pages_freed = shrink_free_pages(vb, pages_to_free);
@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
 	unsigned long count;
 
 	count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
-	count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+	count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
 
 	return count;
 }
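
Both virtio_balloon fixes are unit conversions around the shrinker, which works in guest pages (PAGE_SIZE units): num_free_page_blocks counts blocks of 2^VIRTIO_BALLOON_FREE_PAGE_ORDER pages, so converting blocks to pages needs a left shift, and sc->nr_to_scan already arrives in the same guest-page unit the count callback reports, so it must not be scaled by VIRTIO_BALLOON_PAGES_PER_PAGE again. A stand-alone sketch of the arithmetic, with the constant values assumed purely for illustration:

/* Sketch only: the constant values below are assumptions, not the kernel's. */
#include <stdio.h>

#define FREE_PAGE_ORDER	10	/* assume 1024 guest pages per hint block */
#define PAGES_PER_PAGE	4	/* assume 4 balloon (4KiB) units per guest page */

int main(void)
{
	unsigned long num_free_page_blocks = 8;
	unsigned long nr_to_scan = 128;		/* already in guest pages */

	/* Count fix: blocks -> pages is a left shift; the old right shift
	 * under-reported the reclaimable pages (here: 0 instead of 8192). */
	printf("count old=%lu new=%lu\n",
	       num_free_page_blocks >> FREE_PAGE_ORDER,
	       num_free_page_blocks << FREE_PAGE_ORDER);

	/* Scan fix: keep nr_to_scan in guest pages; the old code scaled it
	 * up into balloon-page units and then mixed those with the
	 * guest-page counts accumulated in pages_freed. */
	printf("scan  old=%lu new=%lu\n",
	       nr_to_scan * PAGES_PER_PAGE, nr_to_scan);
	return 0;
}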

drivers/virtio/virtio_ring.c

@@ -583,7 +583,7 @@ unmap_release:
 	kfree(desc);
 
 	END_USE(vq);
-	return -EIO;
+	return -ENOMEM;
 }
 
 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
@@ -1085,7 +1085,7 @@ unmap_release:
 	kfree(desc);
 
 	END_USE(vq);
-	return -EIO;
+	return -ENOMEM;
 }
 
 static inline int virtqueue_add_packed(struct virtqueue *_vq,
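
The virtio_ring change only swaps the errno returned when DMA mapping fails inside virtqueue_add(): -ENOMEM presents the failure as a transient out-of-resources condition rather than a fatal device error, so callers can back off and retry instead of treating the device as broken. A hedged user-space sketch of that caller-side distinction; the submit helper below is invented for illustration:

/* Sketch only: models how a caller can treat the two errnos differently. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int submit_buffer_model(bool dma_map_fails)
{
	return dma_map_fails ? -ENOMEM : 0;	/* was -EIO before the fix */
}

int main(void)
{
	switch (submit_buffer_model(true)) {
	case 0:
		printf("buffer queued\n");
		break;
	case -ENOMEM:
		printf("out of resources: stop the queue, retry later\n");
		break;
	default:
		printf("fatal error: reset the device\n");
		break;
	}
	return 0;
}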