Drivers: hv: ring_buffer: remove code duplication from hv_ringbuffer_peek/read()

hv_ringbuffer_peek() does the same as hv_ringbuffer_read(), just without
advancing the read index, so fold both into a common __hv_ringbuffer_read()
helper that takes an 'advance' flag. The only functional change this patch
brings is moving the hv_need_to_signal_on_read() call under ring_lock, but
this function is just a couple of comparisons.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
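
[Editor's note] The refactoring pattern used by this patch is generic: both entry points funnel into one helper, and a boolean decides whether the read index moves. The sketch below is a minimal, self-contained illustration of that pattern on a toy byte ring; the toy_ring_* names and the simplified index handling are hypothetical and not taken from the hv driver, which additionally holds ring_lock and handles wrap-around through its own helpers.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64

struct toy_ring {
	uint8_t data[RING_SIZE];
	size_t read_idx;   /* next byte to consume */
	size_t write_idx;  /* next free slot */
};

static size_t toy_ring_avail(const struct toy_ring *r)
{
	return (r->write_idx + RING_SIZE - r->read_idx) % RING_SIZE;
}

/* Common helper: copy out 'len' bytes; advance the read index only if asked. */
static int __toy_ring_read(struct toy_ring *r, void *buf, size_t len, bool advance)
{
	if (toy_ring_avail(r) < len)
		return -1;	/* not enough data, akin to the -EAGAIN return in the driver */

	for (size_t i = 0; i < len; i++)
		((uint8_t *)buf)[i] = r->data[(r->read_idx + i) % RING_SIZE];

	if (!advance)		/* peek: leave the read index untouched */
		return 0;

	r->read_idx = (r->read_idx + len) % RING_SIZE;
	return 0;
}

/* Thin wrappers, the same shape the patch gives hv_ringbuffer_peek()/read(). */
static int toy_ring_peek(struct toy_ring *r, void *buf, size_t len)
{
	return __toy_ring_read(r, buf, len, false);
}

static int toy_ring_read(struct toy_ring *r, void *buf, size_t len)
{
	return __toy_ring_read(r, buf, len, true);
}

int main(void)
{
	struct toy_ring r = { .read_idx = 0, .write_idx = 0 };
	const char msg[] = "hello";
	char out[sizeof(msg)] = { 0 };

	/* Produce a few bytes. */
	memcpy(r.data, msg, sizeof(msg));
	r.write_idx = sizeof(msg);

	toy_ring_peek(&r, out, sizeof(msg));	/* read index stays put */
	printf("peek: %s, avail after: %zu\n", out, toy_ring_avail(&r));

	toy_ring_read(&r, out, sizeof(msg));	/* read index moves */
	printf("read: %s, avail after: %zu\n", out, toy_ring_avail(&r));
	return 0;
}

Compiled with any C99 compiler, peek leaves the available-byte count untouched while read drains it; that is exactly the behavioural split the 'advance' parameter encodes in the diff below.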
Vitaly Kuznetsov 2015-12-14 19:01:59 -08:00 committed by Greg Kroah-Hartman
parent 45870a4413
commit b5f53dde8d
1 changed file with 25 additions and 43 deletions

drivers/hv/ring_buffer.c

@@ -380,47 +380,9 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	return 0;
 }
 
-/* Read without advancing the read index. */
-int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
-		       void *Buffer, u32 buflen)
-{
-	u32 bytes_avail_towrite;
-	u32 bytes_avail_toread;
-	u32 next_read_location = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&Inring_info->ring_lock, flags);
-
-	hv_get_ringbuffer_availbytes(Inring_info,
-				&bytes_avail_toread,
-				&bytes_avail_towrite);
-
-	/* Make sure there is something to read */
-	if (bytes_avail_toread < buflen) {
-		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
-
-		return -EAGAIN;
-	}
-
-	/* Convert to byte offset */
-	next_read_location = hv_get_next_read_location(Inring_info);
-
-	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
-						Buffer,
-						buflen,
-						next_read_location);
-
-	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
-
-	return 0;
-}
-
-/* Read and advance the read index. */
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
-		   u32 buflen, u32 offset, bool *signal)
+static inline int __hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+				       void *buffer, u32 buflen, u32 offset,
+				       bool *signal, bool advance)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
@@ -452,6 +414,9 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 						buflen,
 						next_read_location);
 
+	if (!advance)
+		goto out_unlock;
+
 	next_read_location = hv_copyfrom_ringbuffer(inring_info,
 						&prev_indices,
 						sizeof(u64),
@@ -467,9 +432,26 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
 
-	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
-
 	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
+out_unlock:
+	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 	return 0;
 }
+
+/* Read from ring buffer without advancing the read index. */
+int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
+		       void *buffer, u32 buflen)
+{
+	return __hv_ringbuffer_read(inring_info, buffer, buflen,
+				    0, NULL, false);
+}
+
+/* Read from ring buffer and advance the read index. */
+int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+		       void *buffer, u32 buflen, u32 offset,
+		       bool *signal)
+{
+	return __hv_ringbuffer_read(inring_info, buffer, buflen,
+				    offset, signal, true);
+}