Drivers: hv: vmbus: Move some ring buffer functions to hyperv.h

In preparation for implementing APIs for in-place consumption of VMBUS
packets, move some ring buffer functionality into hyperv.h

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
K. Y. Srinivasan 2016-04-02 17:59:50 -07:00 committed by Greg Kroah-Hartman
parent 5cc472477f
commit 687f32e6d9
2 changed files with 54 additions and 55 deletions

@@ -84,52 +84,6 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
        return false;
}

/*
 * To optimize flow management on the send side, when the sender is
 * blocked because of a lack of sufficient space in the ring buffer,
 * the consumer of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: the size in bytes that the producer is trying
 *    to send.
 * 2. The feature bit feat_pending_send_sz, which indicates whether the
 *    consumer of the ring will signal when the ring state transitions
 *    from being full to a state where there is room for the producer
 *    to send the pending packet.
 */
static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
        u32 cur_write_sz;
        u32 pending_sz;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * The reason for the barrier: if the read of pending_send_sz (in
         * this function) were reordered before we commit the new read index
         * (in the calling function), we could have a problem. If the host
         * were to set pending_send_sz after we have sampled it, and we go
         * to sleep before we commit the read index, we could miss sending
         * the interrupt. A full memory barrier addresses this.
         */
        virt_mb();

        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        /* If the other end is not blocked on write, don't bother. */
        if (pending_sz == 0)
                return false;

        cur_write_sz = hv_get_bytes_to_write(rbi);

        if (cur_write_sz >= pending_sz)
                return true;

        return false;
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
@@ -180,15 +134,6 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
        ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)

@@ -1366,4 +1366,58 @@ extern __u32 vmbus_proto_version;
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
                                  const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

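As a rough illustration of the in-place consumption mentioned in the commit message, exporting hv_get_ring_buffer() lets a consumer locate the next packet descriptor directly in the ring instead of copying it out first. The helper below is a hypothetical sketch, not part of this patch; it assumes the declarations in this header and ignores wrap-around and size validation.

static inline struct vmpacket_descriptor *
hv_pkt_peek_sketch(struct hv_ring_buffer_info *rbi)
{
        /* Data area of the ring, as exposed by this patch. */
        void *ring = hv_get_ring_buffer(rbi);

        /* The descriptor of the next unread packet starts at read_index. */
        return (struct vmpacket_descriptor *)
                ((u8 *)ring + rbi->ring_buffer->read_index);
}
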
/*
 * To optimize flow management on the send side, when the sender is
 * blocked because of a lack of sufficient space in the ring buffer,
 * the consumer of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: the size in bytes that the producer is trying
 *    to send.
 * 2. The feature bit feat_pending_send_sz, which indicates whether the
 *    consumer of the ring will signal when the ring state transitions
 *    from being full to a state where there is room for the producer
 *    to send the pending packet.
 */
static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
        u32 cur_write_sz;
        u32 pending_sz;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * The reason for the barrier: if the read of pending_send_sz (in
         * this function) were reordered before we commit the new read index
         * (in the calling function), we could have a problem. If the host
         * were to set pending_send_sz after we have sampled it, and we go
         * to sleep before we commit the read index, we could miss sending
         * the interrupt. A full memory barrier addresses this.
         */
        virt_mb();

        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        /* If the other end is not blocked on write, don't bother. */
        if (pending_sz == 0)
                return false;

        cur_write_sz = hv_get_bytes_to_write(rbi);

        if (cur_write_sz >= pending_sz)
                return true;

        return false;
}

#endif /* _HYPERV_H */
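
For context, a hedged sketch of the calling sequence these helpers are meant for: the consumer finishes reading, commits the new read index, and only then checks whether to signal the host; the virt_mb() inside hv_need_to_signal_on_read() keeps the pending_send_sz read from being reordered before that commit. The function name hv_consume_done_sketch() and the direct store to read_index are illustrative stand-ins for the driver's real ring-buffer read path, not part of this patch.

static void hv_consume_done_sketch(struct vmbus_channel *channel,
                                   u32 next_read_location)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /* Publish the space we just freed by committing the read index. */
        rbi->ring_buffer->read_index = next_read_location;

        /*
         * The full barrier inside hv_need_to_signal_on_read() orders the
         * store above before the read of pending_send_sz, so a blocked
         * host cannot be missed.
         */
        if (hv_need_to_signal_on_read(rbi))
                vmbus_set_event(channel);
}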