
Drivers: hv: Eliminate the channel spinlock in the callback path

By ensuring that we set the callback handler to NULL in the channel close
path on the same CPU that the channel is bound to, we can eliminate the
inbound_lock acquisition and release in a performance-critical path.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
K. Y. Srinivasan 2014-04-08 18:45:53 -07:00 committed by Greg Kroah-Hartman
parent e4d8270e60
commit d3ba720dd5
4 changed files with 25 additions and 15 deletions
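
For readers skimming the diff, the pattern being introduced is: instead of
guarding onchannel_callback with inbound_lock, the close path clears the
pointer on the CPU the channel is bound to, so the write is serialized
against the interrupt-context read by CPU locality alone. A minimal sketch
of that idiom, assuming a kernel context (struct my_channel, my_channel_close,
and reset_cb_on_cpu are illustrative stand-ins, not the real VMBus code;
smp_call_function_single() and smp_processor_id() are the real kernel APIs):

	#include <linux/smp.h>

	/* Illustrative stand-in for the real channel structure. */
	struct my_channel {
		int target_cpu;              /* CPU the channel interrupt runs on */
		void (*callback)(void *ctx); /* read without a lock in IRQ context */
	};

	static void reset_cb_on_cpu(void *arg)
	{
		struct my_channel *chan = arg;

		/*
		 * Executes on chan->target_cpu, so it cannot race the
		 * interrupt handler running on that same CPU.
		 */
		chan->callback = NULL;
	}

	static void my_channel_close(struct my_channel *chan)
	{
		if (chan->target_cpu != smp_processor_id())
			/* wait == true: return only after the IPI handler ran. */
			smp_call_function_single(chan->target_cpu, reset_cb_on_cpu,
						 chan, true);
		else
			reset_cb_on_cpu(chan);
	}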

drivers/hv/channel.c

@@ -471,18 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
 }
 EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
 
+static void reset_channel_cb(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+
+	channel->onchannel_callback = NULL;
+}
+
 static void vmbus_close_internal(struct vmbus_channel *channel)
 {
 	struct vmbus_channel_close_channel *msg;
 	int ret;
-	unsigned long flags;
 
 	channel->state = CHANNEL_OPEN_STATE;
 	channel->sc_creation_callback = NULL;
 
 	/* Stop callback and cancel the timer asap */
-	spin_lock_irqsave(&channel->inbound_lock, flags);
-	channel->onchannel_callback = NULL;
-	spin_unlock_irqrestore(&channel->inbound_lock, flags);
+	if (channel->target_cpu != smp_processor_id())
+		smp_call_function_single(channel->target_cpu, reset_channel_cb,
+					 channel, true);
+	else
+		reset_channel_cb(channel);
 
 	/* Send a closing message */

drivers/hv/channel_mgmt.c

@@ -365,7 +365,7 @@ static u32 next_vp;
  * performance critical channels (IDE, SCSI and Network) will be uniformly
  * distributed across all available CPUs.
  */
-static u32 get_vp_index(uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid)
 {
 	u32 cur_cpu;
 	int i;
@@ -387,10 +387,13 @@ static u32 get_vp_index(uuid_le *type_guid)
 		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
-		return 0;
+		channel->target_cpu = 0;
+		channel->target_vp = 0;
+		return;
 	}
 	cur_cpu = (++next_vp % max_cpus);
-	return hv_context.vp_index[cur_cpu];
+	channel->target_cpu = cur_cpu;
+	channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
 /*
@@ -438,7 +441,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 			offer->connection_id;
 	}
 
-	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+	init_vp_index(newchannel, &offer->offer.if_type);
 
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
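
The selection policy init_vp_index() implements is a simple round-robin over
the guest's CPUs, with non-critical channels pinned to CPU 0. A hedged,
self-contained sketch of the same logic (fake_channel, perf_critical, and
vp_table are illustrative names; next_vp and max_cpus mirror the hunk above):

	#include <linux/types.h>

	static u32 next_vp;	/* illustrative: mirrors the counter above */

	struct fake_channel {	/* illustrative stand-in */
		u32 target_cpu;	/* guest CPU the channel is bound to */
		u32 target_vp;	/* hypervisor VP id for that CPU */
	};

	static void assign_vp_index(struct fake_channel *chan, bool perf_critical,
				    u32 max_cpus, const u32 *vp_table)
	{
		u32 cur_cpu;

		if (!perf_critical || max_cpus == 1) {
			/* Non-critical channels stay bound to CPU 0, as before. */
			chan->target_cpu = 0;
			chan->target_vp = 0;
			return;
		}
		cur_cpu = (++next_vp % max_cpus);
		chan->target_cpu = cur_cpu;
		chan->target_vp = vp_table[cur_cpu];
	}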

drivers/hv/connection.c

@@ -277,7 +277,6 @@ struct vmbus_channel *relid2channel(u32 relid)
 static void process_chn_event(u32 relid)
 {
 	struct vmbus_channel *channel;
-	unsigned long flags;
 	void *arg;
 	bool read_state;
 	u32 bytes_to_read;
@@ -296,13 +295,12 @@ static void process_chn_event(u32 relid)
 
 	/*
 	 * A channel once created is persistent even when there
 	 * is no driver handling the device. An unloading driver
-	 * sets the onchannel_callback to NULL under the
-	 * protection of the channel inbound_lock. Thus, checking
-	 * and invoking the driver specific callback takes care of
-	 * orderly unloading of the driver.
+	 * sets the onchannel_callback to NULL on the same CPU
+	 * as where this interrupt is handled (in an interrupt context).
+	 * Thus, checking and invoking the driver specific callback takes
+	 * care of orderly unloading of the driver.
 	 */
-	spin_lock_irqsave(&channel->inbound_lock, flags);
 	if (channel->onchannel_callback != NULL) {
 		arg = channel->channel_callback_context;
 		read_state = channel->batched_reading;
@@ -327,7 +325,6 @@ static void process_chn_event(u32 relid)
 		pr_err("no channel callback for relid - %u\n", relid);
 	}
 
-	spin_unlock_irqrestore(&channel->inbound_lock, flags);
 }
 
 /*
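
On the read side, process_chn_event() can now test and invoke the callback
without inbound_lock because the only writer runs on this same CPU. A small
illustrative sketch of that lock-free reader (struct my_channel and
handle_chn_event are hypothetical names, not the kernel's):

	#include <linux/types.h>
	#include <linux/printk.h>

	struct my_channel {			/* illustrative stand-in */
		void (*callback)(void *ctx);	/* cleared only on this CPU */
		void *callback_ctx;
	};

	/* Runs in interrupt context on the channel's bound CPU. */
	static void handle_chn_event(struct my_channel *chan, u32 relid)
	{
		/*
		 * No lock needed: the close path clears chan->callback on
		 * this very CPU, so we observe either the live callback or
		 * NULL, never a half-torn-down channel.
		 */
		if (chan->callback != NULL)
			chan->callback(chan->callback_ctx);
		else
			pr_err("no channel callback for relid - %u\n", relid);
	}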

include/linux/hyperv.h

@@ -696,6 +696,8 @@ struct vmbus_channel {
 	 * preserve the earlier behavior.
 	 */
 	u32 target_vp;
+	/* The corresponding CPUID in the guest */
+	u32 target_cpu;
 	/*
 	 * Support for sub-channels. For high performance devices,
 	 * it will be useful to have multiple sub-channels to support