
Drivers: hv: Add code to distribute channel interrupt load

Implement a simple policy for distributing incoming interrupt load.
We classify channels as (a) performance-critical and (b) not
performance-critical. All non-performance-critical channels are
bound to the boot CPU. Performance-critical channels are bound to
the remaining available CPUs on a round-robin basis.
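
For illustration, a minimal user-space sketch of this policy follows.
The helper name assign_channel_cpu, the round-robin cursor and the
fixed CPU count are made up for the example; the actual in-kernel
implementation is get_vp_index() in the diff below.

#include <stdbool.h>
#include <stdio.h>

static unsigned int next_cpu;   /* round-robin cursor for perf-critical channels */

/* Pick a target CPU for a newly offered channel. */
static unsigned int assign_channel_cpu(bool perf_critical, unsigned int num_cpus)
{
        if (!perf_critical)
                return 0;       /* non-critical channels stay on the boot CPU */
        return ++next_cpu % num_cpus;   /* spread critical channels round-robin */
}

int main(void)
{
        const unsigned int num_cpus = 4;        /* assume four online CPUs */
        bool offers[] = { true, false, true, true, false, true };
        unsigned int i;

        for (i = 0; i < sizeof(offers) / sizeof(offers[0]); i++)
                printf("channel %u -> cpu %u\n", i,
                       assign_channel_cpu(offers[i], num_cpus));
        return 0;
}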

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
K. Y. Srinivasan 2012-12-01 06:46:50 -08:00 committed by Greg Kroah-Hartman
parent 6552ecd70c
commit a119845f6e
1 changed file with 84 additions and 1 deletion

@@ -257,6 +257,89 @@ static void vmbus_process_offer(struct work_struct *work)
        }
}

enum {
        IDE = 0,
        SCSI,
        NIC,
        MAX_PERF_CHN,
};

/*
 * This is an array of channels (devices) that are performance critical.
 * We attempt to distribute the interrupt load for these devices across
 * all available CPUs.
 */
static const uuid_le hp_devs[] = {
        /* {32412632-86cb-44a2-9b5c-50d1417354f5} */
        /* IDE */
        {
                .b = {
                        0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
                        0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
                }
        },
        /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
        /* Storage - SCSI */
        {
                .b = {
                        0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
                        0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
                }
        },
        /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
        /* Network */
        {
                .b = {
                        0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
                        0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
                }
        },
};

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static u32 next_vp;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU. We
 * implement here a simple round robin scheme for distributing
 * the interrupt load.
 * We will bind channels that are not performance critical to cpu 0 and
 * performance critical channels (IDE, SCSI and Network) will be uniformly
 * distributed across all available CPUs.
 */
static u32 get_vp_index(uuid_le *type_guid)
{
        u32 cur_cpu;
        int i;
        bool perf_chn = false;
        u32 max_cpus = num_online_cpus();

        for (i = IDE; i < MAX_PERF_CHN; i++) {
                if (!memcmp(type_guid->b, hp_devs[i].b,
                            sizeof(uuid_le))) {
                        perf_chn = true;
                        break;
                }
        }

        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
                /*
                 * Prior to win8, all channel interrupts are
                 * delivered on cpu 0.
                 * Also if the channel is not a performance critical
                 * channel, bind it to cpu 0.
                 */
                return 0;
        }
        cur_cpu = (++next_vp % max_cpus);
        return hv_context.vp_index[cur_cpu];
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
@@ -302,7 +385,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
                        offer->connection_id;
        }

        newchannel->target_vp = 0;
        newchannel->target_vp = get_vp_index(&offer->offer.if_type);

        memcpy(&newchannel->offermsg, offer,
               sizeof(struct vmbus_channel_offer_channel));