
Merge branch 'net-ipa-a-mix-of-small-improvements'

Alex Elder says:

====================
net: ipa: a mix of small improvements

Version 2 of this series restructures a couple of the changed
functions (in patches 1 and 2) to avoid blocks of indented code
by returning early when possible, as suggested by Jakub.  The
description of the first patch was changed as a result, to better
reflect what the updated patch does.  It also fixes one spot I
identified when updating the code, where gsi_channel_stop() was
doing the wrong thing on error.

The original description for this series is below.

This series contains a set of mostly unrelated code cleanups.

The first two are things I wanted to do in a series that updated
some NAPI code recently.  I didn't want to change things in a way
that affected existing testing, so I set these aside for later
(i.e., now).

The third makes a change to event ring handling that's similar to
what was done a while back for channels.  There's little benefit to
caching the current state of an event ring, so with this we'll just
fetch the state from hardware whenever we need it.

The fourth patch removes the definitions of two unused symbols.

The fifth replaces a count that is always 0 or 1 with a Boolean.

The sixth removes a build-time validation check that doesn't really
provide benefit.

And the last one fixes a problem (in two spots) that could cause a
build-time check to fail "bogusly".
====================

Link: https://lore.kernel.org/r/20210205221100.1738-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2021-02-06 14:56:22 -08:00
commit 163a180213
5 changed files with 73 additions and 67 deletions

drivers/net/ipa/gsi.c

@@ -408,30 +408,31 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 		return;
 
 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
-		opcode, evt_ring_id, evt_ring->state);
+		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
 }
 
 /* Allocate an event ring in NOT_ALLOCATED state */
 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+	enum gsi_evt_ring_state state;
 
 	/* Get initial event ring state */
-	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
 		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return -EINVAL;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_ALLOCATED)
 		return 0;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 
 	return -EIO;
 }
@@ -439,45 +440,48 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-	enum gsi_evt_ring_state state = evt_ring->state;
+	enum gsi_evt_ring_state state;
 
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
 	    state != GSI_EVT_RING_STATE_ERROR) {
 		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_ALLOCATED)
 		return;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 }
 
 /* Issue a hardware de-allocation request for an allocated event ring */
 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+	enum gsi_evt_ring_state state;
 
-	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
 		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
-			evt_ring_id, evt_ring->state);
+			evt_ring_id, state);
 		return;
 	}
 
 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
 
 	/* If successful the event ring state will have changed */
-	if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+	state = gsi_evt_ring_state(gsi, evt_ring_id);
+	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
 		return;
 
 	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
-		evt_ring_id, evt_ring->state);
+		evt_ring_id, state);
 }
 
 /* Fetch the current state of a channel from hardware */
@@ -910,11 +914,8 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 static int gsi_channel_stop_retry(struct gsi_channel *channel)
 {
 	u32 retries = GSI_CHANNEL_STOP_RETRIES;
-	struct gsi *gsi = channel->gsi;
 	int ret;
 
-	mutex_lock(&gsi->mutex);
-
 	do {
 		ret = gsi_channel_stop_command(channel);
 		if (ret != -EAGAIN)
@@ -922,22 +923,25 @@ static int gsi_channel_stop_retry(struct gsi_channel *channel)
 		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
 	} while (retries--);
 
-	mutex_unlock(&gsi->mutex);
-
 	return ret;
 }
 
 static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
 {
+	struct gsi *gsi = channel->gsi;
 	int ret;
 
 	/* Wait for any underway transactions to complete before stopping. */
 	gsi_channel_trans_quiesce(channel);
 
-	ret = stop ? gsi_channel_stop_retry(channel) : 0;
-	/* Finally, ensure NAPI polling has finished. */
-	if (!ret)
-		napi_synchronize(&channel->napi);
+	if (!stop)
+		return 0;
+
+	mutex_lock(&gsi->mutex);
+
+	ret = gsi_channel_stop_retry(channel);
+
+	mutex_unlock(&gsi->mutex);
 
 	return ret;
 }
@@ -948,11 +952,11 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 	int ret;
 
-	/* Only disable the completion interrupt if stop is successful */
 	ret = __gsi_channel_stop(channel, true);
 	if (ret)
 		return ret;
 
+	/* Disable the completion interrupt and NAPI if successful */
 	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
 	napi_disable(&channel->napi);
@@ -981,8 +985,16 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	int ret;
 
-	return __gsi_channel_stop(channel, stop);
+	ret = __gsi_channel_stop(channel, stop);
+	if (ret)
+		return ret;
+
+	/* Ensure NAPI polling has finished. */
+	napi_synchronize(&channel->napi);
+
+	return 0;
 }
 
 /* Resume a suspended channel (starting will be requested if STOPPED) */
@@ -1099,7 +1111,6 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
 		event_mask ^= BIT(evt_ring_id);
 
 		evt_ring = &gsi->evt_ring[evt_ring_id];
-		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
 
 		complete(&evt_ring->completion);
 	}

drivers/net/ipa/gsi.h

@@ -142,7 +142,6 @@ enum gsi_evt_ring_state {
 struct gsi_evt_ring {
 	struct gsi_channel *channel;
 	struct completion completion;	/* signals event ring state changes */
-	enum gsi_evt_ring_state state;
 	struct gsi_ring ring;
 };
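
The hunks above, together with this field removal, make up the "don't cache event ring state" change from the cover letter: each decision now starts from a fresh hardware read. A small illustrative sketch of the pattern follows, in plain user-space C with a made-up register accessor (hw_ring_state()) rather than the driver's GSI register code:

#include <stdint.h>
#include <stdio.h>

enum ring_state { RING_NOT_ALLOCATED, RING_ALLOCATED, RING_ERROR };

/* Hypothetical stand-in for reading the state field of a ring context
 * register; the real driver does an MMIO read and extracts a bit field.
 */
static enum ring_state hw_ring_state(unsigned int ring_id)
{
	(void)ring_id;
	return RING_ALLOCATED;		/* pretend the hardware reports this */
}

/* Fetch the state from hardware right before acting on it, instead of
 * trusting a cached copy that may have gone stale.
 */
static int ring_reset(unsigned int ring_id)
{
	enum ring_state state = hw_ring_state(ring_id);

	if (state != RING_ALLOCATED && state != RING_ERROR) {
		fprintf(stderr, "ring %u bad state %d before reset\n",
			ring_id, state);
		return -1;
	}

	/* ...issue the reset command, then re-read the state to confirm... */
	return 0;
}

int main(void)
{
	return ring_reset(0) ? 1 : 0;
}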

drivers/net/ipa/gsi_reg.h

@@ -59,16 +59,6 @@
 #define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
 			(0x0000c01c + 0x1000 * (ee))
 
-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
-			GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
-			(0x0000c028 + 0x1000 * (ee))
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
-			GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
-			(0x0000c02c + 0x1000 * (ee))
-
 #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
 		GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
 #define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \

drivers/net/ipa/ipa_endpoint.c

@@ -174,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 	enum ipa_endpoint_name name;
 	u32 limit;
 
-	/* Not sure where this constraint come from... */
-	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 			count, IPA_ENDPOINT_COUNT);
@@ -1020,31 +1017,34 @@ err_free_pages:
 }
 
 /**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint:	Endpoint to be replenished
- * @count:	Number of buffers to send to hardware
+ * @add_one:	Whether this is replacing a just-consumed buffer
  *
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint.  These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware.  Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 {
 	struct gsi *gsi;
 	u32 backlog;
 
 	if (!endpoint->replenish_enabled) {
-		if (count)
-			atomic_add(count, &endpoint->replenish_saved);
+		if (add_one)
+			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
 
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	return;
@@ -1052,8 +1052,8 @@ try_again_later:
 	/* The last one didn't succeed, so fix the backlog */
 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
@@ -1080,7 +1080,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, 0);
+		ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1099,7 +1099,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, 0);
+	ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1300,7 +1300,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, 1);
+	ipa_endpoint_replenish(endpoint, true);
 
 	if (trans->cancelled)
 		return;
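
The kernel-doc added above describes the "saved" versus "backlog" bookkeeping that the new add_one flag feeds. As a rough user-space sketch of that idea (C11 atomics, hypothetical names, not the driver's code): requests made while replenishing is disabled accumulate in a saved counter and are folded into the backlog when it is re-enabled.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical endpoint bookkeeping; the driver uses atomic_t fields */
struct endpoint {
	bool replenish_enabled;
	atomic_uint replenish_saved;	/* requests made while disabled */
	atomic_uint replenish_backlog;	/* buffers the hardware can still take */
};

/* Note that one more receive buffer could be handed to the hardware */
static void replenish(struct endpoint *ep, bool add_one)
{
	if (!ep->replenish_enabled) {
		if (add_one)
			atomic_fetch_add(&ep->replenish_saved, 1);
		return;
	}

	if (add_one)
		atomic_fetch_add(&ep->replenish_backlog, 1);
	/* ...then supply buffers to hardware while the backlog is non-zero... */
}

/* Re-enable replenishing and move any saved requests onto the backlog */
static void replenish_enable(struct endpoint *ep)
{
	unsigned int saved;

	ep->replenish_enabled = true;
	saved = atomic_exchange(&ep->replenish_saved, 0);
	atomic_fetch_add(&ep->replenish_backlog, saved);
}

int main(void)
{
	struct endpoint ep = { .replenish_enabled = false };

	replenish(&ep, true);	/* saved while disabled */
	replenish_enable(&ep);	/* saved -> backlog */
	replenish(&ep, true);	/* goes straight to the backlog */

	printf("backlog = %u\n", atomic_load(&ep.replenish_backlog));
	return 0;
}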

drivers/net/ipa/ipa_reg.h

@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
 static inline u32 ipa_header_size_encoded(enum ipa_version version,
 					  u32 header_size)
 {
+	u32 size = header_size & field_mask(HDR_LEN_FMASK);
 	u32 val;
 
-	val = u32_encode_bits(header_size, HDR_LEN_FMASK);
-	if (version < IPA_VERSION_4_5)
+	val = u32_encode_bits(size, HDR_LEN_FMASK);
+	if (version < IPA_VERSION_4_5) {
+		/* ipa_assert(header_size == size); */
 		return val;
+	}
 
 	/* IPA v4.5 adds a few more most-significant bits */
-	header_size >>= hweight32(HDR_LEN_FMASK);
-	val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+	size = header_size >> hweight32(HDR_LEN_FMASK);
+	val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
 
 	return val;
 }
@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
 static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
 					      u32 offset)
 {
+	u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
 	u32 val;
 
-	val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
-	if (version < IPA_VERSION_4_5)
+	val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+	if (version < IPA_VERSION_4_5) {
+		/* ipa_assert(offset == off); */
 		return val;
+	}
 
 	/* IPA v4.5 adds a few more most-significant bits */
-	offset >>= hweight32(HDR_OFST_METADATA_FMASK);
-	val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+	off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
+	val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
 
 	return val;
 }
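
These last two hunks are the fix for the build-time check that could fail "bogusly": u32_encode_bits() can check at compile time that a constant value fits its field, so the full header size (which legitimately spills into the MSB field on IPA v4.5 and later) is masked down before the low bits are encoded. Below is a stand-alone sketch of that split-field encoding, with made-up mask values and simplified stand-ins for u32_encode_bits() and hweight32():

#include <stdint.h>
#include <stdio.h>

/* Made-up field masks for illustration: a 6-bit low field (bits 0-5) and
 * a 2-bit "MSB" extension field (bits 8-9), standing in for HDR_LEN_FMASK
 * and HDR_LEN_MSB_FMASK.
 */
#define LEN_FMASK	0x0000003fu
#define LEN_MSB_FMASK	0x00000300u

/* Simplified stand-in for hweight32(): number of bits set in a mask */
static unsigned int mask_width(uint32_t mask)
{
	unsigned int n = 0;

	for (; mask; mask >>= 1)
		n += mask & 1u;
	return n;
}

/* Simplified stand-in for u32_encode_bits(): shift a value under a mask.
 * The kernel helper additionally rejects, at build time, constant values
 * that do not fit the mask -- which is why the caller masks first.
 */
static uint32_t encode_bits(uint32_t value, uint32_t mask)
{
	return (value << __builtin_ctz(mask)) & mask;
}

/* Encode a size the way IPA v4.5 does: low bits in one field, the
 * remaining high bits in a separate MSB field.
 */
static uint32_t header_size_encoded(uint32_t header_size)
{
	uint32_t size = header_size & LEN_FMASK;	/* low field starts at bit 0 */
	uint32_t val = encode_bits(size, LEN_FMASK);

	size = header_size >> mask_width(LEN_FMASK);
	val |= encode_bits(size, LEN_MSB_FMASK);

	return val;
}

int main(void)
{
	/* 0x8a needs 8 bits: 0x0a lands in the low field, 0x2 in the MSB field */
	printf("encoded: 0x%08x\n", header_size_encoded(0x8a));
	return 0;
}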