A mutex ensures we never submit more than one GSI command of any kind
at once.  This means the per-channel and per-event ring completion
structures provide no benefit.

Instead, just use the single (existing) GSI completion to signal the
completion of GSI commands of all types.

This makes gsi_evt_ring_init() a trivial function with no inverse, so
open-code it in its sole caller and get rid of the function.

Signed-off-by: Alex Elder <elder@xxxxxxxxxx>
---
 drivers/net/ipa/gsi.c | 44 +++++++++++--------------------------------
 drivers/net/ipa/gsi.h |  5 +----
 2 files changed, 12 insertions(+), 37 deletions(-)

diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb96..b611e39167fe0 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -339,10 +339,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
  * completion to be signaled.  Returns true if the command completes
  * or false if it times out.
  */
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
 {
         unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+        struct completion *completion = &gsi->completion;
 
         reinit_completion(completion);
 
@@ -366,8 +366,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
                                  enum gsi_evt_cmd_opcode opcode)
 {
-        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-        struct completion *completion = &evt_ring->completion;
         struct device *dev = gsi->dev;
         bool timeout;
         u32 val;
@@ -378,7 +376,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
         val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
         val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
 
-        timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+        timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
 
         gsi_irq_ev_ctrl_disable(gsi);
 
@@ -478,7 +476,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 static void
 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 {
-        struct completion *completion = &channel->completion;
         u32 channel_id = gsi_channel_id(channel);
         struct gsi *gsi = channel->gsi;
         struct device *dev = gsi->dev;
@@ -490,7 +487,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
         val = u32_encode_bits(channel_id, CH_CHID_FMASK);
         val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
 
-        timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+        timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
 
         gsi_irq_ch_ctrl_disable(gsi);
 
@@ -1074,13 +1071,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
 
         while (channel_mask) {
                 u32 channel_id = __ffs(channel_mask);
-                struct gsi_channel *channel;
 
                 channel_mask ^= BIT(channel_id);
 
-                channel = &gsi->channel[channel_id];
-
-                complete(&channel->completion);
+                complete(&gsi->completion);
         }
 }
 
@@ -1094,13 +1088,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
 
         while (event_mask) {
                 u32 evt_ring_id = __ffs(event_mask);
-                struct gsi_evt_ring *evt_ring;
 
                 event_mask ^= BIT(evt_ring_id);
 
-                evt_ring = &gsi->evt_ring[evt_ring_id];
-
-                complete(&evt_ring->completion);
+                complete(&gsi->completion);
         }
 }
 
@@ -1110,7 +1101,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
 {
         if (code == GSI_OUT_OF_RESOURCES) {
                 dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
-                complete(&gsi->channel[channel_id].completion);
+                complete(&gsi->completion);
                 return;
         }
 
@@ -1127,7 +1118,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
                 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
                 u32 channel_id = gsi_channel_id(evt_ring->channel);
 
-                complete(&evt_ring->completion);
+                complete(&gsi->completion);
                 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
                         channel_id);
                 return;
@@ -1651,7 +1642,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
                                enum gsi_generic_cmd_opcode opcode)
 {
-        struct completion *completion = &gsi->completion;
         bool timeout;
         u32 val;
 
@@ -1675,7 +1665,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
         val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
         val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
 
-        timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+        timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
 
         /* Disable the GP_INT1 IRQ type again */
         iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1975,18 +1965,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
         gsi_evt_ring_id_free(gsi, evt_ring_id);
 }
 
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
-        u32 evt_ring_id = 0;
-
-        gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
-        gsi->ieob_enabled_bitmap = 0;
-        do
-                init_completion(&gsi->evt_ring[evt_ring_id].completion);
-        while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
 static bool gsi_channel_data_valid(struct gsi *gsi,
                                    const struct ipa_gsi_endpoint_data *data)
 {
@@ -2069,7 +2047,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
         channel->tlv_count = data->channel.tlv_count;
         channel->tre_count = tre_count;
         channel->event_count = data->channel.event_count;
-        init_completion(&channel->completion);
 
         ret = gsi_channel_evt_ring_init(channel);
         if (ret)
@@ -2129,7 +2106,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
         /* IPA v4.2 requires the AP to allocate channels for the modem */
         modem_alloc = gsi->version == IPA_VERSION_4_2;
 
-        gsi_evt_ring_init(gsi);         /* No matching exit required */
+        gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+        gsi->ieob_enabled_bitmap = 0;
 
         /* The endpoint data array is indexed by endpoint name */
         for (i = 0; i < count; i++) {
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 88b80dc3db79f..ccaa333e37620 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -114,8 +114,6 @@ struct gsi_channel {
         u16 tre_count;
         u16 event_count;
 
-        struct completion completion;   /* signals channel command completion */
-
         struct gsi_ring tre_ring;
         u32 evt_ring_id;
 
@@ -141,7 +139,6 @@ enum gsi_evt_ring_state {
 
 struct gsi_evt_ring {
         struct gsi_channel *channel;
-        struct completion completion;   /* signals event ring state changes */
         struct gsi_ring ring;
 };
 
@@ -160,7 +157,7 @@ struct gsi {
         u32 modem_channel_bitmap;       /* modem channels to allocate */
         u32 type_enabled_bitmap;        /* GSI IRQ types enabled */
         u32 ieob_enabled_bitmap;        /* IEOB IRQ enabled (event rings) */
-        struct completion completion;   /* for global EE commands */
+        struct completion completion;   /* Signals GSI command completion */
         int result;                     /* Negative errno (generic commands) */
         struct mutex mutex;             /* protects commands, programming */
 };
-- 
2.32.0
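
For readers outside the IPA driver, the serialization argument above can be
illustrated with a self-contained userspace sketch: a pthread mutex stands in
for gsi->mutex, and a condition variable plus flag stands in for the single
struct completion.  Everything below (submit_command(), irq_thread(), and so
on) is hypothetical and exists only for this illustration; it is not driver
code, and gsi_command()'s timeout handling is omitted for brevity.

/*
 * Illustrative userspace analogue only -- not IPA driver code.  Because a
 * single mutex serializes command submission, one shared "completion" is
 * enough to signal whichever command is currently outstanding, no matter
 * what type of command it is.  Build with: gcc -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cmd_mutex = PTHREAD_MUTEX_INITIALIZER;  /* role of gsi->mutex */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;    /* role of gsi->completion */
static bool done;

/* Stand-in for the interrupt handler: completes the one shared completion */
static void *irq_thread(void *arg)
{
        (void)arg;
        usleep(1000);                   /* pretend the command took 1 ms */
        pthread_mutex_lock(&done_lock);
        done = true;
        pthread_cond_signal(&done_cond);
        pthread_mutex_unlock(&done_lock);

        return NULL;
}

/* Stand-in for gsi_command(): issue a command, wait on the shared completion */
static void submit_command(const char *what)
{
        pthread_t irq;

        pthread_mutex_lock(&cmd_mutex);         /* at most one command in flight */

        pthread_mutex_lock(&done_lock);
        done = false;                           /* reinit_completion() analogue */
        pthread_mutex_unlock(&done_lock);

        pthread_create(&irq, NULL, irq_thread, NULL);   /* "write the command register" */

        pthread_mutex_lock(&done_lock);
        while (!done)
                pthread_cond_wait(&done_cond, &done_lock);
        pthread_mutex_unlock(&done_lock);

        pthread_join(irq, NULL);
        printf("%s command completed\n", what);

        pthread_mutex_unlock(&cmd_mutex);
}

int main(void)
{
        /* Channel, event ring, and generic commands all share one completion */
        submit_command("channel");
        submit_command("event ring");
        submit_command("generic");

        return 0;
}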