>> +	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
>> +	if (!elem)
>> +		goto free_desc;
>> +
>> +	elem->ts = jiffies;
>> +	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
>> +
>> +	spin_lock_bh(&dp->reo_cmd_lock);
>> +	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
>> +	dp->reo_cmd_cache_flush_count++;
>> +
>> +	/* Flush and invalidate aged REO desc from HW cache */
>> +	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
>> +				 list) {
>> +		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
>> +		    time_after(jiffies, elem->ts +
>> +			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
>> +			list_del(&elem->list);
>> +			dp->reo_cmd_cache_flush_count--;
>> +			spin_unlock_bh(&dp->reo_cmd_lock);
>> +
>> +			ath12k_dp_reo_cache_flush(ab, &elem->data);
>> +			kfree(elem);
>> +			spin_lock_bh(&dp->reo_cmd_lock);

> is this really a safe iteration if you unlock & lock in the middle?
> what prevents the tmp node from being deleted during this window?

The reo_cmd_cache_flush_list is used in only two contexts: one is this
function, which is called from NAPI, and the other is ath12k_dp_free
during core destroy. Before dp_free, the IRQs would already be disabled
and we would have waited for them to synchronize. Hence there would not
be any race against an add or delete on this list.

Please let me know if that's fine.

Thanks,
Sriram.R
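
P.S. If we ever want the iteration to be robust independently of those
context guarantees, one option would be the usual drain-to-a-local-list
idiom: detach the aged entries onto a private list while holding
reo_cmd_lock, then flush and free them with the lock dropped, so the
list_for_each_entry_safe() cursor never spans an unlock/relock window.
A rough sketch follows; flush_aged_reo_entries() and struct
reo_flush_elem are made-up names for illustration, and the fields,
macros, and the type of the rx_tid snapshot are assumed to match the
patch above.

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the patch's cache-flush list element. */
struct reo_flush_elem {
	struct list_head list;
	unsigned long ts;
	struct ath12k_dp_rx_tid data;	/* assumed snapshot type */
};

static void flush_aged_reo_entries(struct ath12k_base *ab,
				   struct ath12k_dp *dp)
{
	struct reo_flush_elem *elem, *tmp;
	LIST_HEAD(stale);

	/* Phase 1: move every aged entry onto a private list while the
	 * lock is held. list_move_tail() re-links the node, so both
	 * lists stay consistent throughout the walk and the lock is
	 * never dropped mid-iteration.
	 */
	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_move_tail(&elem->list, &stale);
			dp->reo_cmd_cache_flush_count--;
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	/* Phase 2: nothing else can reach the private list, so the
	 * flush and free can run without the lock.
	 */
	list_for_each_entry_safe(elem, tmp, &stale, list) {
		list_del(&elem->list);
		ath12k_dp_reo_cache_flush(ab, &elem->data);
		kfree(elem);
	}
}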