[RFC,3/4] batman-adv: Remove atomic usage for tt.local_changes
The tt.local_changes atomic is written either while tt.changes_list_lock
is held or right next to one of its critical sections (see
batadv_tt_local_event()). Thus the performance gain from using an atomic
was limited, or even negative given the overhead of the atomic accesses.
Using an atomic also means having to be wary of a potential negative
tt.local_changes value.
Simplify tt.local_changes by dropping its atomic property: turn it into
a plain counter that is only modified with tt.changes_list_lock held and
read with READ_ONCE() on the lockless paths.
Signed-off-by: Remi Pommarel <repk@triplefau.lt>
---
net/batman-adv/soft-interface.c | 2 +-
net/batman-adv/translation-table.c | 17 ++++++-----------
net/batman-adv/types.h | 4 ++--
3 files changed, 9 insertions(+), 14 deletions(-)
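Note for reviewers: the pattern relied on here is that every writer of the
counter already runs under tt.changes_list_lock, so a plain integer is
sufficient, while the remaining lockless readers only need READ_ONCE() to
get an untorn (possibly slightly stale) snapshot. Below is a minimal
userspace sketch of that pattern, not batman-adv code; a pthread mutex
stands in for the spinlock, the READ_ONCE() macro is a simplified
stand-in for the kernel's, and the names (sketch.c, changes,
changes_lock, writer) are illustrative only.

/* Illustrative sketch only (not batman-adv code): a plain counter whose
 * writers all hold a lock, plus one lockless reader using an untorn read.
 * Build with: cc -Wall -pthread sketch.c -o sketch
 */
#include <pthread.h>
#include <stdio.h>

/* Simplified userspace stand-in for the kernel's READ_ONCE(): force a
 * single, untorn load through a volatile-qualified lvalue. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static pthread_mutex_t changes_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t changes; /* plain counter, only written under changes_lock */

static void *writer(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&changes_lock);
		changes++; /* no atomic needed, the lock serializes writers */
		pthread_mutex_unlock(&changes_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/* Lockless fast path, mirroring the READ_ONCE() check in
	 * batadv_tt_local_commit_changes_nolock(): a slightly stale value
	 * is acceptable, only a torn read would be a problem. */
	while (READ_ONCE(changes) == 0)
		; /* spin until the writer has published something */

	pthread_mutex_lock(&changes_lock);
	printf("changes so far: %zu\n", changes);
	pthread_mutex_unlock(&changes_lock);

	pthread_join(&t, NULL);
	return 0;
}

A side effect worth noting: with the counter now an unsigned size_t, the
"< 1" test in batadv_tt_local_commit_changes_nolock() degenerates to an
"== 0" check, and the negative values the commit message warns about can
no longer occur by construction.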
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -783,13 +783,13 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->tt.vn, 0);
- atomic_set(&bat_priv->tt.local_changes, 0);
atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
atomic_set(&bat_priv->bla.num_requests, 0);
#endif
atomic_set(&bat_priv->tp_num, 0);

+ bat_priv->tt.local_changes = 0;
bat_priv->tt.last_changeset = NULL;
bat_priv->tt.last_changeset_len = 0;
bat_priv->isolation_mark = 0;
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -463,7 +463,6 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
struct batadv_tt_change_node *tt_change_node, *entry, *safe;
struct batadv_tt_common_entry *common = &tt_local_entry->common;
u8 flags = common->flags | event_flags;
- bool event_removed = false;
bool del_op_requested, del_op_entry;

tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
@@ -508,21 +507,17 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del:
list_del(&entry->list);
kmem_cache_free(batadv_tt_change_cache, entry);
+ bat_priv->tt.local_changes--;
kmem_cache_free(batadv_tt_change_cache, tt_change_node);
- event_removed = true;
goto unlock;
}

/* track the change in the OGMinterval list */
list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
+ bat_priv->tt.local_changes++;

unlock:
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
-
- if (event_removed)
- atomic_dec(&bat_priv->tt.local_changes);
- else
- atomic_inc(&bat_priv->tt.local_changes);
}

/**
@@ -994,7 +989,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
size_t tt_extra_len = 0;
u16 tvlv_len;

- tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ tt_diff_entries_num = READ_ONCE(bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);

/* if we have too many changes for one packet don't send any
@@ -1022,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_data->flags = BATADV_TT_OGM_DIFF;

spin_lock_bh(&bat_priv->tt.changes_list_lock);
- atomic_set(&bat_priv->tt.local_changes, 0);
+ bat_priv->tt.local_changes = 0;

list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
@@ -1438,7 +1433,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
kmem_cache_free(batadv_tt_change_cache, entry);
}

- atomic_set(&bat_priv->tt.local_changes, 0);
+ bat_priv->tt.local_changes = 0;

spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}
@@ -3701,7 +3696,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
lockdep_assert_held(&bat_priv->tt.commit_lock);

- if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+ if (READ_ONCE(bat_priv->tt.local_changes) < 1) {
if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
batadv_tt_tvlv_container_update(bat_priv);
return;
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1022,7 +1022,7 @@ struct batadv_priv_tt {
atomic_t ogm_append_cnt;

/** @local_changes: changes registered in an originator interval */
- atomic_t local_changes;
+ size_t local_changes;

/**
* @changes_list: tracks tt local changes within an originator interval
@@ -1044,7 +1044,7 @@ struct batadv_priv_tt {
*/
struct list_head roam_list;

- /** @changes_list_lock: lock protecting changes_list */
+ /** @changes_list_lock: lock protecting changes_list & local_changes */
spinlock_t changes_list_lock;

/** @req_list_lock: lock protecting req_list */