[04/20] batman-adv: Attach local MCAs to OGMs

Message ID 1291761150-29818-4-git-send-email-linus.luessing@saxnet.de (mailing list archive)
State Superseded, archived

Commit Message

Linus Lüssing Dec. 7, 2010, 10:32 p.m. UTC
This patch introduces multicast announcements - MCA for short - which
are attached to an OGM if an optimized multicast mode that needs MCAs
has been selected (i.e. proactive_tracking).
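
For illustration, here is a minimal sketch (not part of this patch) of
where the MCA entries end up in the OGM buffer, assuming the
batman_packet layout used below, with the MCAs appended right behind
the HNA entries:

	/* illustration only: the MCA block starts behind the HNA entries */
	static unsigned char *mca_buff(struct batman_packet *batman_packet)
	{
		return (unsigned char *)(batman_packet + 1) +
		       batman_packet->num_hna * ETH_ALEN;
	}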

MCA entries are multicast MAC addresses used by a multicast receiver in
the mesh cloud. Currently, MCAs are only fetched locally from the
batman interface itself; bridged-in hosts will not be announced yet and
will require a more complex patch adding IGMP/MLD snooping support.
However, fetching the addresses locally already enables multicast
optimizations on layer 2 for batman nodes, without depending on IP at
all.
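
As a usage sketch (again not part of this patch), a receiver could walk
the announced addresses like this, assuming the hypothetical mca_buff()
helper from above and ETH_ALEN-sized entries:

	/* illustration only: iterate over all announced MCA entries */
	static void print_mca_entries(struct batman_packet *batman_packet)
	{
		unsigned char *mca = mca_buff(batman_packet);
		int i;

		for (i = 0; i < batman_packet->num_mca; i++, mca += ETH_ALEN)
			pr_debug("announced MCA: %pM\n", mca);
	}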

Signed-off-by: Linus Lüssing <linus.luessing@saxnet.de>
---
 aggregation.c |   12 +++++++-
 aggregation.h |    6 +++-
 main.h        |    2 +
 send.c        |   82 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 4 files changed, 87 insertions(+), 15 deletions(-)
  

Patch

diff --git a/aggregation.c b/aggregation.c
index 0c92e3b..d4de296 100644
--- a/aggregation.c
+++ b/aggregation.c
@@ -30,6 +30,12 @@  static int hna_len(struct batman_packet *batman_packet)
 	return batman_packet->num_hna * ETH_ALEN;
 }
 
+/* calculate the size of the mca information for a given packet */
+static int mca_len(struct batman_packet *batman_packet)
+{
+	return batman_packet->num_mca * ETH_ALEN;
+}
+
 /* return true if new_packet can be aggregated with forw_packet */
 static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 			       int packet_len,
@@ -265,9 +271,11 @@  void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 				   hna_buff, hna_len(batman_packet),
 				   if_incoming);
 
-		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
+		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet) +
+			    mca_len(batman_packet);
 		batman_packet = (struct batman_packet *)
 			(packet_buff + buff_pos);
 	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_hna));
+				   batman_packet->num_hna,
+				   batman_packet->num_mca));
 }
diff --git a/aggregation.h b/aggregation.h
index 71a91b3..93f2496 100644
--- a/aggregation.h
+++ b/aggregation.h
@@ -25,9 +25,11 @@ 
 #include "main.h"
 
 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
+static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna,
+				    int num_mca)
 {
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
+	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN) +
+			    (num_mca * ETH_ALEN);
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
diff --git a/main.h b/main.h
index a362433..772d621 100644
--- a/main.h
+++ b/main.h
@@ -105,6 +105,8 @@ 
 
 /* #define VIS_SUBCLUSTERS_DISABLED */
 
+#define UINT8_MAX 255
+
 /*
  * Kernel headers
  */
diff --git a/send.c b/send.c
index b89b9f7..ba7ebfe 100644
--- a/send.c
+++ b/send.c
@@ -122,7 +122,8 @@  static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_hna)) {
+				 batman_packet->num_hna,
+				 batman_packet->num_mca)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -214,18 +215,71 @@  static void send_packet(struct forw_packet *forw_packet)
 	rcu_read_unlock();
 }
 
+static void add_own_MCA(struct batman_packet *batman_packet, int num_mca,
+		       struct net_device *soft_iface)
+{
+	MC_LIST *mc_list_entry;
+	int num_mca_done = 0;
+	unsigned long flags;
+	char *mca_entry = (char *)(batman_packet + 1);
+
+	if (num_mca == 0)
+		goto out;
+
+	if (num_mca > UINT8_MAX) {
+		pr_warning("Too many multicast announcements here, "
+			   "just adding %i\n", UINT8_MAX);
+		num_mca = UINT8_MAX;
+	}
+
+	mca_entry = mca_entry + batman_packet->num_hna * ETH_ALEN;
+
+	MC_LIST_LOCK(soft_iface, flags);
+	netdev_for_each_mc_addr(mc_list_entry, soft_iface) {
+		memcpy(mca_entry, &mc_list_entry->MC_LIST_ADDR, ETH_ALEN);
+		mca_entry += ETH_ALEN;
+
+		/* A multicast address might just have been added,
+		 * avoid writing outside of buffer */
+		if (++num_mca_done == num_mca)
+			break;
+	}
+	MC_LIST_UNLOCK(soft_iface, flags);
+
+out:
+	batman_packet->num_mca = num_mca_done;
+}
+
 static void rebuild_batman_packet(struct bat_priv *bat_priv,
 				  struct batman_if *batman_if)
 {
-	int new_len;
-	unsigned char *new_buff;
+	int new_len, mcast_mode, num_mca = 0;
+	unsigned long flags;
+	unsigned char *new_buff = NULL;
 	struct batman_packet *batman_packet;
 
-	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_hna * ETH_ALEN);
-	new_buff = kmalloc(new_len, GFP_ATOMIC);
+	batman_packet = (struct batman_packet *)batman_if->packet_buff;
+	mcast_mode = atomic_read(&bat_priv->mcast_mode);
+
+	/* Avoid attaching MCAs if multicast optimization is disabled */
+	if (mcast_mode == MCAST_MODE_PROACT_TRACKING) {
+		MC_LIST_LOCK(batman_if->soft_iface, flags);
+		num_mca = netdev_mc_count(batman_if->soft_iface);
+		MC_LIST_UNLOCK(batman_if->soft_iface, flags);
+	}
 
-	/* keep old buffer if kmalloc should fail */
+	if (atomic_read(&bat_priv->hna_local_changed) ||
+	    num_mca != batman_packet->num_mca) {
+		new_len = sizeof(struct batman_packet) +
+			(bat_priv->num_local_hna * ETH_ALEN) +
+			num_mca * ETH_ALEN;
+		new_buff = kmalloc(new_len, GFP_ATOMIC);
+	}
+
+	/*
+	 * if local hna or mca has changed but kmalloc failed
+	 * then just keep the old buffer
+	 */
 	if (new_buff) {
 		memcpy(new_buff, batman_if->packet_buff,
 		       sizeof(struct batman_packet));
@@ -239,6 +293,13 @@  static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		batman_if->packet_buff = new_buff;
 		batman_if->packet_len = new_len;
 	}
+
+	/**
+	 * always copy mca entries (if there are any) - we have to
+	 * traverse the list anyway, so we can just do a memcpy instead of memcmp
+	 * for the sake of simplicity
+	 */
+	add_own_MCA(batman_packet, num_mca, batman_if->soft_iface);
 }
 
 void schedule_own_packet(struct batman_if *batman_if)
@@ -264,9 +325,7 @@  void schedule_own_packet(struct batman_if *batman_if)
 	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
 		batman_if->if_status = IF_ACTIVE;
 
-	/* if local hna has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (batman_if == bat_priv->primary_if))
+	if (batman_if == bat_priv->primary_if)
 		rebuild_batman_packet(bat_priv, batman_if);
 
 	/**
@@ -359,7 +418,8 @@  void schedule_forward_packet(struct orig_node *orig_node,
 	send_time = forward_send_time(bat_priv);
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + hna_buff_len,
+			       sizeof(struct batman_packet) + hna_buff_len
+			       + batman_packet->num_mca * ETH_ALEN,
 			       if_incoming, 0, send_time);
 }