@@ -520,6 +520,7 @@ static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX,
update_mcast_tracker);
BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
+BAT_ATTR_UINT(num_bcasts, S_IRUGO | S_IWUSR, 0, INT_MAX, NULL);
BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
post_gw_deselect);
static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
@@ -544,6 +545,7 @@ static struct bat_attribute *mesh_attrs[] = {
&bat_attr_gw_mode,
&bat_attr_orig_interval,
&bat_attr_hop_penalty,
+ &bat_attr_num_bcasts,
&bat_attr_gw_sel_class,
&bat_attr_gw_bandwidth,
&bat_attr_mcast_mode,
@@ -512,6 +512,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
struct sk_buff *skb1;
struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ int num_bcasts = atomic_read(&bat_priv->num_bcasts);
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_del(&forw_packet->list);
@@ -536,7 +537,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
forw_packet->num_packets++;
/* if we still have some more bcasts to send */
- if (forw_packet->num_packets < 3) {
+ if (forw_packet->num_packets < num_bcasts) {
_add_bcast_packet_to_list(bat_priv, forw_packet,
((5 * HZ) / 1000));
return;
@@ -137,6 +137,7 @@ struct bat_priv {
atomic_t gw_bandwidth; /* gw bandwidth */
atomic_t orig_interval; /* uint */
atomic_t hop_penalty; /* uint */
+ atomic_t num_bcasts; /* uint */
atomic_t mcast_mode; /* MCAST_MODE_* */
atomic_t mcast_tracker_interval;/* uint, auto */
atomic_t mcast_tracker_timeout; /* uint, auto */
Depending on the scenario, people might want to adjust the number of
(re)broadcasts of data packets - usually higher values in sparse or
lower values in dense networks.

Signed-off-by: Linus Lüssing <linus.luessing@saxnet.de>
---
 bat_sysfs.c |    2 ++
 send.c      |    3 ++-
 types.h     |    1 +
 3 files changed, 5 insertions(+), 1 deletions(-)