Skip to content
Snippets Groups Projects
br_multicast.c 40.7 KiB
Newer Older
  • Learn to ignore specific revisions
  • 	int num;
    	int err = 0;
    
    	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
    		return -EINVAL;
    
    	icmp6h = icmp6_hdr(skb);
    	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
    	len = sizeof(*icmp6h);
    
    	for (i = 0; i < num; i++) {
    		__be16 *nsrcs, _nsrcs;
    
    		nsrcs = skb_header_pointer(skb,
    					   len + offsetof(struct mld2_grec,
    
    					   sizeof(_nsrcs), &_nsrcs);
    		if (!nsrcs)
    			return -EINVAL;
    
    		if (!pskb_may_pull(skb,
    				   len + sizeof(*grec) +
    
    				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
    
    			return -EINVAL;
    
    		grec = (struct mld2_grec *)(skb->data + len);
    
    		len += sizeof(*grec) +
    		       sizeof(struct in6_addr) * ntohs(*nsrcs);
    
    
    		/* We treat these as MLDv1 reports for now. */
    		switch (grec->grec_type) {
    		case MLD2_MODE_IS_INCLUDE:
    		case MLD2_MODE_IS_EXCLUDE:
    		case MLD2_CHANGE_TO_INCLUDE:
    		case MLD2_CHANGE_TO_EXCLUDE:
    		case MLD2_ALLOW_NEW_SOURCES:
    		case MLD2_BLOCK_OLD_SOURCES:
    			break;
    
    		default:
    			continue;
    		}
    
    		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
    		if (!err)
    			break;
    	}
    
    	return err;
    }
    #endif
    
    
    /*
     * Add port to router_list
     *  list is maintained ordered by pointer value
     *  and locked by br->multicast_lock and RCU
     */
    
    static void br_multicast_add_router(struct net_bridge *br,
    				    struct net_bridge_port *port)
    {
    
    	struct net_bridge_port *p;
    
    	struct hlist_node *n, *slot = NULL;
    
    	hlist_for_each_entry(p, n, &br->router_list, rlist) {
    
    		if ((unsigned long) port >= (unsigned long) p)
    			break;
    		slot = n;
    
    	if (slot)
    		hlist_add_after_rcu(slot, &port->rlist);
    
    	else
    		hlist_add_head_rcu(&port->rlist, &br->router_list);
    
/*
 * Record that a multicast router has been seen via @port, or on the
 * bridge itself when @port is NULL, and (re)arm the matching timeout.
 * Caller holds br->multicast_lock.
 *
 * NOTE(review): both paths arm their timer with
 * br->multicast_querier_interval rather than a router-specific
 * interval — confirm this is intentional.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	/* Bridge-local router: only tracked in automatic mode (1). */
	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	/* Ports pinned to 0 (never) or 2 (always) are not tracked
	 * dynamically. */
	if (port->multicast_router != 1)
		return;

	/* Already on the router list: just refresh the timer. */
	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
    
    static void br_multicast_query_received(struct net_bridge *br,
    					struct net_bridge_port *port,
    
    {
    	if (saddr)
    		mod_timer(&br->multicast_querier_timer,
    			  jiffies + br->multicast_querier_interval);
    	else if (timer_pending(&br->multicast_querier_timer))
    		return;
    
    	br_multicast_mark_router(br, port);
    }
    
    
    static int br_ip4_multicast_query(struct net_bridge *br,
    				  struct net_bridge_port *port,
    				  struct sk_buff *skb)
    
    	const struct iphdr *iph = ip_hdr(skb);
    
    	struct igmphdr *ih = igmp_hdr(skb);
    	struct net_bridge_mdb_entry *mp;
    	struct igmpv3_query *ih3;
    	struct net_bridge_port_group *p;
    
    	struct net_bridge_port_group __rcu **pp;
    
    	unsigned long max_delay;
    	unsigned long now = jiffies;
    	__be32 group;
    
    
    	spin_lock(&br->multicast_lock);
    	if (!netif_running(br->dev) ||
    	    (port && port->state == BR_STATE_DISABLED))
    		goto out;
    
    
    	br_multicast_query_received(br, port, !!iph->saddr);
    
    
    	group = ih->group;
    
    	if (skb->len == sizeof(*ih)) {
    		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
    
    		if (!max_delay) {
    			max_delay = 10 * HZ;
    			group = 0;
    		}
    	} else {
    
    		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
    			err = -EINVAL;
    			goto out;
    		}
    
    
    		ih3 = igmpv3_query_hdr(skb);
    		if (ih3->nsrcs)
    
    		max_delay = ih3->code ?
    			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
    
    	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
    
    	if (!mp)
    		goto out;
    
    	max_delay *= br->multicast_last_member_count;
    
    
    	if (mp->mglist &&
    
    	    (timer_pending(&mp->timer) ?
    	     time_after(mp->timer.expires, now + max_delay) :
    	     try_to_del_timer_sync(&mp->timer) >= 0))
    		mod_timer(&mp->timer, now + max_delay);
    
    
    	for (pp = &mp->ports;
    	     (p = mlock_dereference(*pp, br)) != NULL;
    	     pp = &p->next) {
    
    		if (timer_pending(&p->timer) ?
    		    time_after(p->timer.expires, now + max_delay) :
    		    try_to_del_timer_sync(&p->timer) >= 0)
    
    			mod_timer(&p->timer, now + max_delay);
    
    	}
    
    out:
    	spin_unlock(&br->multicast_lock);
    
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    static int br_ip6_multicast_query(struct net_bridge *br,
    				  struct net_bridge_port *port,
    				  struct sk_buff *skb)
    {
    
    	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
    
    	struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
    	struct net_bridge_mdb_entry *mp;
    	struct mld2_query *mld2q;
    
    	struct net_bridge_port_group *p;
    	struct net_bridge_port_group __rcu **pp;
    
    	unsigned long max_delay;
    	unsigned long now = jiffies;
    
    	const struct in6_addr *group = NULL;
    
    	int err = 0;
    
    	spin_lock(&br->multicast_lock);
    	if (!netif_running(br->dev) ||
    	    (port && port->state == BR_STATE_DISABLED))
    		goto out;
    
    	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
    
    	if (skb->len == sizeof(*mld)) {
    		if (!pskb_may_pull(skb, sizeof(*mld))) {
    			err = -EINVAL;
    			goto out;
    		}
    		mld = (struct mld_msg *) icmp6_hdr(skb);
    		max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay));
    		if (max_delay)
    			group = &mld->mld_mca;
    	} else if (skb->len >= sizeof(*mld2q)) {
    		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
    			err = -EINVAL;
    			goto out;
    		}
    		mld2q = (struct mld2_query *)icmp6_hdr(skb);
    		if (!mld2q->mld2q_nsrcs)
    			group = &mld2q->mld2q_mca;
    		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
    	}
    
    	if (!group)
    		goto out;
    
    
    	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
    
    	if (!mp)
    		goto out;
    
    	max_delay *= br->multicast_last_member_count;
    
    	if (mp->mglist &&
    
    	    (timer_pending(&mp->timer) ?
    	     time_after(mp->timer.expires, now + max_delay) :
    	     try_to_del_timer_sync(&mp->timer) >= 0))
    		mod_timer(&mp->timer, now + max_delay);
    
    
    	for (pp = &mp->ports;
    	     (p = mlock_dereference(*pp, br)) != NULL;
    	     pp = &p->next) {
    
    		if (timer_pending(&p->timer) ?
    		    time_after(p->timer.expires, now + max_delay) :
    		    try_to_del_timer_sync(&p->timer) >= 0)
    
    			mod_timer(&p->timer, now + max_delay);
    
    	}
    
    out:
    	spin_unlock(&br->multicast_lock);
    	return err;
    }
    #endif
    
    
    static void br_multicast_leave_group(struct net_bridge *br,
    				     struct net_bridge_port *port,
    
    {
    	struct net_bridge_mdb_htable *mdb;
    	struct net_bridge_mdb_entry *mp;
    	struct net_bridge_port_group *p;
    	unsigned long now;
    	unsigned long time;
    
    	spin_lock(&br->multicast_lock);
    	if (!netif_running(br->dev) ||
    	    (port && port->state == BR_STATE_DISABLED) ||
    	    timer_pending(&br->multicast_querier_timer))
    		goto out;
    
    
    	mdb = mlock_dereference(br->mdb, br);
    
    	mp = br_mdb_ip_get(mdb, group);
    	if (!mp)
    		goto out;
    
    	now = jiffies;
    	time = now + br->multicast_last_member_count *
    		     br->multicast_last_member_interval;
    
    	if (!port) {
    
    		if (mp->mglist &&
    
    		    (timer_pending(&mp->timer) ?
    		     time_after(mp->timer.expires, time) :
    		     try_to_del_timer_sync(&mp->timer) >= 0)) {
    			mod_timer(&mp->timer, time);
    
    			mp->queries_sent = 0;
    			mod_timer(&mp->query_timer, now);
    		}
    
    		goto out;
    	}
    
    
    	for (p = mlock_dereference(mp->ports, br);
    	     p != NULL;
    	     p = mlock_dereference(p->next, br)) {
    
    		if (p->port != port)
    			continue;
    
    		if (!hlist_unhashed(&p->mglist) &&
    		    (timer_pending(&p->timer) ?
    		     time_after(p->timer.expires, time) :
    		     try_to_del_timer_sync(&p->timer) >= 0)) {
    			mod_timer(&p->timer, time);
    
    			p->queries_sent = 0;
    			mod_timer(&p->query_timer, now);
    		}
    
    		break;
    	}
    
    out:
    	spin_unlock(&br->multicast_lock);
    }
    
    
    static void br_ip4_multicast_leave_group(struct net_bridge *br,
    					 struct net_bridge_port *port,
    					 __be32 group)
    {
    	struct br_ip br_group;
    
    	if (ipv4_is_local_multicast(group))
    		return;
    
    	br_group.u.ip4 = group;
    	br_group.proto = htons(ETH_P_IP);
    
    	br_multicast_leave_group(br, port, &br_group);
    }
    
    
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    static void br_ip6_multicast_leave_group(struct net_bridge *br,
    					 struct net_bridge_port *port,
    					 const struct in6_addr *group)
    {
    	struct br_ip br_group;
    
    
    	if (!ipv6_is_transient_multicast(group))
    
    	br_group.u.ip6 = *group;
    
    	br_group.proto = htons(ETH_P_IPV6);
    
    	br_multicast_leave_group(br, port, &br_group);
    }
    #endif
    
    static int br_multicast_ipv4_rcv(struct net_bridge *br,
    				 struct net_bridge_port *port,
    				 struct sk_buff *skb)
    {
    	struct sk_buff *skb2 = skb;
    
    	const struct iphdr *iph;
    
    	struct igmphdr *ih;
    	unsigned len;
    	unsigned offset;
    	int err;
    
    	/* We treat OOM as packet loss for now. */
    	if (!pskb_may_pull(skb, sizeof(*iph)))
    		return -EINVAL;
    
    	iph = ip_hdr(skb);
    
    	if (iph->ihl < 5 || iph->version != 4)
    		return -EINVAL;
    
    	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
    		return -EINVAL;
    
    	iph = ip_hdr(skb);
    
    	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
    		return -EINVAL;
    
    
    	if (iph->protocol != IPPROTO_IGMP) {
    		if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
    			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
    
    
    	len = ntohs(iph->tot_len);
    	if (skb->len < len || len < ip_hdrlen(skb))
    		return -EINVAL;
    
    	if (skb->len > len) {
    		skb2 = skb_clone(skb, GFP_ATOMIC);
    		if (!skb2)
    			return -ENOMEM;
    
    		err = pskb_trim_rcsum(skb2, len);
    		if (err)
    
    	}
    
    	len -= ip_hdrlen(skb2);
    	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
    	__skb_pull(skb2, offset);
    	skb_reset_transport_header(skb2);
    
    	err = -EINVAL;
    	if (!pskb_may_pull(skb2, sizeof(*ih)))
    		goto out;
    
    	switch (skb2->ip_summed) {
    	case CHECKSUM_COMPLETE:
    		if (!csum_fold(skb2->csum))
    			break;
    		/* fall through */
    	case CHECKSUM_NONE:
    		skb2->csum = 0;
    		if (skb_checksum_complete(skb2))
    
    	}
    
    	err = 0;
    
    	BR_INPUT_SKB_CB(skb)->igmp = 1;
    	ih = igmp_hdr(skb2);
    
    	switch (ih->type) {
    	case IGMP_HOST_MEMBERSHIP_REPORT:
    	case IGMPV2_HOST_MEMBERSHIP_REPORT:
    
    		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
    
    		err = br_ip4_multicast_add_group(br, port, ih->group);
    
    		break;
    	case IGMPV3_HOST_MEMBERSHIP_REPORT:
    
    		err = br_ip4_multicast_igmp3_report(br, port, skb2);
    
    		break;
    	case IGMP_HOST_MEMBERSHIP_QUERY:
    
    		err = br_ip4_multicast_query(br, port, skb2);
    
    		break;
    	case IGMP_HOST_LEAVE_MESSAGE:
    
    		br_ip4_multicast_leave_group(br, port, ih->group);
    
    		break;
    	}
    
    out:
    	__skb_push(skb2, offset);
    
    	if (skb2 != skb)
    		kfree_skb(skb2);
    	return err;
    }
    
    
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    static int br_multicast_ipv6_rcv(struct net_bridge *br,
    				 struct net_bridge_port *port,
    				 struct sk_buff *skb)
    {
    
    	struct sk_buff *skb2;
    
    	const struct ipv6hdr *ip6h;
    
    	u8 icmp6_type;
    
    	int offset;
    
    	int err;
    
    	if (!pskb_may_pull(skb, sizeof(*ip6h)))
    		return -EINVAL;
    
    	ip6h = ipv6_hdr(skb);
    
    	/*
    	 * We're interested in MLD messages only.
    	 *  - Version is 6
    	 *  - MLD has always Router Alert hop-by-hop option
    	 *  - But we do not support jumbrograms.
    	 */
    	if (ip6h->version != 6 ||
    	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
    	    ip6h->payload_len == 0)
    		return 0;
    
    
    	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
    
    	if (skb->len < len)
    		return -EINVAL;
    
    	nexthdr = ip6h->nexthdr;
    
    	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
    
    
    	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
    		return 0;
    
    	/* Okay, we found ICMPv6 header */
    	skb2 = skb_clone(skb, GFP_ATOMIC);
    	if (!skb2)
    		return -ENOMEM;
    
    
    	err = -EINVAL;
    	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
    		goto out;
    
    
    	len -= offset - skb_network_offset(skb2);
    
    	__skb_pull(skb2, offset);
    	skb_reset_transport_header(skb2);
    
    	skb_postpull_rcsum(skb2, skb_network_header(skb2),
    			   skb_network_header_len(skb2));
    
    	icmp6_type = icmp6_hdr(skb2)->icmp6_type;
    
    	switch (icmp6_type) {
    
    	case ICMPV6_MGM_QUERY:
    	case ICMPV6_MGM_REPORT:
    	case ICMPV6_MGM_REDUCTION:
    	case ICMPV6_MLD2_REPORT:
    		break;
    	default:
    		err = 0;
    		goto out;
    	}
    
    	/* Okay, we found MLD message. Check further. */
    	if (skb2->len > len) {
    		err = pskb_trim_rcsum(skb2, len);
    		if (err)
    			goto out;
    
    	switch (skb2->ip_summed) {
    	case CHECKSUM_COMPLETE:
    
    		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
    					IPPROTO_ICMPV6, skb2->csum))
    
    			break;
    		/*FALLTHROUGH*/
    	case CHECKSUM_NONE:
    
    		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
    							&ip6h->daddr,
    							skb2->len,
    							IPPROTO_ICMPV6, 0));
    		if (__skb_checksum_complete(skb2))
    
    			goto out;
    	}
    
    	err = 0;
    
    	BR_INPUT_SKB_CB(skb)->igmp = 1;
    
    
    	switch (icmp6_type) {
    
    	case ICMPV6_MGM_REPORT:
    	    {
    
    		struct mld_msg *mld;
    		if (!pskb_may_pull(skb2, sizeof(*mld))) {
    			err = -EINVAL;
    			goto out;
    		}
    		mld = (struct mld_msg *)skb_transport_header(skb2);
    
    		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
    
    		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
    		break;
    	    }
    	case ICMPV6_MLD2_REPORT:
    		err = br_ip6_multicast_mld2_report(br, port, skb2);
    		break;
    	case ICMPV6_MGM_QUERY:
    		err = br_ip6_multicast_query(br, port, skb2);
    		break;
    	case ICMPV6_MGM_REDUCTION:
    	    {
    
    		struct mld_msg *mld;
    		if (!pskb_may_pull(skb2, sizeof(*mld))) {
    			err = -EINVAL;
    			goto out;
    		}
    		mld = (struct mld_msg *)skb_transport_header(skb2);
    
    		br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
    	    }
    	}
    
    out:
    
    int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
    		     struct sk_buff *skb)
    {
    
    	BR_INPUT_SKB_CB(skb)->igmp = 0;
    	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
    
    
    	if (br->multicast_disabled)
    		return 0;
    
    	switch (skb->protocol) {
    	case htons(ETH_P_IP):
    		return br_multicast_ipv4_rcv(br, port, skb);
    
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    	case htons(ETH_P_IPV6):
    		return br_multicast_ipv6_rcv(br, port, skb);
    #endif
    
    	}
    
    	return 0;
    }
    
/*
 * General-query timer callback.  @data is the struct net_bridge
 * pointer, cast to unsigned long at setup_timer() time.
 *
 * NOTE(review): br_multicast_send_query() presumably re-arms this
 * timer for the next interval — confirm in its definition.
 */
static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	/* Count startup queries up to the configured budget. */
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}
    
/*
 * Initialise multicast snooping state for a newly created bridge:
 * mdb hash tuning knobs, protocol timing defaults and the three
 * bridge-level timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	/* Router mode 1 == detect multicast routers dynamically. */
	br->multicast_router = 1;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* Timing defaults in jiffies; values mirror common IGMP/MLD
	 * querier parameters (e.g. 125 s general query interval). */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	/* NOTE(review): the querier timer shares the router-expired
	 * handler — presumably no querier-specific action is needed on
	 * expiry; confirm in br_multicast_local_router_expired(). */
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}
    
    void br_multicast_open(struct net_bridge *br)
    {
    	br->multicast_startup_queries_sent = 0;
    
    	if (br->multicast_disabled)
    		return;
    
    	mod_timer(&br->multicast_query_timer, jiffies);
    }
    
/*
 * Tear down all multicast snooping state: stop the bridge-level
 * timers, detach the mdb hash table and free every group entry and
 * the table itself via RCU.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	/* Stop the timers before taking multicast_lock — the query
	 * timer handler takes the lock itself, so del_timer_sync()
	 * must not be called while holding it. */
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	/* Detach the table so readers/writers can no longer find it. */
	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			del_timer(&mp->query_timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* A rehash still in flight (mdb->old set) must complete before
	 * the table can be freed; wait out its RCU callback. */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
    
    
    int br_multicast_set_router(struct net_bridge *br, unsigned long val)
    {
    	int err = -ENOENT;
    
    	spin_lock_bh(&br->multicast_lock);
    	if (!netif_running(br->dev))
    		goto unlock;
    
    	switch (val) {
    	case 0:
    	case 2:
    		del_timer(&br->multicast_router_timer);
    		/* fall through */
    	case 1:
    		br->multicast_router = val;
    		err = 0;
    		break;
    
    	default:
    		err = -EINVAL;
    		break;
    	}
    
    unlock:
    	spin_unlock_bh(&br->multicast_lock);
    
    	return err;
    }
    
/*
 * Set the per-port multicast router mode:
 *   0 - port never leads to a router, 1 - detect dynamically,
 *   2 - port permanently flagged as leading to a router.
 * Returns 0 on success, -EINVAL for other values, -ENOENT when the
 * bridge is down or the port is disabled.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		/* Modes 0 and 1 drop any existing router-list entry. */
		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		/* Modes 0 and 2: the detection timer is meaningless. */
		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		/* Mode 2: permanently list the port as a router port. */
		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
    
    
    int br_multicast_toggle(struct net_bridge *br, unsigned long val)
    {
    	struct net_bridge_port *port;
    
    	struct net_bridge_mdb_htable *mdb;
    
    	spin_lock_bh(&br->multicast_lock);
    
    	if (br->multicast_disabled == !val)
    		goto unlock;
    
    	br->multicast_disabled = !val;
    	if (br->multicast_disabled)
    		goto unlock;
    
    
    	if (!netif_running(br->dev))
    		goto unlock;
    
    
    	mdb = mlock_dereference(br->mdb, br);
    	if (mdb) {
    		if (mdb->old) {
    
    			err = -EEXIST;
    rollback:
    			br->multicast_disabled = !!val;
    			goto unlock;
    		}
    
    
    		err = br_mdb_rehash(&br->mdb, mdb->max,
    
    				    br->hash_elasticity);
    		if (err)
    			goto rollback;
    	}
    
    	br_multicast_open(br);
    	list_for_each_entry(port, &br->port_list, list) {
    		if (port->state == BR_STATE_DISABLED ||
    		    port->state == BR_STATE_BLOCKING)
    			continue;
    
    		__br_multicast_enable_port(port);
    	}
    
    unlock:
    
    	spin_unlock_bh(&br->multicast_lock);
    
    
/*
 * Change the maximum mdb hash table size.  @val must be a power of
 * two and not smaller than the current table size.  Returns 0 on
 * success, -EINVAL for a bad value, -ENOENT when the bridge is down,
 * -EEXIST while a previous rehash is still in flight, or the
 * br_mdb_rehash() error (after rolling back hash_max).
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;

	struct net_bridge_mdb_htable *mdb;


	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;

	/* Shrinking below the current size is not supported. */
	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	/* Remember the old limit so a failed rehash can restore it. */
	old = br->hash_max;
	br->hash_max = val;


	if (mdb) {
		/* An unfinished earlier rehash blocks another one. */
		if (mdb->old) {

			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}