enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frags(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_shinfo(skb)->frags[0].size -= grow;

		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
			put_page(skb_shinfo(skb)->frags[0].page);
			/* memmove length is in bytes, not fragments */
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
static gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

if (netpoll_rx_on(skb))
return GRO_NORMAL;
for (p = napi->gro_list; p; p = p->next) {
NAPI_GRO_CB(p)->same_flow =
(p->dev == skb->dev) &&
!compare_ether_header(skb_mac_header(p),
skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->flush = 0;
}
return dev_gro_receive(napi, skb);
}
gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);
void skb_gro_reset_offset(struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->data_offset = 0;
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail &&
!PageHighMem(skb_shinfo(skb)->frags[0].page)) {
NAPI_GRO_CB(skb)->frag0 =
page_address(skb_shinfo(skb)->frags[0].page) +
skb_shinfo(skb)->frags[0].page_offset;
NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
}
}
EXPORT_SYMBOL(skb_gro_reset_offset);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
skb_gro_reset_offset(skb);
return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
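/*
 * Illustrative sketch (not part of net/core/dev.c): how a driver's
 * NAPI ->poll() routine typically feeds received packets into GRO via
 * napi_gro_receive() instead of calling netif_receive_skb() directly.
 * The "mydrv" names, mydrv_fetch_skb() and the private struct below
 * are hypothetical.
 */
struct mydrv_priv {
	struct napi_struct napi;
	struct net_device *netdev;
};

/* hypothetical helper: pull one completed rx descriptor off the ring
 * and wrap it in an skb, or return NULL when the ring is drained */
static struct sk_buff *mydrv_fetch_skb(struct napi_struct *napi);

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = mydrv_fetch_skb(napi)) != NULL) {
		napi_gro_receive(napi, skb);
		work++;
	}

	/* ring drained: leave the poll list, re-enable rx interrupts */
	if (work < budget)
		napi_complete(napi);

	return work;
}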
void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
napi->skb = skb;
}
EXPORT_SYMBOL(napi_reuse_skb);
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
if (!skb) {
skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);
struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
struct ethhdr *eth;
unsigned int hlen;
unsigned int off;
napi->skb = NULL;
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb);
off = skb_gro_offset(skb);
hlen = off + sizeof(*eth);
eth = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
eth = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!eth)) {
napi_reuse_skb(napi, skb);
skb = NULL;
goto out;
}
}
skb_gro_pull(skb, sizeof(*eth));
/*
* This works because the only protocols we care about don't require
* special handling. We'll fix it up properly at the end.
*/
skb->protocol = eth->h_proto;
out:
return skb;
}
EXPORT_SYMBOL(napi_frags_skb);
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
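/*
 * Illustrative sketch (not part of net/core/dev.c): the frags-based
 * entry point above suits drivers that receive directly into pages.
 * The driver borrows the preallocated skb via napi_get_frags(),
 * attaches its rx page as a paged fragment, and hands the result to
 * napi_gro_frags(), which recovers the Ethernet header from frag0.
 * The function name and parameters are hypothetical.
 */
static void mydrv_rx_one_frag(struct napi_struct *napi,
			      struct page *page, int offset, int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;	/* no headroom skb available: drop this packet */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);
}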
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies;
napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		rps_lock(queue);
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			rps_unlock(queue);
			local_irq_enable();
			break;
		}
		rps_unlock(queue);
		local_irq_enable();
		__netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);
return work;
}
/**
* __napi_schedule - schedule for receive
* @n: entry to schedule
*
* The entry's receive function will be scheduled to run
*/
void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
local_irq_save(flags);
list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
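/*
 * Illustrative sketch (not part of net/core/dev.c): the usual caller
 * is a device interrupt handler, which masks the device's rx interrupt
 * and defers the real work to the NET_RX softirq. napi_schedule_prep()
 * guards against double-scheduling; mydrv_disable_irqs() is a
 * hypothetical register write.
 */
static void mydrv_disable_irqs(struct mydrv_priv *priv); /* hypothetical */

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_irqs(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}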
void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
BUG_ON(n->gro_list);
list_del(&n->poll_list);
smp_mb__before_clear_bit();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
void napi_complete(struct napi_struct *n)
{
unsigned long flags;
/*
* don't let napi dequeue from the cpu poll list
 * just in case it's running on a different cpu
*/
if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
return;
napi_gro_flush(n);
local_irq_save(flags);
__napi_complete(n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
napi->poll = poll;
napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
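/*
 * Illustrative sketch (not part of net/core/dev.c): a driver wires up
 * NAPI once at probe time, reusing the hypothetical mydrv_poll() from
 * the sketch further up; 64 is the conventional weight for Ethernet.
 */
static void mydrv_init_napi(struct mydrv_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, mydrv_poll, 64);
}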
void netif_napi_del(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
skb->next = NULL;
kfree_skb(skb);
}
napi->gro_list = NULL;
napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
/*
 * net_rps_action sends any pending IPIs for rps. This is only called from
* softirq and interrupts must be enabled.
*/
static void net_rps_action(cpumask_t *mask)
{
int cpu;
	/* Send pending IPIs to kick RPS processing on remote cpus. */
for_each_cpu_mask_nr(cpu, *mask) {
struct softnet_data *queue = &per_cpu(softnet_data, cpu);
if (cpu_online(cpu))
__smp_call_function_single(cpu, &queue->csd, 0);
}
cpus_clear(*mask);
}
static void net_rx_action(struct softirq_action *h)
{
struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
	void *have;
#ifdef CONFIG_RPS
	int select;
	struct rps_remote_softirq_cpus *rcpus;
#endif

	local_irq_disable();
while (!list_empty(list)) {
struct napi_struct *n;
int work, weight;
		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an
		 * average latency of 1.5/HZ.
*/
if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
goto softnet_break;
local_irq_enable();
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
n = list_first_entry(list, struct napi_struct, poll_list);
have = netpoll_poll_lock(n);
weight = n->weight;
/* This NAPI_STATE_SCHED test is for avoiding a race
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
* accidently calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n);
}
WARN_ON_ONCE(work > weight);
budget -= work;
local_irq_disable();
/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
if (unlikely(napi_disable_pending(n))) {
local_irq_enable();
napi_complete(n);
local_irq_disable();
} else
list_move_tail(&n->poll_list, list);
}
		netpoll_poll_unlock(have);
	}
out:
#ifdef CONFIG_RPS
	rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
	select = rcpus->select;
	rcpus->select ^= 1;

	local_irq_enable();

	net_rps_action(&rcpus->mask[select]);
#else
local_irq_enable();
#endif
#ifdef CONFIG_NET_DMA
/*
* There may not be any more sk_buffs coming right now, so push
* any pending DMA copies to hardware
*/
	dma_issue_pending_all();
#endif

	return;
softnet_break:
__get_cpu_var(netdev_rx_stat).time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
static gifconf_func_t *gifconf_list[NPROTO];
/**
* register_gifconf - register a SIOCGIF handler
* @family: Address family
* @gifconf: Function handler
*
* Register protocol dependent address dumping routines. The handler
* that is passed must not be freed or reused until it has been replaced
* by another handler.
*/
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
if (family >= NPROTO)
return -EINVAL;
gifconf_list[family] = gifconf;
return 0;
}
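/*
 * Illustrative sketch (not part of net/core/dev.c): an address family
 * registers its dump routine once at init time; IPv4 does this with
 * inet_gifconf() in net/ipv4/devinet.c. The stub below is purely
 * hypothetical. Per the contract used by dev_ifconf() further down, a
 * NULL buffer means "only report how many bytes would be written".
 */
static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	return 0;	/* this stub dumps no addresses */
}

static int __init example_gifconf_init(void)
{
	return register_gifconf(PF_INET, example_gifconf); /* family is illustrative */
}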
/*
* Map an interface index to its name (SIOCGIFNAME)
*/
/*
* We need this ioctl for efficient implementation of the
* if_indextoname() function required by the IPv6 API. Without
* it, we would have to search all the interfaces to find a
* match. --pb
*/
static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
struct net_device *dev;
struct ifreq ifr;
/*
* Fetch the caller's info block.
*/
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
return 0;
}
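/*
 * Illustrative userspace sketch (not kernel code): the ioctl handled
 * by dev_ifname() above is how if_indextoname() avoids scanning every
 * interface:
 *
 *	struct ifreq ifr = { .ifr_ifindex = idx };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (fd >= 0 && ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("index %d is %s\n", idx, ifr.ifr_name);
 */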
/*
* Perform a SIOCGIFCONF call. This structure will change
* size eventually, and there is nothing I can do about it.
* Thus we will need a 'compatibility mode'.
*/
static int dev_ifconf(struct net *net, char __user *arg)
{
struct ifconf ifc;
struct net_device *dev;
char __user *pos;
int len;
int total;
int i;
/*
* Fetch the caller's info block.
*/
if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
return -EFAULT;
pos = ifc.ifc_buf;
len = ifc.ifc_len;
/*
* Loop over the interfaces, and write an info block for each.
*/
total = 0;
for_each_netdev(net, dev) {
for (i = 0; i < NPROTO; i++) {
if (gifconf_list[i]) {
int done;
if (!pos)
done = gifconf_list[i](dev, NULL, 0);
else
done = gifconf_list[i](dev, pos + total,
len - total);
if (done < 0)
return -EFAULT;
total += done;
}
		}
	}
/*
* All done. Write the updated control block back to the caller.
*/
ifc.ifc_len = total;
/*
* Both BSD and Solaris return 0 here, so we do too.
*/
return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
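/*
 * Illustrative userspace sketch (not kernel code): because a NULL
 * ifc_buf makes dev_ifconf() report only the required length, callers
 * commonly size the buffer in a first pass and fetch in a second:
 *
 *	struct ifconf ifc = { .ifc_buf = NULL };
 *
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// ifc.ifc_len = bytes needed
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// fills the ifreq array
 */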
#ifdef CONFIG_PROC_FS
/*
* This is invoked by the /proc filesystem handler to display a device
* in detail.
*/
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev_rcu(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net_device *dev = (v == SEQ_START_TOKEN) ?
first_net_device(seq_file_net(seq)) :
				  next_net_device((struct net_device *)v);

	++*pos;
	return rcu_dereference(dev);
}
void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
const struct net_device_stats *stats = dev_get_stats(dev);
seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
stats->rx_fifo_errors,
stats->rx_length_errors + stats->rx_over_errors +
stats->rx_crc_errors + stats->rx_frame_errors,
stats->rx_compressed, stats->multicast,
stats->tx_bytes, stats->tx_packets,
stats->tx_errors, stats->tx_dropped,
stats->tx_fifo_errors, stats->collisions,
stats->tx_carrier_errors +
stats->tx_aborted_errors +
stats->tx_window_errors +
stats->tx_heartbeat_errors,
stats->tx_compressed);
}
/*
* Called from the PROCfs module. This now uses the new arbitrary sized
* /proc/net interface to create /proc/net/dev
*/
static int dev_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Inter-| Receive "
" | Transmit\n"
" face |bytes packets errs drop fifo frame "
"compressed multicast|bytes packets errs "
"drop fifo colls carrier compressed\n");
else
dev_seq_printf_stats(seq, v);
return 0;
}
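/*
 * Illustrative output (values invented): /proc/net/dev then reads
 *
 *	Inter-|   Receive ...                  | Transmit ...
 *	 face |bytes    packets errs ...
 *	    lo:   43210     321    0 ...
 *	  eth0: 9876543    6543    0 ...
 *
 * with one dev_seq_printf_stats() line per registered device.
 */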
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
struct netif_rx_stats *rc = NULL;
	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
rc = &per_cpu(netdev_rx_stat, *pos);
break;
} else
++*pos;
return rc;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
return softnet_get_online(pos);
}
static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return softnet_get_online(pos);
}
static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}
static int softnet_seq_show(struct seq_file *seq, void *v)
{
struct netif_rx_stats *s = v;
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision, s->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = dev_seq_show,
};
static int dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
.start = softnet_seq_start,
.next = softnet_seq_next,
.stop = softnet_seq_stop,
.show = softnet_seq_show,
};
static int softnet_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &softnet_seq_ops);
}
static const struct file_operations softnet_seq_fops = {
.owner = THIS_MODULE,
.open = softnet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *ptype_get_idx(loff_t pos)
{
struct packet_type *pt = NULL;
loff_t i = 0;
int t;
list_for_each_entry_rcu(pt, &ptype_all, list) {
if (i == pos)
return pt;
++i;
}
for (t = 0; t < PTYPE_HASH_SIZE; t++) {
list_for_each_entry_rcu(pt, &ptype_base[t], list) {
if (i == pos)
return pt;
++i;
}
}
return NULL;
}
static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct packet_type *pt;
struct list_head *nxt;
int hash;
++*pos;
if (v == SEQ_START_TOKEN)
return ptype_get_idx(0);
pt = v;
nxt = pt->list.next;
if (pt->type == htons(ETH_P_ALL)) {
if (nxt != &ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
} else
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
while (nxt == &ptype_base[hash]) {
if (++hash >= PTYPE_HASH_SIZE)
return NULL;
nxt = ptype_base[hash].next;
}
found:
return list_entry(nxt, struct packet_type, list);
}
static void ptype_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ptype_seq_show(struct seq_file *seq, void *v)
{
struct packet_type *pt = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
seq_printf(seq, " %-8s %pF\n",
pt->dev ? pt->dev->name : "", pt->func);
}
return 0;
}
static const struct seq_operations ptype_seq_ops = {
.start = ptype_seq_start,
.next = ptype_seq_next,
.stop = ptype_seq_stop,
.show = ptype_seq_show,
};
static int ptype_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ptype_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations ptype_seq_fops = {
.owner = THIS_MODULE,
.open = ptype_seq_open,
.read = seq_read,
.llseek = seq_lseek,
	.release = seq_release_net,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}
static void __net_exit dev_proc_net_exit(struct net *net)
{
wext_proc_exit(net);
proc_net_remove(net, "ptype");
proc_net_remove(net, "softnet_stat");
proc_net_remove(net, "dev");
}
static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
};
static int __init dev_proc_init(void)
{
return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif /* CONFIG_PROC_FS */
/**
* netdev_set_master - set up master/slave pair
* @slave: slave device
* @master: new master device
*
* Changes the master device of the slave. Pass %NULL to break the
* bonding. The caller must hold the RTNL semaphore. On a failure
* a negative errno code is returned. On success the reference counts
* are adjusted, %RTM_NEWLINK is sent to the routing socket and the
* function returns zero.
*/
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
struct net_device *old = slave->master;
ASSERT_RTNL();
if (master) {
if (old)
return -EBUSY;
dev_hold(master);
}
	slave->master = master;

	if (old) {
		synchronize_net();
		dev_put(old);
	}
if (master)
slave->flags |= IFF_SLAVE;
else
slave->flags &= ~IFF_SLAVE;
rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
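/*
 * Illustrative sketch (not part of net/core/dev.c): the bonding driver
 * is the traditional caller, enslaving and releasing devices under
 * RTNL roughly like this (error handling elided, names simplified):
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	...
 *	netdev_set_master(slave_dev, NULL);		// release
 *	rtnl_unlock();
 */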
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

dev->flags |= IFF_PROMISC;
dev->promiscuity += inc;
if (dev->promiscuity == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch promisc and return error.
*/
if (inc < 0)
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
printk(KERN_WARNING "%s: promiscuity touches roof, "
"set promiscuity failed, promiscuity feature "
"of device might be broken.\n", dev->name);
return -EOVERFLOW;
}
}
if (dev->flags != old_flags) {
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_ANOM_PROMISCUOUS,
"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
dev->name, (dev->flags & IFF_PROMISC),
(old_flags & IFF_PROMISC),
audit_get_loginuid(current),
uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
* dev_set_promiscuity - update promiscuity count on a device
* @dev: device
* @inc: modifier
*
* Add or remove promiscuity from a device. While the count in the device
* remains above zero the interface remains promiscuous. Once it hits zero
* the device reverts back to normal filtering operation. A negative inc
* value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);