/**
 *	dev_set_allmulti - update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti touches roof, "
			       "set allmulti failed, allmulti feature of "
			       "device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);
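
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * a driver that must receive every multicast frame bumps the
 * reference-counted allmulti state under RTNL and drops it again with
 * a matching -1 on teardown.
 */
static int example_grab_allmulti(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);	/* paired with dev_set_allmulti(dev, -1) */
	rtnl_unlock();
	return err;
}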
/*
* Upload unicast and multicast address lists to device and
* configure RX filtering. When the device doesn't support unicast
* filtering it is put in promiscuous mode while unicast addresses
* are present.
*/
void __dev_set_rx_mode(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
/* dev_open will call this function so the list will stay sane. */
if (!(dev->flags&IFF_UP))
return;
	if (!netif_device_present(dev))
		return;
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);
else {
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1);
dev->uc_promisc = 1;
} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = 0;
}
if (ops->ndo_set_multicast_list)
ops->ndo_set_multicast_list(dev);
}
}
void dev_set_rx_mode(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
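
/*
 * Driver-side sketch (hypothetical "example" hardware, not part of the
 * original file): an ndo_set_rx_mode handler walks the multicast list
 * the core has just synchronized and programs the hardware filter.
 * The core invokes it with netif_addr_lock_bh() held, so the list can
 * be walked without further locking. The example_hw_* helpers are
 * made-up stand-ins for real register writes.
 */
static void example_hw_set_promisc(struct net_device *dev) { /* stub */ }
static void example_hw_add_mc_filter(struct net_device *dev, const u8 *addr) { /* stub */ }

static void example_ndo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		example_hw_set_promisc(dev);
		return;
	}
	netdev_for_each_mc_addr(ha, dev)
		example_hw_add_mc_filter(dev, ha->addr);
}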
/**
* dev_get_flags - get flags reported to userspace
* @dev: device
*
* Get the combination of flag bits exported through APIs to userspace.
*/
unsigned dev_get_flags(const struct net_device *dev)
{
unsigned flags;
flags = (dev->flags & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
(dev->gflags & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
if (netif_oper_up(dev))
flags |= IFF_RUNNING;
if (netif_carrier_ok(dev))
flags |= IFF_LOWER_UP;
if (netif_dormant(dev))
flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
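
/*
 * Usage sketch (hypothetical, not part of the original file): because
 * dev_get_flags() folds the operational state into the returned word,
 * IFF_RUNNING here means oper-up, not merely administratively up.
 */
static bool example_link_is_running(const struct net_device *dev)
{
	return dev_get_flags(dev) & IFF_RUNNING;
}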
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	int old_flags = dev->flags;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */
dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
IFF_AUTOMEDIA)) |
(dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
IFF_ALLMULTI));
/*
* Load in the correct multicast list now the flags have changed.
*/
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);
/*
* Have we downed the interface. We handle IFF_UP ourselves
* according to user attempts to set it, rather than blindly
* setting it.
*/
ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different ? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}
if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags & IFF_PROMISC) ? 1 : -1;
dev->gflags ^= IFF_PROMISC;
dev_set_promiscuity(dev, inc);
}
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
is important. Some (broken) drivers set IFF_PROMISC, when
IFF_ALLMULTI is requested not asking us and not reporting.
*/
if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
dev->gflags ^= IFF_ALLMULTI;
dev_set_allmulti(dev, inc);
}
return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
unsigned int changes = dev->flags ^ old_flags;
if (changes & IFF_UP) {
if (dev->flags & IFF_UP)
call_netdevice_notifiers(NETDEV_UP, dev);
else
call_netdevice_notifiers(NETDEV_DOWN, dev);
}
if (dev->flags & IFF_UP &&
(changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
call_netdevice_notifiers(NETDEV_CHANGE, dev);
}
/**
* dev_change_flags - change device settings
* @dev: device
* @flags: device state flags
*
* Change settings on device based state flags. The flags are
* in the userspace exported format.
*/
int dev_change_flags(struct net_device *dev, unsigned flags)
{
int ret, changes;
int old_flags = dev->flags;
ret = __dev_change_flags(dev, flags);
if (ret < 0)
return ret;
changes = old_flags ^ dev->flags;
if (changes)
rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
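
/*
 * Usage sketch (hypothetical, not part of the original file): bring an
 * interface administratively up from kernel code. dev_change_flags()
 * takes flags in the userspace format and must run under RTNL.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}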
/**
* dev_set_mtu - Change maximum transfer unit
* @dev: device
* @new_mtu: new transfer unit
*
* Change the maximum transfer size of the network device.
*/
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (new_mtu == dev->mtu)
return 0;
/* MTU must be positive. */
if (new_mtu < 0)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
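
/*
 * Usage sketch (hypothetical tunnel-style caller, not part of the
 * original file): shrink the MTU to make room for encapsulation
 * overhead. The caller must hold RTNL.
 */
static int example_reserve_overhead(struct net_device *dev, int overhead)
{
	return dev_set_mtu(dev, dev->mtu - overhead);
}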
/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
*
* Change the hardware (MAC) address of the device
*/
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;
if (!ops->ndo_set_mac_address)
return -EOPNOTSUPP;
if (sa->sa_family != dev->type)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
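
/*
 * Usage sketch (hypothetical, caller holds RTNL, not part of the
 * original file): the sockaddr family must match dev->type (e.g.
 * ARPHRD_ETHER for Ethernet), or the -EINVAL check above fires.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}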
/*
 *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
if (!dev)
return -ENODEV;
switch (cmd) {
case SIOCGIFFLAGS: /* Get interface flags */
ifr->ifr_flags = (short) dev_get_flags(dev);
return 0;
case SIOCGIFMETRIC: /* Get the metric on the interface
(currently unused) */
ifr->ifr_metric = 0;
return 0;
case SIOCGIFMTU: /* Get the MTU of a device */
ifr->ifr_mtu = dev->mtu;
return 0;
case SIOCGIFHWADDR:
if (!dev->addr_len)
memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
else
memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
ifr->ifr_hwaddr.sa_family = dev->type;
return 0;
case SIOCGIFSLAVE:
err = -EINVAL;
break;
case SIOCGIFMAP:
ifr->ifr_map.mem_start = dev->mem_start;
ifr->ifr_map.mem_end = dev->mem_end;
ifr->ifr_map.base_addr = dev->base_addr;
ifr->ifr_map.irq = dev->irq;
ifr->ifr_map.dma = dev->dma;
ifr->ifr_map.port = dev->if_port;
return 0;
case SIOCGIFINDEX:
ifr->ifr_ifindex = dev->ifindex;
return 0;
case SIOCGIFTXQLEN:
ifr->ifr_qlen = dev->tx_queue_len;
return 0;
default:
/* dev_ioctl() should ensure this case
* is never reached
*/
WARN_ON(1);
err = -EINVAL;
break;
}
return err;
}
/*
* Perform the SIOCxIFxxx calls, inside rtnl_lock()
*/
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
return dev_change_flags(dev, ifr->ifr_flags);
case SIOCSIFMETRIC: /* Set the metric on the interface
(currently unused) */
return -EOPNOTSUPP;
case SIOCSIFMTU: /* Set the MTU of a device */
return dev_set_mtu(dev, ifr->ifr_mtu);
case SIOCSIFHWADDR:
return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
case SIOCSIFHWBROADCAST:
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return 0;
case SIOCSIFMAP:
if (ops->ndo_set_config) {
return ops->ndo_set_config(dev, &ifr->ifr_map);
}
return -EOPNOTSUPP;
case SIOCADDMULTI:
if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen;
return 0;
case SIOCSIFNAME:
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
return dev_change_name(dev, ifr->ifr_newname);
/*
* Unknown or private ioctl
*/
default:
if ((cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15) ||
cmd == SIOCBONDENSLAVE ||
cmd == SIOCBONDRELEASE ||
cmd == SIOCBONDSETHWADDR ||
cmd == SIOCBONDSLAVEINFOQUERY ||
cmd == SIOCBONDINFOQUERY ||
cmd == SIOCBONDCHANGEACTIVE ||
cmd == SIOCGMIIPHY ||
cmd == SIOCGMIIREG ||
cmd == SIOCSMIIREG ||
cmd == SIOCBRADDIF ||
cmd == SIOCBRDELIF ||
cmd == SIOCSHWTSTAMP ||
cmd == SIOCWANDEV) {
err = -EOPNOTSUPP;
if (ops->ndo_do_ioctl) {
if (netif_device_present(dev))
err = ops->ndo_do_ioctl(dev, ifr, cmd);
else
err = -ENODEV;
}
} else
err = -EINVAL;
}
return err;
}
/*
* This function handles all "interface"-type I/O control requests. The actual
* 'doing' part of this is dev_ifsioc above.
*/
/**
* dev_ioctl - network device ioctl
* @net: the applicable net namespace
* @cmd: command to issue
* @arg: pointer to a struct ifreq in user space
*
* Issue ioctl functions to devices. This is normally called by the
* user space syscall interfaces but can sometimes be useful for
* other purposes. The return value is the return from the syscall if
* positive or a negative errno code on error.
*/
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct ifreq ifr;
int ret;
char *colon;
/* One special case: SIOCGIFCONF takes ifconf argument
and requires shared lock, because it sleeps writing
to user space.
*/
	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = 0;
colon = strchr(ifr.ifr_name, ':');
if (colon)
*colon = 0;
/*
* See which interface the caller is talking about.
*/
switch (cmd) {
/*
* These ioctl calls:
* - can be done by all.
* - atomic and do not require locking.
* - return a value
*/
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
case SIOCGIFHWADDR:
case SIOCGIFSLAVE:
case SIOCGIFMAP:
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();

		if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
case SIOCETHTOOL:
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ethtool(net, &ifr);
rtnl_unlock();
if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
/*
* These ioctl calls:
* - require superuser power.
* - require strict serialization.
* - return a value
*/
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSIFNAME:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, &ifr, cmd);
rtnl_unlock();
if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
/*
* These ioctl calls:
* - require superuser power.
* - require strict serialization.
* - do not return a value
*/
case SIOCSIFFLAGS:
case SIOCSIFMETRIC:
case SIOCSIFMTU:
case SIOCSIFMAP:
case SIOCSIFHWADDR:
case SIOCSIFSLAVE:
case SIOCADDMULTI:
case SIOCDELMULTI:
case SIOCSIFHWBROADCAST:
case SIOCSIFTXQLEN:
case SIOCSMIIREG:
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
case SIOCBRADDIF:
case SIOCBRDELIF:
case SIOCSHWTSTAMP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
/* fall through */
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, &ifr, cmd);
rtnl_unlock();
return ret;
case SIOCGIFMEM:
/* Get the per device memory space. We can add this but
* currently do not support it */
case SIOCSIFMEM:
/* Set the per device memory buffer space.
* Not applicable in our case */
case SIOCSIFLINK:
return -EINVAL;
/*
* Unknown or private ioctl.
*/
default:
if (cmd == SIOCWANDEV ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr.ifr_name);
ret = dev_ifsioc(net, &ifr, cmd);
if (!ret && copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
/* Take care of Wireless Extensions */
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
return wext_handle_ioctl(net, &ifr, cmd, arg);
return -EINVAL;
}
}
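
/*
 * Userspace counterpart sketch (hypothetical, not part of this file):
 * the SIOCGIFMTU branch handled above services a request such as:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */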
/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
* Returns a suitable unique value for a new device interface
* number. The caller must hold the rtnl semaphore or the
* dev_base_lock to be sure it remains unique.
*/
static int dev_new_index(struct net *net)
{
static int ifindex;
for (;;) {
if (++ifindex <= 0)
ifindex = 1;
if (!__dev_get_by_index(net, ifindex))
return ifindex;
}
}
/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
* for initialization unwind. Remove those
* devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
list_del(&dev->unreg_list);
			continue;
		}

		BUG_ON(dev->reg_state != NETREG_REGISTERED);
/* If device is running, close it first. */
dev_close(dev);
/* And unlink it from device chain. */
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
}
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
/* Shutdown queueing discipline. */
dev_shutdown(dev);
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);
/* Notifier chain MUST detach us from master device. */
WARN_ON(dev->master);
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
}
/* Process any work delayed until the end of the batch */
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
synchronize_net();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
}
static void rollback_registered(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
}
static void __netdev_init_queue_locks_one(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
spin_lock_init(&dev_queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
dev_queue->xmit_lock_owner = -1;
}
static void netdev_init_queue_locks(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}
unsigned long netdev_fix_features(unsigned long features, const char *name)
{
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
if (name)
printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
"checksum feature.\n", name);
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
if (name)
printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
"SG feature.\n", name);
features &= ~NETIF_F_TSO;
}
if (features & NETIF_F_UFO) {
if (!(features & NETIF_F_GEN_CSUM)) {
if (name)
printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
"since no NETIF_F_HW_CSUM feature.\n",
name);
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
if (name)
printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
"since no NETIF_F_SG feature.\n", name);
features &= ~NETIF_F_UFO;
}
}
return features;
}
EXPORT_SYMBOL(netdev_fix_features);
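
/*
 * Usage sketch (hypothetical driver probe, not part of the original
 * file): advertise the features the hardware wants and let the core
 * strip inconsistent combinations before registration.
 */
static void example_init_features(struct net_device *dev)
{
	dev->features |= NETIF_F_SG | NETIF_F_TSO;
	dev->features = netdev_fix_features(dev->features, dev->name);
}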

/**
* netif_stacked_transfer_operstate - transfer operstate
* @rootdev: the root or lower level device to transfer state from
* @dev: the device to transfer operstate to
*
* Transfer operational state from root to device. This is normally
* called when a stacking relationship exists between the root
 * device and the device (a leaf device).
*/
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev)
{
if (rootdev->operstate == IF_OPER_DORMANT)
netif_dormant_on(dev);
else
netif_dormant_off(dev);
if (netif_carrier_ok(rootdev)) {
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
} else {
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
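
/*
 * Usage sketch (hypothetical stacking driver, not part of the original
 * file): keep an upper device's operstate in step with its lower
 * device from a netdev notifier. example_get_upper() is a made-up
 * lookup helper; in this kernel the notifier payload is the
 * net_device itself.
 */
static struct net_device *example_get_upper(struct net_device *lower)
{
	return NULL;	/* stub: a real driver would track its upper dev */
}

static int example_lower_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = example_get_upper(lower);

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}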
/**
* register_netdevice - register a network device
* @dev: device to register
*
* Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
* Callers must hold the rtnl semaphore. You may want
* register_netdev() instead of this.
*
* BUGS:
* The locking appears insufficient to guarantee two parallel registers
* will not get the same name.
*/
int register_netdevice(struct net_device *dev)
{
int ret;
struct net *net = dev_net(dev);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
might_sleep();
/* When net_device's are persistent, this will be fatal. */
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;
if (!dev->num_rx_queues) {
/*
* Allocate a single RX queue if driver never called
* alloc_netdev_mq
*/
dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
if (!dev->_rx) {
ret = -ENOMEM;
goto out;
}
dev->_rx->first = dev->_rx;
atomic_set(&dev->_rx->count, 1);
dev->num_rx_queues = 1;
}
	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	ret = dev_get_valid_name(net, dev->name, dev->name, 0);
	if (ret)
		goto err_uninit;

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;
/* Fix illegal checksum combinations */
if ((dev->features & NETIF_F_HW_CSUM) &&
(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
dev->name);
dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
if ((dev->features & NETIF_F_NO_CSUM) &&
(dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
dev->name);
dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
}
dev->features = netdev_fix_features(dev->features, dev->name);
/* Enable software GSO if SG is supported. */
if (dev->features & NETIF_F_SG)
dev->features |= NETIF_F_GSO;
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
goto err_uninit;
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;
/*
* Default initial state at registry is that the
* device is present.
*/
set_bit(__LINK_STATE_PRESENT, &dev->state);
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
/*
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
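
/*
 * Usage sketch (hypothetical, not part of the original file):
 * register_netdevice() requires RTNL; most drivers instead call
 * register_netdev(), which wraps exactly this pattern.
 */
static int example_register(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}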
/**
* init_dummy_netdev - init a dummy network device for NAPI
* @dev: device to init
*
 * This takes a network device structure and initializes the minimum
* amount of fields so it can be used to schedule NAPI polls without
* registering a full blown interface. This is to be used by drivers
* that need to tie several hardware interfaces to a single NAPI
* poll scheduler due to HW limitations.
*/
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
memset(dev, 0, sizeof(struct net_device));
/* make sure we BUG if trying to hit standard
* register/unregister code path
*/
dev->reg_state = NETREG_DUMMY;
/* initialize the ref count */
atomic_set(&dev->refcnt, 1);
/* NAPI wants this */
INIT_LIST_HEAD(&dev->napi_list);
/* a dummy interface is started by default */
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
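
/*
 * Usage sketch (hypothetical driver, not part of the original file):
 * give a second hardware queue its own NAPI context by hanging it off
 * a dummy netdev. example_poll() is a made-up poll callback.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	napi_complete(napi);	/* stub: a real driver would reap its ring here */
	return 0;
}

static struct net_device example_dummy;
static struct napi_struct example_napi;

static void example_napi_setup(void)
{
	init_dummy_netdev(&example_dummy);
	netif_napi_add(&example_dummy, &example_napi, example_poll, 64);
	napi_enable(&example_napi);
}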
/**
* register_netdev - register a network device
* @dev: device to register
*