/*
* NET3 Protocol independent device support routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Derived from the non IP parts of dev.c 1.0.19
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
*
* Additional Authors:
* Florian la Roche <rzsfl@rz.uni-sb.de>
* Alan Cox <gw4pts@gw4pts.ampr.org>
* David Hinds <dahinds@users.sourceforge.net>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Adam Sulmicki <adam@cfar.umd.edu>
* Pekka Riikonen <priikone@poesidon.pspt.fi>
*
* Changes:
* D.J. Barrow : Fixed bug where dev->refcnt gets set
* to 2 if register_netdev gets called
* before net_dev_init & also removed a
* few lines of code in the process.
* Alan Cox : device private ioctl copies fields back.
* Alan Cox : Transmit queue code does relevant
* stunts to keep the queue safe.
* Alan Cox : Fixed double lock.
* Alan Cox : Fixed promisc NULL pointer trap
* ???????? : Support the full private ioctl range
* Alan Cox : Moved ioctl permission check into
* drivers
* Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
* Alan Cox : 100 backlog just doesn't cut it when
* you start doing multicast video 8)
* Alan Cox : Rewrote net_bh and list manager.
* Alan Cox : Fix ETH_P_ALL echoback lengths.
* Alan Cox : Took out transmit every packet pass
* Saved a few bytes in the ioctl handler
* Alan Cox : Network driver sets packet type before
* calling netif_rx. Saves a function
* call a packet.
* Alan Cox : Hashed net_bh()
* Richard Kooijman: Timestamp fixes.
* Alan Cox : Wrong field in SIOCGIFDSTADDR
* Alan Cox : Device lock protection.
* Alan Cox : Fixed nasty side effect of device close
* changes.
* Rudi Cilibrasi : Pass the right thing to
* set_mac_address()
* Dave Miller : 32bit quantity for the device lock to
* make it work out on a Sparc.
* Bjorn Ekwall : Added KERNELD hack.
* Alan Cox : Cleaned up the backlog initialise.
* Craig Metz : SIOCGIFCONF fix if space for under
* 1 device.
* Thomas Bogendoerfer : Return ENODEV for dev_open, if there
* is no device open function.
* Andi Kleen : Fix error reporting for SIOCGIFCONF
* Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
* Cyrus Durgin : Cleaned for KMOD
* Adam Sulmicki : Bug Fix : Network Device Unload
* A network device unload needs to purge
* the backlog queue.
* Paul Rusty Russell : SIOCSIFNAME
* Pekka Riikonen : Netdev boot-time settings code
* Andrew Morton : Make unregister_netdevice wait
* indefinitely on dev->refcnt
* J Hadi Salim : - Backlog queue sampling
* - netif_rx() feedback
*/
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8
/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
*
* Why 16? Because with 16 the only overlap we get on a hash of the
* low nibble of the protocol value is RARP/SNAP/X.25.
*
* NOTE: That is no longer true with the addition of VLAN tags. Not
* sure which should go first, but I bet it won't make much
* difference if we are running VLANs. The good news is that
* this protocol won't be in the list unless compiled in, so
* the average user (w/out VLANs) will not be adversely affected.
* --BLG
*
* 0800 IP
* 8100 802.1Q VLAN
* 0001 802.3
* 0002 AX.25
* 0004 802.2
* 8035 RARP
* 0005 SNAP
* 0805 X.25
* 0806 ARP
* 8137 IPX
* 0009 Localtalk
* 86DD IPv6
*/
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
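/*
 * Illustrative note (not from the original file): a protocol type is
 * hashed into ptype_base[] by its low nibble, i.e.
 *
 *	hash = ntohs(pt->type) & PTYPE_HASH_MASK;
 *
 * so ETH_P_IP (0x0800) lands in bucket 0x0, while RARP (0x8035),
 * SNAP (0x0005) and X.25 (0x0805) all collide in bucket 0x5, which is
 * the overlap mentioned in the comment above.
 */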
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly; /* Taps */
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
*
* Pure readers hold dev_base_lock for reading, or rcu_read_lock()
*
* Writers must hold the rtnl semaphore while they loop through the
* dev_base_head list, and hold dev_base_lock for writing when they do the
* actual updates. This allows pure readers to access the list even
* while a writer is preparing to update it.
*
* To put it another way, dev_base_lock is held for writing only to
* protect against pure readers; the rtnl semaphore provides the
* protection against other writers.
*
* See, for example usages, register_netdevice() and
* unregister_netdevice(), which must be called with the rtnl
* semaphore held.
*/
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
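/*
 * Illustrative sketch (not part of the original file) of the two reader
 * patterns described above:
 *
 *	read_lock(&dev_base_lock);
 *	... walk dev_base_head ...
 *	read_unlock(&dev_base_lock);
 *
 * or, for an RCU reader:
 *
 *	rcu_read_lock();
 *	... walk the list with the _rcu iterators ...
 *	rcu_read_unlock();
 *
 * Writers take the rtnl lock (rtnl_lock()/rtnl_unlock()) and additionally
 * write_lock_bh(&dev_base_lock) around the actual list updates, as
 * list_netdevice() below does.
 */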
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
struct net *net = dev_net(dev);
ASSERT_RTNL();
write_lock_bh(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock_bh(&dev_base_lock);
return 0;
}
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
static void unlist_netdevice(struct net_device *dev)
{
ASSERT_RTNL();
/* Unlink dev from the device chain */
write_lock_bh(&dev_base_lock);
list_del_rcu(&dev->dev_list);
hlist_del_rcu(&dev->name_hlist);
hlist_del_rcu(&dev->index_hlist);
write_unlock_bh(&dev_base_lock);
}
/*
* Our notifier list
*/
static RAW_NOTIFIER_HEAD(netdev_chain);
/*
* Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler.
*/
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
*/
static const unsigned short netdev_lock_type[] =
{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
"_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
"_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
"_xmit_PHONET_PIPE", "_xmit_IEEE802154",
"_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
int i;
for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
if (netdev_lock_type[i] == dev_type)
return i;
/* the last key is used by default */
return ARRAY_SIZE(netdev_lock_type) - 1;
}
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
int i;
i = netdev_lock_pos(dev_type);
lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
netdev_lock_name[i]);
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
int i;
i = netdev_lock_pos(dev->type);
lockdep_set_class_and_name(&dev->addr_list_lock,
&netdev_addr_lock_key[i],
netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
Protocol management and registration routines
*******************************************************************************/
/*
* Add a protocol ID to the list. Now that the input handler is
* smarter we can dispense with all the messy stuff that used to be
* here.
*
* BEWARE!!! Protocol handlers, mangling input packets,
* MUST BE last in hash buckets and checking protocol handlers
* MUST start from promiscuous ptype_all chain in net_bh.
* It is true now, do not change it.
* Explanation follows: if protocol handler, mangling packet, will
* be the first on list, it is not able to sense, that packet
* is cloned and should be copied-on-write, so that it will
* change it and subsequent readers will get broken packet.
* --ANK (980803)
*/
/**
* dev_add_pack - add packet handler
* @pt: packet type declaration
*
* Add a protocol handler to the networking stack. The passed &packet_type
* is linked into kernel lists and may not be freed until it has been
* removed from the kernel lists.
*
* This call does not sleep, therefore it cannot
* guarantee that all CPUs that are in the middle of receiving packets
* will see the new packet type (until the next received packet).
*/
void dev_add_pack(struct packet_type *pt)
{
int hash;
spin_lock_bh(&ptype_lock);
if (pt->type == htons(ETH_P_ALL))
list_add_rcu(&pt->list, &ptype_all);
else {
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
list_add_rcu(&pt->list, &ptype_base[hash]);
}
spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
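/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical handler registered for all IPv4 frames. The names
 * my_rcv and my_pt are made up for the example.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// the handler owns one reference to skb and must release it
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *
 * The matching teardown is dev_remove_pack(&my_pt), which also waits for
 * a grace period so the structure can be freed afterwards.
 */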
/**
* __dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* The packet type might still be in use by receivers
* and must not be freed until after all the CPUs have gone
* through a quiescent state.
*/
void __dev_remove_pack(struct packet_type *pt)
{
struct list_head *head;
struct packet_type *pt1;
spin_lock_bh(&ptype_lock);
if (pt->type == htons(ETH_P_ALL))
head = &ptype_all;
else
head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
list_del_rcu(&pt->list);
goto out;
}
}
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
* dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* This call sleeps to guarantee that no CPU is looking at the packet
* type after return.
*/
void dev_remove_pack(struct packet_type *pt)
{
__dev_remove_pack(pt);
synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************
Device Boot-time Settings Routines
*******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
* netdev_boot_setup_add - add new setup entry
* @name: name of the device
* @map: configured settings for the device
*
* Adds a new setup entry to the dev_boot_setup list. The function
* returns 0 on error and 1 on success. This is a generic routine for
* all netdevices.
*/
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
struct netdev_boot_setup *s;
int i;
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
strlcpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
}
return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
* netdev_boot_setup_check - check boot time settings
* @dev: the netdevice
*
* Check boot time settings for the device.
* Any settings found are applied to the device for use
* later during device probing.
* Returns 0 if no settings are found, 1 if they are.
*/
int netdev_boot_setup_check(struct net_device *dev)
{
struct netdev_boot_setup *s = dev_boot_setup;
int i;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
!strcmp(dev->name, s[i].name)) {
dev->irq = s[i].map.irq;
dev->base_addr = s[i].map.base_addr;
dev->mem_start = s[i].map.mem_start;
dev->mem_end = s[i].map.mem_end;
return 1;
}
}
return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
* netdev_boot_base - get address from boot time settings
* @prefix: prefix for network device
* @unit: id for network device
*
* Check boot time settings for the base address of the device.
* Any settings found are applied to the device for use
* later during device probing.
* Returns 0 if no settings are found.
*/
unsigned long netdev_boot_base(const char *prefix, int unit)
{
const struct netdev_boot_setup *s = dev_boot_setup;
char name[IFNAMSIZ];
int i;
sprintf(name, "%s%d", prefix, unit);
/*
* If the device is already registered then return a base of 1
* to indicate that this interface should not be probed
*/
if (__dev_get_by_name(&init_net, name))
return 1;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
if (!strcmp(name, s[i].name))
return s[i].map.base_addr;
return 0;
}
/*
* Saves at boot time configured settings for any netdevice.
*/
int __init netdev_boot_setup(char *str)
{
int ints[5];
struct ifmap map;
str = get_options(str, ARRAY_SIZE(ints), ints);
if (!str || !*str)
return 0;
/* Save settings */
memset(&map, 0, sizeof(map));
if (ints[0] > 0)
map.irq = ints[1];
if (ints[0] > 1)
map.base_addr = ints[2];
if (ints[0] > 2)
map.mem_start = ints[3];
if (ints[0] > 3)
map.mem_end = ints[4];
/* Add new entry to the list */
return netdev_boot_setup_add(str, &map);
}
__setup("netdev=", netdev_boot_setup);
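/*
 * Illustrative note (not from the original file): with the parsing above,
 * a command line such as
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * stores irq=5 and base_addr=0x340 (mem_start/mem_end 0) for the device
 * named eth0, to be picked up later by netdev_boot_setup_check().
 * The leading integers are optional; the trailing string is the name.
 */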
/*******************************************************************************
Device Interface Subroutines
*******************************************************************************/
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name. Must be called under RTNL semaphore
* or @dev_base_lock. If the name is found a pointer to the device
* is returned. If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
hlist_for_each_entry(dev, p, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
* dev_get_by_name_rcu - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name.
* If the name is found a pointer to the device is returned.
* If the name is not found then %NULL is returned.
* The reference counters are not incremented so the caller must be
* careful with locks. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
hlist_for_each_entry_rcu(dev, p, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
* dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name. This can be called from any
* context and does its own locking. The returned handle has
* the usage count incremented and the caller must use dev_put() to
* release it when it is no longer needed. %NULL is returned if no
* matching device is found.
*/
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
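/*
 * Illustrative sketch (not part of the original file) of the hold/put
 * pattern from process context:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */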
/**
* __dev_get_by_index - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold either the RTNL semaphore
* or @dev_base_lock.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry(dev, p, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
* dev_get_by_index_rcu - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry_rcu(dev, p, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
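/*
 * Illustrative sketch (not part of the original file): the _rcu lookups
 * are only valid inside an RCU read-side section unless a reference is
 * taken before leaving it.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		... use dev, or dev_hold(dev) to keep it past the unlock ...
 *	rcu_read_unlock();
 */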
/**
* dev_get_by_index - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns NULL if the device
* is not found or a pointer to the device. The device returned has
* had a reference added and the pointer is safe until the user calls
* dev_put to indicate they have finished with it.
*/
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
* dev_getbyhwaddr - find a device by its hardware address
* @net: the applicable net namespace
* @type: media type of device
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
* is not found or a pointer to the device. The caller must hold the
* rtnl semaphore. The returned device has not had its ref count increased
* and the caller must therefore be careful about locking.
*
* BUGS:
* If the API was consistent this would be __dev_get_by_hwaddr
*/
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(net, dev)
if (dev->type == type &&
!memcmp(dev->dev_addr, ha, dev->addr_len))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(net, dev)
if (dev->type == type)
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev, *ret = NULL;
rcu_read_lock();
for_each_netdev_rcu(net, dev)
if (dev->type == type) {
dev_hold(dev);
ret = dev;
break;
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
* dev_get_by_flags - find any device with given flags
* @net: the applicable net namespace
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags. Returns NULL if a device
* is not found or a pointer to the device. The device returned has
* had a reference added and the pointer is safe until the user calls
* dev_put to indicate they have finished with it.
*/
struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
unsigned short mask)
{
struct net_device *dev, *ret;
ret = NULL;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
dev_hold(dev);
ret = dev;
break;
}
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
/**
* dev_valid_name - check if name is okay for network device
* @name: name string
*
* Network device names need to be valid file names
* to allow sysfs to work. We also disallow any kind of
* whitespace.
*/
int dev_valid_name(const char *name)
{
if (*name == '\0')
return 0;
if (strlen(name) >= IFNAMSIZ)
return 0;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return 0;
while (*name) {
if (*name == '/' || isspace(*name))
return 0;
name++;
}
return 1;
}
EXPORT_SYMBOL(dev_valid_name);
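/*
 * Illustrative note (not from the original file), following the checks
 * above: "eth0" and "wlan-1" are accepted; "", ".", "..", "a/b" and
 * "my if" (embedded whitespace) are rejected, as is any name of
 * IFNAMSIZ characters or more.
 */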
/**
* __dev_alloc_name - allocate a name for a device
* @net: network namespace to allocate the device name in
* @name: name format string
* @buf: scratch buffer and result name string
*
* Passed a format string, e.g. "lt%d", it will try to find a suitable
* id. It scans the list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
int i = 0;
const char *p;
const int max_netdevices = 8*PAGE_SIZE;
unsigned long *inuse;
struct net_device *d;
p = strnchr(name, IFNAMSIZ-1, '%');
if (p) {
/*
* Verify the string as this thing may have come from
* the user. There must be exactly one "%d" and no other "%"
* characters.
*/
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
/* Use one page as a bit array of possible slots */
inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
if (!inuse)
return -ENOMEM;
for_each_netdev(net, d) {
if (!sscanf(d->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
continue;
/* avoid cases where sscanf is not exact inverse of printf */
snprintf(buf, IFNAMSIZ, name, i);
if (!strncmp(buf, d->name, IFNAMSIZ))
set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, max_netdevices);
free_page((unsigned long) inuse);
}
if (buf != name)
snprintf(buf, IFNAMSIZ, name, i);
if (!__dev_get_by_name(net, buf))
return i;
/* It is possible to run out of possible slots
* when the name is long and there isn't enough space left
* for the digits, or if all bits are used.
*/
return -ENFILE;
}
/**
* dev_alloc_name - allocate a name for a device
* @dev: device
* @name: name format string
*
* Passed a format string, e.g. "lt%d", it will try to find a suitable
* id. It scans the list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
int dev_alloc_name(struct net_device *dev, const char *name)
{
char buf[IFNAMSIZ];
struct net *net;
int ret;
BUG_ON(!dev_net(dev));
net = dev_net(dev);
ret = __dev_alloc_name(net, name, buf);
if (ret >= 0)
strlcpy(dev->name, buf, IFNAMSIZ);
return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
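/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants an automatically numbered name.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *	// dev->name is now e.g. "dummy0"; err holds the unit number
 */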
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
struct net *net;
BUG_ON(!dev_net(dev));
net = dev_net(dev);
if (!dev_valid_name(name))
return -EINVAL;
if (fmt && strchr(name, '%'))
return dev_alloc_name(dev, name);
else if (__dev_get_by_name(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
return 0;
}
/**
* dev_change_name - change name of a device
* @dev: device
* @newname: name (or format string) must be at least IFNAMSIZ
*
* Change the name of a device. A format string such as "eth%d"
* can be passed for wildcarding.
*/
int dev_change_name(struct net_device *dev, const char *newname)
{
char oldname[IFNAMSIZ];
int err = 0;
int ret;
struct net *net;
ASSERT_RTNL();
BUG_ON(!dev_net(dev));
net = dev_net(dev);
if (dev->flags & IFF_UP)
return -EBUSY;
if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
return 0;
memcpy(oldname, dev->name, IFNAMSIZ);
err = dev_get_valid_name(dev, newname, 1);
if (err < 0)
return err;
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
return ret;
}
write_lock_bh(&dev_base_lock);
hlist_del(&dev->name_hlist);
write_unlock_bh(&dev_base_lock);
synchronize_rcu();
write_lock_bh(&dev_base_lock);
hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
write_unlock_bh(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
if (ret) {
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
printk(KERN_ERR
"%s: name change rollback failed: %d.\n",
dev->name, ret);
}
}
return err;
}
/**
* dev_set_alias - change ifalias of a device
* @dev: device
* @alias: name up to IFALIASZ
* @len: limit of bytes to copy from info
*
* Set ifalias for a device.
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
ASSERT_RTNL();
if (len >= IFALIASZ)
return -EINVAL;
if (!len) {
if (dev->ifalias) {
kfree(dev->ifalias);
dev->ifalias = NULL;
}
return 0;
}
dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
if (!dev->ifalias)
return -ENOMEM;
strlcpy(dev->ifalias, alias, len+1);
return len;
}
/**
* netdev_features_change - device changes features
* @dev: device to cause notification
*
* Called to indicate a device has changed features.
*/
void netdev_features_change(struct net_device *dev)
{
call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
* netdev_state_change - device changes state
* @dev: device to cause notification
*
* Called to indicate a device has changed state. This function calls
* the notifier chains for netdev_chain and sends a NEWLINK message
* to the routing socket.
*/
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
call_netdevice_notifiers(NETDEV_CHANGE, dev);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
}
}
EXPORT_SYMBOL(netdev_state_change);
int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
* dev_load - load a network module
* @net: the applicable net namespace
* @name: name of interface
*
* If a network interface is not present and the process has suitable
* privileges this function loads the module. If module loading is not
* available in this kernel then it becomes a nop.
*/
void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
ASSERT_RTNL();
/*
* Is it even present?
*/
if (!netif_device_present(dev))
return -ENODEV;
ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
ret = notifier_to_errno(ret);
if (ret)
return ret;
/*
* Call device private open method
*/
set_bit(__LINK_STATE_START, &dev->state);
if (ops->ndo_validate_addr)
ret = ops->ndo_validate_addr(dev);
if (!ret && ops->ndo_open)
ret = ops->ndo_open(dev);
/*
* If it went open OK then:
*/
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
/*
* Set the flags.
*/
dev->flags |= IFF_UP;
/*
* Enable NET_DMA
*/
net_dmaengine_get();
/*
* Initialize multicasting status
*/
dev_set_rx_mode(dev);
/*
* Wakeup transmit queue engine
*/
dev_activate(dev);
}
return ret;
}
/**
* dev_open - prepare an interface for use.
* @dev: device to open
*
* Takes a device from down to up state. The device's private open
* function is invoked and then the multicast lists are loaded. Finally
* the device is moved into the up state and a %NETDEV_UP message is
* sent to the netdev notifier chain.
*
* Calling this function on an active interface is a nop. On a failure
* a negative errno code is returned.
*/
int dev_open(struct net_device *dev)
{
int ret;
/*
* Is it already up?
*/
if (dev->flags & IFF_UP)
return 0;
/*
* Open device
*/
ret = __dev_open(dev);
if (ret < 0)
return ret;
/*
* ... and announce new interface.
*/
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_UP, dev);
return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
ASSERT_RTNL();
might_sleep();
/*
* Tell people we are going down, so that they can
* prepare for it while the device is still operating.
*/
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
clear_bit(__LINK_STATE_START, &dev->state);
/* Synchronize to scheduled poll. We cannot touch poll list,
* it may even be on a different CPU. So just clear netif_running().
*
* dev->stop() will invoke napi_disable() on all of its
* napi_struct instances on this device.
*/
smp_mb__after_clear_bit(); /* Commit netif_running(). */
dev_deactivate(dev);
/*
* Call the device specific close. This cannot fail.
* It is only called if the device is UP.
*
* We allow it to be called even after a DETACH hot-plug
* event.
*/
if (ops->ndo_stop)
ops->ndo_stop(dev);
/*
* Device is now down.
*/
dev->flags &= ~IFF_UP;
/*
* Shutdown NET_DMA
*/
net_dmaengine_put();
return 0;
}
/**
* dev_close - shutdown an interface.
* @dev: device to shutdown
*
* This function moves an active device into down state. A
* %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
* is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
* chain.
*/
int dev_close(struct net_device *dev)
{
if (!(dev->flags & IFF_UP))
return 0;
__dev_close(dev);
/*
* Tell people we are down
*/
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_DOWN, dev);
return 0;
}
EXPORT_SYMBOL(dev_close);
/**
* dev_disable_lro - disable Large Receive Offload on a device
* @dev: device
*
* Disable Large Receive Offload (LRO) on a net device. Must be
* called under RTNL. This is needed if received packets may be
* forwarded to another interface.
*/
void dev_disable_lro(struct net_device *dev)
{
if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
dev->ethtool_ops->set_flags) {
u32 flags = dev->ethtool_ops->get_flags(dev);
if (flags & ETH_FLAG_LRO) {
flags &= ~ETH_FLAG_LRO;
dev->ethtool_ops->set_flags(dev, flags);
}
}
WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;
/*
* Device change register/unregister. These are not inline or static
* as we export them to the world.
*/
/**
* register_netdevice_notifier - register a network notifier block
* @nb: notifier
*
* Register a notifier to be called when network device events occur.
* The notifier passed is linked into the kernel structures and must
* not be reused until it has been unregistered. A negative errno code
* is returned on a failure.
*
* When registered, all registration and up events are replayed
* to the new notifier to give it a race-free
* view of the network device list.
*/
int register_netdevice_notifier(struct notifier_block *nb)
{
struct net_device *dev;
struct net_device *last;
struct net *net;
int err;
rtnl_lock();
err = raw_notifier_chain_register(&netdev_chain, nb);
if (err)
goto unlock;
if (dev_boot_phase)
goto unlock;
for_each_net(net) {
for_each_netdev(net, dev) {
err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
err = notifier_to_errno(err);
if (err)
goto rollback;
if (!(dev->flags & IFF_UP))
continue;
nb->notifier_call(nb, NETDEV_UP, dev);
}
}
unlock:
rtnl_unlock();
return err;
rollback:
last = dev;
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
break;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
nb->notifier_call(nb, NETDEV_DOWN, dev);
}
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
}
}
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
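/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * notifier block; my_netdev_event and my_nb are made-up names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *
 * Existing devices are replayed as NETDEV_REGISTER/NETDEV_UP events at
 * registration time, as described above.
 */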
/**
* unregister_netdevice_notifier - unregister a network notifier block
* @nb: notifier
*
* Unregister a notifier previously registered by
* register_netdevice_notifier(). The notifier is unlinked from the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
*/
int unregister_netdevice_notifier(struct notifier_block *nb)
{
int err;
rtnl_lock();
err = raw_notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
* call_netdevice_notifiers - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @dev: net_device pointer passed unmodified to notifier function
*
* Call all network notifier blocks. Parameters and return value
* are as for raw_notifier_call_chain().
*/
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);
void net_enable_timestamp(void)
{
atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
if (atomic_read(&netstamp_needed))
__net_timestamp(skb);
else
skb->tstamp.tv64 = 0;
}
static inline void net_timestamp_check(struct sk_buff *skb)
{
if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
__net_timestamp(skb);
}
/**
* dev_forward_skb - loopback an skb to another netif
*
* @dev: destination network device
* @skb: buffer to forward
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped, but freed)
*
* dev_forward_skb can be used for injecting an skb from the
* start_xmit function of one device into the receive queue
* of another device.
*
* The receiving device may be in another namespace, so
* we have to clear all information in the skb that could
* impact namespace isolation.
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
nf_reset(skb);
if (!(dev->flags & IFF_UP) ||
(skb->len > (dev->mtu + dev->hard_header_len))) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
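/*
 * Illustrative sketch (not part of the original file): a virtual
 * device's transmit routine handing frames to its peer; the peer
 * lookup (my_get_peer) is hypothetical and device specific.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);	// hypothetical
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */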
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
*/
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
#ifdef CONFIG_NET_CLS_ACT
if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
net_timestamp_set(skb);
#else
net_timestamp_set(skb);
#endif
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
if ((ptype->dev == dev || !ptype->dev) &&
(ptype->af_packet_priv == NULL ||
(struct sock *)ptype->af_packet_priv != skb->sk)) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
break;
/* skb->nh should be correctly
set by sender, so that the second statement is
just protection against buggy protocols.
*/
skb_reset_mac_header(skb2);
if (skb_network_header(skb2) < skb2->data ||
skb2->network_header > skb2->tail) {
if (net_ratelimit())
printk(KERN_CRIT "protocol %04x is "
"buggy, dev %s\n",
skb2->protocol, dev->name);
skb_reset_network_header(skb2);
}
skb2->transport_header = skb2->network_header;
skb2->pkt_type = PACKET_OUTGOING;
ptype->func(skb2, skb->dev, ptype, skb->dev);
}
}
rcu_read_unlock();
}
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
*/
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
unsigned int real_num = dev->real_num_tx_queues;
if (unlikely(txq > dev->num_tx_queues))
;
else if (txq > real_num)
dev->real_num_tx_queues = txq;
else if (txq < real_num) {
dev->real_num_tx_queues = txq;
qdisc_reset_all_tx_gt(dev, txq);
}
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
void __netif_schedule(struct Qdisc *q)
{
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
if (atomic_dec_and_test(&skb->users)) {
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
skb->next = sd->completion_queue;
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
void dev_kfree_skb_any(struct sk_buff *skb)
{
if (in_irq() || irqs_disabled())
dev_kfree_skb_irq(skb);
else
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
* netif_device_detach - mark device as removed
* @dev: network device
*
* Mark device as removed from the system and therefore no longer available.
*/
void netif_device_detach(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
/**
* netif_device_attach - mark device as attached
* @dev: network device
*
* Mark device as attached to the system and restart if needed.
*/
void netif_device_attach(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_wake_all_queues(dev);
__netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
return ((features & NETIF_F_GEN_CSUM) ||
((features & NETIF_F_IP_CSUM) &&
protocol == htons(ETH_P_IP)) ||
((features & NETIF_F_IPV6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
}
static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
if (can_checksum_protocol(dev->features, skb->protocol))
return true;
if (skb->protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
if (can_checksum_protocol(dev->features & dev->vlan_features,
veh->h_vlan_encapsulated_proto))
return true;
}
return false;
}
/**
* skb_set_dev - assign a new device to a buffer
* @skb: buffer for the new device
* @dev: network device
*
* If an skb is owned by a device already, we have to reset
* all data private to the namespace a device belongs to
* before assigning it a new device.
*/
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
skb_dst_drop(skb);
if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
secpath_reset(skb);
nf_reset(skb);
skb_init_secmark(skb);
skb->mark = 0;
skb->priority = 0;
skb->nf_trace = 0;
skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
#endif
}
skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
/*
* Invalidate hardware checksum when packet is to be mangled, and
* complete checksum manually on outgoing path.
*/
int skb_checksum_help(struct sk_buff *skb)
{
__wsum csum;
int ret = 0, offset;
if (skb->ip_summed == CHECKSUM_COMPLETE)
goto out_set_summed;
if (unlikely(skb_shinfo(skb)->gso_size)) {
/* Let GSO fix up the checksum. */
goto out_set_summed;
}
offset = skb->csum_start - skb_headroom(skb);
BUG_ON(offset >= skb_headlen(skb));
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
if (skb_cloned(skb) &&
!skb_clone_writable(skb, offset + sizeof(__sum16))) {
ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (ret)
goto out;
}
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
* skb_gso_segment - Perform segmentation on skb.
* @skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function segments the given skb and returns a list of segments.
*
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
__be16 type = skb->protocol;
int err;
skb_reset_mac_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
__skb_pull(skb, skb->mac_len);
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
struct net_device *dev = skb->dev;
struct ethtool_drvinfo info = {};
if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
dev->ethtool_ops->get_drvinfo(dev, &info);
WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
"ip_summed=%d",
info.driver, dev ? dev->features : 0L,
skb->sk ? skb->sk->sk_route_caps : 0L,
skb->len, skb->data_len, skb->ip_summed);
if (skb_header_cloned(skb) &&
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
return ERR_PTR(err);
}
rcu_read_lock();
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
err = ptype->gso_send_check(skb);
segs = ERR_PTR(err);
if (err || skb_gso_ok(skb, features))
break;
__skb_push(skb, (skb->data -
skb_network_header(skb)));
}
segs = ptype->gso_segment(skb, features);
break;
}
}
rcu_read_unlock();
__skb_push(skb, skb->data - skb_mac_header(skb));
return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
if (net_ratelimit()) {
printk(KERN_ERR "%s: hw csum failure.\n",
dev ? dev->name : "<unknown>");
dump_stack();
}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
* 1. An IOMMU is present and allows mapping all the memory.
* 2. No high memory really exists on this machine.
*/
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
if (PageHighMem(skb_shinfo(skb)->frags[i].page))
return 1;
}
if (PCI_DMA_BUS_IS_PHYS) {
struct device *pdev = dev->dev.parent;
if (!pdev)
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
return 1;
}
}
#endif
return 0;
}
struct dev_gso_cb {
void (*destructor)(struct sk_buff *skb);
};
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
static void dev_gso_skb_destructor(struct sk_buff *skb)
{
struct dev_gso_cb *cb;
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
kfree_skb(nskb);
} while (skb->next);
cb = DEV_GSO_CB(skb);
if (cb->destructor)
cb->destructor(skb);
}
/**
* dev_gso_segment - Perform emulated hardware segmentation on skb.
* @skb: buffer to segment
*
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
static int dev_gso_segment(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct sk_buff *segs;
int features = dev->features & ~(illegal_highdma(dev, skb) ?
NETIF_F_SG : 0);
segs = skb_gso_segment(skb, features);
/* Verifying header integrity only. */
if (!segs)
return 0;
if (IS_ERR(segs))
return PTR_ERR(segs);
skb->next = segs;
DEV_GSO_CB(skb)->destructor = skb->destructor;
skb->destructor = dev_gso_skb_destructor;
return 0;
}
/*
* Try to orphan skb early, right before transmission by the device.
* We cannot orphan skb if tx timestamp is requested, since
* drivers need to call skb_tstamp_tx() to send the timestamp.
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
if (sk && !skb_tx(skb)->flags) {
/* skb_tx_hash() won't be able to get sk.
* We copy sk_hash into skb->rxhash
*/
if (!skb->rxhash)
skb->rxhash = sk->sk_hash;
skb_orphan(skb);
}
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
/*
* If the device doesn't need skb->dst, release it right now while
* it's hot in this CPU's cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
skb_orphan_try(skb);
if (netif_needs_gso(dev, skb)) {
if (unlikely(dev_gso_segment(skb)))
goto out_kfree_skb;
if (skb->next)
goto gso;
}
rc = ops->ndo_start_xmit(skb, dev);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
gso:
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
/*
* If the device doesn't need nskb->dst, release it right now while
* it's hot in this CPU's cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
rc = ops->ndo_start_xmit(nskb, dev);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
nskb->next = skb->next;
skb->next = nskb;
return rc;
}
txq_trans_update(txq);
if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
out_kfree_gso_skb:
if (likely(skb->next == NULL))
skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
kfree_skb(skb);
return rc;
}
static u32 hashrnd __read_mostly;
u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
u32 hash;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= dev->real_num_tx_queues))
hash -= dev->real_num_tx_queues;
return hash;
}
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
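/*
 * Illustrative note (not from the original file): the final line above
 * maps a full 32-bit hash h onto [0, real_num_tx_queues) without a
 * modulo, since ((u64)h * n) >> 32 == floor(h * n / 2^32). For example,
 * with n == 4, h == 0x40000000 selects queue 1 and h == 0xffffffff
 * selects queue 3.
 */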
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
pr_warning("%s selects TX queue %d, but "
"real number of TX queues is %d\n",
dev->name, queue_index, dev->real_num_tx_queues);
}
return 0;
}
return queue_index;
}
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
int queue_index;
struct sock *sk = skb->sk;
queue_index = sk_tx_queue_get(sk);
if (queue_index < 0) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb);
queue_index = dev_cap_txqueue(dev, queue_index);
} else {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
queue_index = skb_tx_hash(dev, skb);
if (sk) {
struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
}
}
}
skb_set_queue_mapping(skb, queue_index);
return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
int rc;
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
__qdisc_update_bstats(q, skb->len);
if (sch_direct_xmit(skb, q, dev, txq, root_lock))
__qdisc_run(q);
else
clear_bit(__QDISC_STATE_RUNNING, &q->state);
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
rc = qdisc_enqueue_root(skb, q);
qdisc_run(q);
}
spin_unlock(root_lock);
return rc;
}
/*
* Returns true if either:
* 1. skb has frag_list and the device doesn't support FRAGLIST, or
* 2. skb is fragmented and the device does not support SG, or if
* at least one of the fragments is in highmem and the device does not
* support DMA from it.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
struct net_device *dev)
{
return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
illegal_highdma(dev, skb)));
}
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
* this function. The function can be called from an interrupt.
*
* A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*
* -----------------------------------------------------------------------------------
* I notice this method can also return errors from the queue disciplines,
* including NET_XMIT_DROP, which is a positive value. So, errors can also
* be positive.
*
* Regardless of the return value, the skb is consumed, so it is currently
* difficult to retry a send to this method. (You can bump the ref count
* before sending to hold a reference for retry if you are careful.)
*
* When calling this method, interrupts MUST be enabled. This is because
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
int dev_queue_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
/* GSO will handle the following emulations directly. */
if (netif_needs_gso(dev, skb))
goto gso;
/* Convert a paged skb to linear, if required */
if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not support
* checksumming for this protocol, complete checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb, skb->csum_start -
skb_headroom(skb));
if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
goto out_kfree_skb;
}
gso:
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rcu_read_lock_bh();
txq = dev_pick_tx(dev, skb);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
}
/* The device has no queue. This is the common case for software devices:
loopback, all sorts of tunnels...
Really, it is unlikely that netif_tx_lock protection is necessary
here. (e.g. loopback and IP tunnels are clean, ignoring statistics
counters.)
However, it is possible that they rely on the protection
we provide here.
Check this and take the lock. It is not prone to deadlocks.
Or take it for the noqueue qdisc; that is even simpler 8)
*/
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_tx_queue_stopped(txq)) {
rc = dev_hard_start_xmit(skb, dev, txq);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
}
}
HARD_TX_UNLOCK(dev, txq);
if (net_ratelimit())
printk(KERN_CRIT "Virtual device %s asks to "
"queue packet!\n", dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately */
if (net_ratelimit())
printk(KERN_CRIT "Dead loop on virtual device "
"%s, fix it urgently!\n", dev->name);
}
}
rc = -ENETDOWN;
rcu_read_unlock_bh();
out_kfree_skb:
kfree_skb(skb);
return rc;
out:
rcu_read_unlock_bh();
return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
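/*
 * Illustrative sketch (not part of the original file) of the caller
 * contract described above: the device and priority are set before the
 * call, and the skb is consumed whatever the outcome. sk here stands
 * for whatever socket owns the buffer, if any.
 *
 *	skb->dev = dev;
 *	skb->priority = sk ? sk->sk_priority : 0;
 *	rc = dev_queue_xmit(skb);
 *	// rc may be 0, a positive NET_XMIT_* code, or a negative errno;
 *	// do not touch skb again here
 */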
/*=======================================================================
Receiver routines
=======================================================================*/
int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
struct napi_struct *napi)
{
list_add_tail(&napi->poll_list, &sd->poll_list);
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
* rcu_read_lock must be held on entry.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
struct ipv6hdr *ip6;
struct iphdr *ip;
struct netdev_rx_queue *rxqueue;
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
u8 ip_proto;
u16 tcpu;
u32 addr1, addr2, ihl;
union {
u32 v32;
u16 v16[2];
} ports;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->num_rx_queues)) {
WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
"on queue %u, but number of RX queues is %u\n",
dev->name, index, dev->num_rx_queues);
goto done;
}
rxqueue = dev->_rx + index;
} else
rxqueue = dev->_rx;
if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
goto done;
if (skb->rxhash)
goto got_hash; /* Skip hash computation on packet header */
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
if (!pskb_may_pull(skb, sizeof(*ip)))
goto done;
ip = (struct iphdr *) skb->data;
ip_proto = ip->protocol;
addr1 = (__force u32) ip->saddr;
addr2 = (__force u32) ip->daddr;
ihl = ip->ihl;
break;
case __constant_htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, sizeof(*ip6)))
goto done;
ip6 = (struct ipv6hdr *) skb->data;
ip_proto = ip6->nexthdr;
addr1 = (__force u32) ip6->saddr.s6_addr32[3];
addr2 = (__force u32) ip6->daddr.s6_addr32[3];
ihl = (40 >> 2);
break;
default:
goto done;
}
switch (ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_DCCP:
case IPPROTO_ESP:
case IPPROTO_AH:
case IPPROTO_SCTP:
case IPPROTO_UDPLITE:
if (pskb_may_pull(skb, (ihl * 4) + 4)) {
ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
if (ports.v16[1] < ports.v16[0])
swap(ports.v16[0], ports.v16[1]);
break;
}
default:
ports.v32 = 0;
break;
}
/* get a consistent hash (same value on both flow directions) */
if (addr2 < addr1)
swap(addr1, addr2);
skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
if (!skb->rxhash)
skb->rxhash = 1;
got_hash:
flow_table = rcu_dereference(rxqueue->rps_flow_table);
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
u16 next_cpu;
struct rps_dev_flow *rflow;
rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
tcpu = rflow->cpu;
next_cpu = sock_flow_table->ents[skb->rxhash &
sock_flow_table->mask];
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
* - Current CPU is unset (equal to RPS_NO_CPU).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
* This guarantees that all previous packets for the flow
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = rflow->cpu = next_cpu;
if (tcpu != RPS_NO_CPU)
rflow->last_qtail = per_cpu(softnet_data,
tcpu).input_queue_head;
}
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
}
}
map = rcu_dereference(rxqueue->rps_map);
if (map) {
tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
}
}
done:
return cpu;
}
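/*
 * Illustrative sketch, not part of the original file: the two ideas behind
 * the CPU selection in get_rps_cpu() above.  First, the flow hash is made
 * symmetric by ordering the address and port pairs before hashing, so both
 * directions of a flow hash to the same value.  Second, the 32-bit hash is
 * scaled onto the CPU map with a fixed-point multiply,
 * cpus[hash * len / 2^32], which avoids a division.  All names below are
 * made up for illustration.
 */
#if 0
static u32 symmetric_flow_hash_sketch(u32 saddr, u32 daddr,
				      u16 sport, u16 dport)
{
	u32 ports;

	if (daddr < saddr)
		swap(saddr, daddr);
	if (dport < sport)
		swap(sport, dport);
	ports = ((u32)sport << 16) | dport;

	return jhash_3words(saddr, daddr, ports, hashrnd);
}

static u16 pick_rps_cpu_sketch(const struct rps_map *map, u32 hash)
{
	/* Equivalent in distribution to hash % map->len, without a modulo */
	return map->cpus[((u64)hash * map->len) >> 32];
}
#endif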
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
struct softnet_data *sd = data;
____napi_schedule(sd, &sd->backlog);
sd->received_rps++;
}
#endif /* CONFIG_RPS */
/*
 * Check if this softnet_data structure belongs to another CPU.
 * If so, queue it on our IPI list and return 1;
 * if not, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
struct softnet_data *mysd = &__get_cpu_var(softnet_data);
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
return 1;
}
#endif /* CONFIG_RPS */
return 0;
}
/*
 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
 * queue (which may belong to a remote CPU).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
unsigned int *qtail)
{
struct softnet_data *sd;
unsigned long flags;
sd = &per_cpu(softnet_data, cpu);
local_irq_save(flags);
rps_lock(sd);
if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
rps_unlock(sd);
local_irq_restore(flags);
return NET_RX_SUCCESS;
}
/* Schedule NAPI for the backlog device.
 * We can use a non-atomic operation since we own the queue lock.
 */
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
if (!rps_ipi_queued(sd))
____napi_schedule(sd, &sd->backlog);
}
goto enqueue;
}
sd->dropped++;
rps_unlock(sd);
local_irq_restore(flags);
kfree_skb(skb);
return NET_RX_DROP;
}
/**
* netif_rx - post buffer to the network code
* @skb: buffer to post
*
* This function receives a packet from a device driver and queues it for
* the upper (protocol) levels to process. It always succeeds. The buffer
* may be dropped during processing for congestion control or by the
* protocol layers.
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped)
*
*/
int netif_rx(struct sk_buff *skb)
{
int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
return NET_RX_DROP;
if (netdev_tstamp_prequeue)
net_timestamp_check(skb);
#ifdef CONFIG_RPS
{
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu < 0)
cpu = smp_processor_id();
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
}
#else
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
#endif
return ret;
}
EXPORT_SYMBOL(netif_rx);
int netif_rx_ni(struct sk_buff *skb)
{
int err;
preempt_disable();
err = netif_rx(skb);
if (local_softirq_pending())
do_softirq();
preempt_enable();
return err;
}
EXPORT_SYMBOL(netif_rx_ni);
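/*
 * Illustrative sketch, not part of the original file: the usual pattern a
 * driver follows before handing a received frame to netif_rx() (from
 * interrupt context) or netif_rx_ni() (from process context, e.g. a
 * tun/tap style device).  "example_rx" and its arguments are made up for
 * illustration.
 */
#if 0
static void example_rx(struct net_device *dev, const void *data,
		       unsigned int len, bool in_irq_context)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (in_irq_context)
		netif_rx(skb);		/* queue to the per-CPU backlog */
	else
		netif_rx_ni(skb);	/* also runs any pending softirqs */
}
#endif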
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
if (sd->completion_queue) {
struct sk_buff *clist;
local_irq_disable();
clist = sd->completion_queue;
sd->completion_queue = NULL;
local_irq_enable();
while (clist) {
struct sk_buff *skb = clist;
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
__kfree_skb(skb);
}
}
if (sd->output_queue) {
struct Qdisc *head;
local_irq_disable();
head = sd->output_queue;
sd->output_queue = NULL;
sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
while (head) {
struct Qdisc *q = head;
spinlock_t *root_lock;
head = head->next_sched;
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
if (!test_bit(__QDISC_STATE_DEACTIVATED,
&q->state)) {
__netif_reschedule(q);
} else {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
}
}
}
}
}
static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
atomic_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
/*
 * If the bridge module is loaded, call the bridging hook.
 * Returns NULL if the packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(br_handle_frame_hook);
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev)
{
struct net_bridge_port *port;
if (skb->pkt_type == PACKET_LOOPBACK ||
(port = rcu_dereference(skb->dev->br_port)) == NULL)
return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret,
struct net_device *orig_dev)
{
struct macvlan_port *port;
port = rcu_dereference(skb->dev->macvlan_port);
if (!port)
return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
return macvlan_handle_frame_hook(port, skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for a few
 * useless instructions (a compare and two extra stores) when
 * CONFIG_NET_CLS_ACT is enabled but the ingress scheduler is not.
 * NOTE: This doesn't remove any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
u32 ttl = G_TC_RTTL(skb->tc_verd);
struct netdev_queue *rxq;
int result = TC_ACT_OK;
struct Qdisc *q;
if (MAX_RED_LOOP < ttl++) {
printk(KERN_WARNING
"Redir loop detected Dropping packet (%d->%d)\n",
skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
rxq = &dev->rx_queue;
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
return result;
}
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
if (skb->dev->rx_queue.qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
} else {
/* Huh? Why does turning on AF_PACKET affect this? */
skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
}
switch (ing_filter(skb)) {
case TC_ACT_SHOT:
case TC_ACT_STOLEN:
kfree_skb(skb);
return NULL;
}
out:
skb->tc_verd = 0;
return skb;
}
#endif
/**
* netif_nit_deliver - deliver received packets to network taps
* @skb: buffer
*
* This function is used to deliver incoming packets to network
* taps. It should be used when the normal netif_receive_skb path
* is bypassed, for example because of VLAN acceleration.
*/
void netif_nit_deliver(struct sk_buff *skb)
{
struct packet_type *ptype;
if (list_empty(&ptype_all))
return;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (!ptype->dev || ptype->dev == skb->dev)
deliver_skb(skb, ptype, skb->dev);
}
rcu_read_unlock();
}
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
struct net_device *master)
{
if (skb->pkt_type == PACKET_HOST) {
u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
memcpy(dest, master->dev_addr, ETH_ALEN);
}
}
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.
*/
int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
{
struct net_device *dev = skb->dev;
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
/* Unmangle the address. The local destination address
 * will always be the one the master has. This provides the right
 * functionality in a bridge.
 */
skb_bond_set_mac_by_master(skb, master);
}
if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
skb->protocol == __cpu_to_be16(ETH_P_ARP))
return 0;
if (master->priv_flags & IFF_MASTER_ALB) {
if (skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return 0;
}
if (master->priv_flags & IFF_MASTER_8023AD &&
skb->protocol == __cpu_to_be16(ETH_P_SLOW))
return 0;
return 1;
}
return 0;
}
EXPORT_SYMBOL(__skb_bond_should_drop);
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
struct net_device *master;
struct net_device *null_or_orig;
struct net_device *orig_or_bond;
int ret = NET_RX_DROP;
__be16 type;
if (!netdev_tstamp_prequeue)
net_timestamp_check(skb);
if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
return NET_RX_SUCCESS;
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
return NET_RX_DROP;
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
/*
* bonding note: skbs received on inactive slaves should only
* be delivered to pkt handlers that are exact matches. Also
* the deliver_no_wcard flag will be set. If packet handlers
* are sensitive to duplicate packets these skbs will need to
* be dropped at the handler. The vlan accel path may have
* already set the deliver_no_wcard flag.
*/
null_or_orig = NULL;
orig_dev = skb->dev;
master = ACCESS_ONCE(orig_dev->master);
if (skb->deliver_no_wcard)
null_or_orig = orig_dev;
else if (master) {
if (skb_bond_should_drop(skb, master)) {
skb->deliver_no_wcard = 1;
null_or_orig = orig_dev; /* deliver only exact match */
} else
skb->dev = master;
}
__get_cpu_var(softnet_data).processed++;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
pt_prev = NULL;
rcu_read_lock();
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
goto ncls;
}
#endif
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
ptype->dev == orig_dev) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
#ifdef CONFIG_NET_CLS_ACT
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
ncls:
#endif
skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
/*
* Make sure frames received on VLAN interfaces stacked on
* bonding interfaces still make their way to any base bonding
* device that may have registered for a specific ptype. The
* handler may have to adjust skb->dev and orig_dev.
*/
orig_or_bond = orig_dev;
if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
(vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
orig_or_bond = vlan_dev_real_dev(skb->dev);
}
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type && (ptype->dev == null_or_orig ||
ptype->dev == skb->dev || ptype->dev == orig_dev ||
ptype->dev == orig_or_bond)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
kfree_skb(skb);
/* Jamal, now you will not be able to escape explaining
 * to me how you were going to use this. :-)
 */
ret = NET_RX_DROP;
}
out:
rcu_read_unlock();
return ret;
}
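/*
 * Illustrative sketch, not part of the original file: how a protocol
 * handler ends up being called from the ptype delivery loops above.  A
 * module fills in a struct packet_type and registers it with
 * dev_add_pack(); the EtherType and handler body here are made up for
 * illustration.
 */
#if 0
static int example_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler is responsible for the skb it is handed */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pkt_type __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* IEEE local experimental EtherType */
	.dev = NULL,			/* NULL means "any device" */
	.func = example_pkt_rcv,
};

static int __init example_pkt_init(void)
{
	dev_add_pack(&example_pkt_type);
	return 0;
}
#endif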
/**
* netif_receive_skb - process receive buffer from network
* @skb: buffer to process
*
* netif_receive_skb() is the main receive data processing function.
* It always succeeds. The buffer may be dropped during processing
* for congestion control or by the protocol layers.
*
* This function may only be called from softirq context and interrupts
* should be enabled.
*
* Return values (usually ignored):
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
int netif_receive_skb(struct sk_buff *skb)
{