/*
 * sock.h — socket support helper declarations (excerpt).
 *
 * NOTE(review): the original lines here ("Skip to content", revision
 * attribution) were web-page chrome from a scraped code view, not
 * source text; replaced with this header comment.
 */
    static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
    {
    
    	skb_orphan(skb);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	skb->sk = sk;
    	skb->destructor = sock_wfree;
    
    	/*
    	 * We used to take a refcount on sk, but following operation
    	 * is enough to guarantee sk_free() wont free this sock until
    	 * all in-flight packets are completed
    	 */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
    }
    
    static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
    {
    
    	skb_orphan(skb);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	skb->sk = sk;
    	skb->destructor = sock_rfree;
    	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
    
    	sk_mem_charge(sk, skb->truesize);
    
    void sk_reset_timer(struct sock *sk, struct timer_list *timer,
    		    unsigned long expires);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    void sk_stop_timer(struct sock *sk, struct timer_list *timer);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    /*
     *	Recover an error report and clear atomically
     */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    static inline int sock_error(struct sock *sk)
    {
    
    	int err;
    	if (likely(!sk->sk_err))
    		return 0;
    	err = xchg(&sk->sk_err, 0);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	return -err;
    }
    
    static inline unsigned long sock_wspace(struct sock *sk)
    {
    	int amt = 0;
    
    	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
    		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
    
    		if (amt < 0)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			amt = 0;
    	}
    	return amt;
    }
    
    static inline void sk_wake_async(struct sock *sk, int how, int band)
    {
    
    	if (sock_flag(sk, SOCK_FASYNC))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		sock_wake_async(sk->sk_socket, how, band);
    }
    
    
    /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
     * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
     * Note: for send buffers, TCP works better if we can build two skbs at
     * minimum.
    
    #define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
    
    
    #define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
    #define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    static inline void sk_stream_moderate_sndbuf(struct sock *sk)
    {
    	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
    
    		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
    
    		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
    
    struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    /**
     * sk_page_frag - return an appropriate page_frag
     * @sk: socket
     *
     * If socket allocation mode allows current thread to sleep, it means its
     * safe to use the per task page_frag instead of the per socket one.
     */
    static inline struct page_frag *sk_page_frag(struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	if (sk->sk_allocation & __GFP_WAIT)
    		return &current->task_frag;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	return &sk->sk_frag;
    
    bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     *	Default write policy as shown to user space via poll/select/SIGIO
     */
    
    static inline bool sock_writeable(const struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
    
    static inline gfp_t gfp_any(void)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
    
    static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	return noblock ? 0 : sk->sk_rcvtimeo;
    }
    
    
    static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	return noblock ? 0 : sk->sk_sndtimeo;
    }
    
    static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
    {
    	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
    }
    
    /* Alas, with timeout socket operations are not restartable.
     * Compare this to poll().
     */
    static inline int sock_intr_errno(long timeo)
    {
    	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
    }
    
    
    void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
    			   struct sk_buff *skb);
    void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
    			     struct sk_buff *skb);
    
    static inline void
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
    {
    
    	ktime_t kt = skb->tstamp;
    
    	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
    
    	/*
    	 * generate control messages if
    	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
    	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
    	 * - software time stamp available and wanted
    	 *   (SOCK_TIMESTAMPING_SOFTWARE)
    	 * - hardware time stamps available and wanted
    	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
    	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
    	 */
    	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
    	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
    	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
    	    (hwtstamps->hwtstamp.tv64 &&
    	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
    	    (hwtstamps->syststamp.tv64 &&
    	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
    
    		__sock_recv_timestamp(msg, sk, skb);
    	else
    
    		sk->sk_stamp = kt;
    
    
    	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
    		__sock_recv_wifi_status(msg, sk, skb);
    
    void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
    			      struct sk_buff *skb);
    
    
    static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
    					  struct sk_buff *skb)
    {
    #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
    			   (1UL << SOCK_RCVTSTAMP)			| \
    			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
    			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
    
    			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
    
    			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
    
    	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
    		__sock_recv_ts_and_drops(msg, sk, skb);
    	else
    		sk->sk_stamp = skb->tstamp;
    }
    
/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tx_flags:	filled with instructions for time stamping
 *
 * Currently only depends on SOCK_TIMESTAMPING* flags.
 */
    void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
    
    #ifdef CONFIG_NET_DMA
    
    static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
    
    {
    	__skb_unlink(skb, &sk->sk_receive_queue);
    	if (!copied_early)
    		__kfree_skb(skb);
    	else
    		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
    }
    #else
    
    static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	__skb_unlink(skb, &sk->sk_receive_queue);
    	__kfree_skb(skb);
    }
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    static inline
    struct net *sock_net(const struct sock *sk)
    {
    
    	return read_pnet(&sk->sk_net);
    
    void sock_net_set(struct sock *sk, struct net *net)
    
    	write_pnet(&sk->sk_net, net);
    
    /*
     * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
    
    Lucas De Marchi's avatar
    Lucas De Marchi committed
     * They should not hold a reference to a namespace in order to allow
    
     * to stop it.
     * Sockets after sk_change_net should be released using sk_release_kernel
     */
    static inline void sk_change_net(struct sock *sk, struct net *net)
    {
    
    	sock_net_set(sk, hold_net(net));
    
    static inline struct sock *skb_steal_sock(struct sk_buff *skb)
    {
    
    		struct sock *sk = skb->sk;
    
    		skb->destructor = NULL;
    		skb->sk = NULL;
    		return sk;
    	}
    	return NULL;
    }
    
    
    void sock_enable_timestamp(struct sock *sk, int flag);
    int sock_get_timestamp(struct sock *, struct timeval __user *);
    int sock_get_timestampns(struct sock *, struct timespec __user *);
    int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
    		       int type);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    /*
     *	Enable debug/info messages
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     */
    
    extern int net_msg_warn;
    #define NETDEBUG(fmt, args...) \
    	do { if (net_msg_warn) printk(fmt,##args); } while (0)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    #define LIMIT_NETDEBUG(fmt, args...) \
    	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    extern __u32 sysctl_wmem_max;
    extern __u32 sysctl_rmem_max;
    
    
    extern __u32 sysctl_wmem_default;
    extern __u32 sysctl_rmem_default;
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    #endif	/* _SOCK_H */