    }
    
    static inline void
    sk_dst_set(struct sock *sk, struct dst_entry *dst)
    {
    	write_lock(&sk->sk_dst_lock);
    	__sk_dst_set(sk, dst);
    	write_unlock(&sk->sk_dst_lock);
    }
    
    static inline void
    __sk_dst_reset(struct sock *sk)
    {
    	struct dst_entry *old_dst;
    
    	old_dst = sk->sk_dst_cache;
    	sk->sk_dst_cache = NULL;
    	dst_release(old_dst);
    }
    
    static inline void
    sk_dst_reset(struct sock *sk)
    {
    	write_lock(&sk->sk_dst_lock);
    	__sk_dst_reset(sk);
    	write_unlock(&sk->sk_dst_lock);
    }
    
    
    extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
    
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
    
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		/* TSO is unusable if the user disabled large sends or if
		 * the route adds an encapsulation header of its own.
		 */
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}
    
    
    static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
    {
    	sk->sk_wmem_queued   += skb->truesize;
    	sk->sk_forward_alloc -= skb->truesize;
    }
    
    static inline int skb_copy_to_page(struct sock *sk, char __user *from,
    				   struct sk_buff *skb, struct page *page,
    				   int off, int copy)
    {
    	if (skb->ip_summed == CHECKSUM_NONE) {
    		int err = 0;
    		unsigned int csum = csum_and_copy_from_user(from,
    						     page_address(page) + off,
    							    copy, 0, &err);
    		if (err)
    			return err;
    		skb->csum = csum_block_add(skb->csum, csum, skb->len);
    	} else if (copy_from_user(page_address(page) + off, from, copy))
    		return -EFAULT;
    
    	skb->len	     += copy;
    	skb->data_len	     += copy;
    	skb->truesize	     += copy;
    	sk->sk_wmem_queued   += copy;
    	sk->sk_forward_alloc -= copy;
    	return 0;
    }
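
/*
 * Usage sketch (illustration only, not part of the original header):
 * a sendmsg()-style path appending 'copy' bytes of user data to a page
 * fragment already attached to the tail skb.  The fragment index 'i'
 * and the page offset 'off' are the caller's own bookkeeping and are
 * assumed here:
 *
 *	err = skb_copy_to_page(sk, from, skb, page, off, copy);
 *	if (err)
 *		goto do_error;
 *	skb_shinfo(skb)->frags[i].size += copy;
 *
 * Note that skb->len, skb->data_len and the socket's write-memory
 * accounting have already been updated by skb_copy_to_page() itself.
 */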
    
    /*
     * 	Queue a received datagram if it will fit. Stream and sequenced
     *	protocols can't normally use this as they need to fit buffers in
     *	and play with them.
     *
     * 	Inlined as it's very short and called for pretty much every
     *	packet ever received.
     */
    
    static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
    {
    	sock_hold(sk);
    	skb->sk = sk;
    	skb->destructor = sock_wfree;
    	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
    }
    
    static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
    {
    	skb->sk = sk;
    	skb->destructor = sock_rfree;
    	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
    }
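
/*
 * Usage sketch (illustration only): the core of a datagram receive
 * path along the lines of sock_queue_rcv_skb().  The skb is charged
 * to the socket's receive allocation, queued, and the reader woken:
 *
 *	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 *	    (unsigned)sk->sk_rcvbuf)
 *		return -ENOMEM;
 *	skb_set_owner_r(skb, sk);
 *	skb_queue_tail(&sk->sk_receive_queue, skb);
 *	if (!sock_flag(sk, SOCK_DEAD))
 *		sk->sk_data_ready(sk, skb->len);
 */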
    
    extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
    			   unsigned long expires);
    
    extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
    
    
    extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
    
    static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
    {
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
    	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
    	    (unsigned)sk->sk_rcvbuf)
    		return -ENOMEM;
    	skb_set_owner_r(skb, sk);
    	skb_queue_tail(&sk->sk_error_queue, skb);
    	if (!sock_flag(sk, SOCK_DEAD))
    		sk->sk_data_ready(sk, skb->len);
    	return 0;
    }
    
    /*
     *	Recover an error report and clear atomically
     */
     
static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}
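
/*
 * Usage sketch (illustration only): a blocking receive loop checks for
 * a pending asynchronous error before sleeping, so that an ICMP-reported
 * failure is handed to the caller exactly once:
 *
 *	err = sock_error(sk);
 *	if (err)
 *		goto out_err;
 */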
    
    static inline unsigned long sock_wspace(struct sock *sk)
    {
    	int amt = 0;
    
    	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
    		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
    		if (amt < 0) 
    			amt = 0;
    	}
    	return amt;
    }
    
    static inline void sk_wake_async(struct sock *sk, int how, int band)
    {
    	if (sk->sk_socket && sk->sk_socket->fasync_list)
    		sock_wake_async(sk->sk_socket, how, band);
    }
    
    #define SOCK_MIN_SNDBUF 2048
    #define SOCK_MIN_RCVBUF 256
    
    static inline void sk_stream_moderate_sndbuf(struct sock *sk)
    {
    	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
    		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
    		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
    	}
    }
    
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
    
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
    
static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}
    
    #define sk_stream_for_retrans_queue(skb, sk)				\
    		for (skb = (sk)->sk_write_queue.next;			\
    		     (skb != (sk)->sk_send_head) &&			\
    		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
    		     skb = skb->next)
    
    
/* from STCP, for fast SACK processing */
    #define sk_stream_for_retrans_queue_from(skb, sk)			\
    		for (; (skb != (sk)->sk_send_head) &&                   \
    		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
    		     skb = skb->next)
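
/*
 * Usage sketch (illustration only): walking the transmitted-but-unacked
 * segments the way a TCP-like loss-marking pass would.  The helpers
 * segment_is_lost() and mark_for_retransmit() are hypothetical:
 *
 *	struct sk_buff *skb;
 *
 *	sk_stream_for_retrans_queue(skb, sk) {
 *		if (segment_is_lost(skb))
 *			mark_for_retransmit(skb);
 *	}
 *
 * The walk stops at sk_send_head (queued but never yet transmitted) or
 * when it would wrap past the head of sk_write_queue.
 */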
    
    
    /*
     *	Default write policy as shown to user space via poll/select/SIGIO
     */
    static inline int sock_writeable(const struct sock *sk) 
    {
    	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
    }
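
/*
 * Usage sketch (illustration only): a protocol's poll() hook reports
 * writability along these lines, much as datagram_poll() does:
 *
 *	if (sock_writeable(sk))
 *		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 */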
    
    
static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
    
    static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
    {
    	return noblock ? 0 : sk->sk_rcvtimeo;
    }
    
    static inline long sock_sndtimeo(const struct sock *sk, int noblock)
    {
    	return noblock ? 0 : sk->sk_sndtimeo;
    }
    
    static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
    {
    	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
    }
    
    /* Alas, with timeout socket operations are not restartable.
     * Compare this to poll().
     */
    static inline int sock_intr_errno(long timeo)
    {
    	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
    }
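
/*
 * Usage sketch (illustration only): a blocking recvmsg() arms the
 * timeout once and, if a signal interrupts the wait, maps it to the
 * right errno; the syscall is restartable only when no finite timeout
 * was requested:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	...
 *	if (signal_pending(current)) {
 *		err = sock_intr_errno(timeo);
 *		goto out;
 *	}
 */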
    
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}
    
/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether the data was copied early or not
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
    
    #ifdef CONFIG_NET_DMA
    static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
    {
    	__skb_unlink(skb, &sk->sk_receive_queue);
    	if (!copied_early)
    		__kfree_skb(skb);
    	else
    		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
    }
    #else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif
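
/*
 * Usage sketch (illustration only): a stream receive path consumes a
 * fully-copied skb under the socket lock, honouring the kernel-doc
 * requirement above; copied_early is non-zero only on the
 * CONFIG_NET_DMA early-copy path:
 *
 *	lock_sock(sk);
 *	...
 *	sk_eat_skb(sk, skb, copied_early);
 *	release_sock(sk);
 */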
    
    extern void sock_enable_timestamp(struct sock *sk);
    extern int sock_get_timestamp(struct sock *, struct timeval __user *);
    
/*
 *	Enable debug/info messages
 */

#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...)	printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...)	do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif
    
    /*
     * Macros for sleeping on a socket. Use them like this:
     *
     * SOCK_SLEEP_PRE(sk)
     * if (condition)
     * 	schedule();
     * SOCK_SLEEP_POST(sk)
     *
     * N.B. These are now obsolete and were, afaik, only ever used in DECnet
     * and when the last use of them in DECnet has gone, I'm intending to
     * remove them.
     */
    
    #define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
    				DECLARE_WAITQUEUE(wait, tsk); \
    				tsk->state = TASK_INTERRUPTIBLE; \
    				add_wait_queue((sk)->sk_sleep, &wait); \
    				release_sock(sk);
    
    #define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
    				remove_wait_queue((sk)->sk_sleep, &wait); \
    				lock_sock(sk); \
    				}
    
    static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
    {
    	if (valbool)
    		sock_set_flag(sk, bit);
    	else
    		sock_reset_flag(sk, bit);
    }
    
    extern __u32 sysctl_wmem_max;
    extern __u32 sysctl_rmem_max;
    
    #ifdef CONFIG_NET
    int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
    #else
    static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
    {
    	return -ENODEV;
    }
    #endif
    
    
    extern void sk_init(void);
    
    #ifdef CONFIG_SYSCTL
    extern struct ctl_table core_table[];
    #endif
    
    
    extern __u32 sysctl_wmem_default;
    extern __u32 sysctl_rmem_default;
    
    
    #endif	/* _SOCK_H */