epoll Source Code Analysis

This post draws in part on Dong Hao's analysis of the poll and epoll kernel sources; the author's blog is at http://donghao.org/uii/

The kernel version analyzed here is 2.6.9.

In the previous post, analyzing the poll source revealed two bottlenecks in poll's performance: 1. when the user passes in many fds, the poll system call copies every struct pollfd into the kernel on each invocation, so argument passing and page allocation become the bottleneck; 2. with many fds (say 1000), do_pollfd gets called a great many times. epoll improves on both problems:

  1. epoll keeps the copied-in fds itself: epoll_ctl passes each fd into the kernel ahead of time, and a single "wait" then covers them all, which eliminates the needless repeated copying
  2. during epoll_wait, instead of adding current to each fd's device wait queue in turn, a callback fires when a device wait queue wakes up: it puts the fd that produced the event onto a list, and it is this list of fds that gets returned
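
As a quick illustration of the resulting "register once, wait many times" pattern, here is a minimal userspace sketch (not from the original post; listen_fd and handle_event are assumptions):

// Minimal userspace sketch of the epoll pattern described above
#include <sys/epoll.h>

#define MAX_EVENTS 64

void handle_event(int fd);	// hypothetical event handler

void event_loop(int listen_fd)	// listen_fd: an already-created listening socket (assumed)
{
	struct epoll_event ev, events[MAX_EVENTS];
	int epfd = epoll_create(MAX_EVENTS);	// the size argument is only a hint

	ev.events = EPOLLIN;
	ev.data.fd = listen_fd;
	// the fd is copied into the kernel exactly once, here ...
	epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);

	for (;;) {
		// ... so each wait only has to harvest the ready list (rdllist)
		int n = epoll_wait(epfd, events, MAX_EVENTS, -1);
		for (int i = 0; i < n; i++)
			handle_event(events[i].data.fd);
	}
}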

What follows is a quick walk through the epoll source.

Key type definitions

// This structure is stored in the "private_data" member of the file structure of the created kernel event table, epfd
struct eventpoll 
{
	rwlock_t 			lock;		// protects access to this structure
	struct rw_semaphore sem;		// read-write semaphore
	wait_queue_head_t 	wq;			// wait queue used by sys_epoll_wait()
	wait_queue_head_t 	poll_wait;	// wait queue used by file->poll()
	struct list_head 	rdllist;	// list of file descriptors with events pending
	struct rb_root 		rbr;		// root of the red-black tree that stores the monitored fds
};

// Structure for each file descriptor added to an eventpoll; the basic unit epoll works with
struct epitem 
{
	struct rb_node 		rbn;		// links this structure into the eventpoll's red-black tree
	struct list_head 	rdllink;	// list head linking this structure into the eventpoll ready list
	struct epoll_filefd ffd;		// information on the referenced descriptor: the file pointer plus the fd
	int 				nwait;		// number of active wait queues attached by the poll operation
	struct list_head	pwqlist;	// list containing the wait queues
	struct eventpoll 	*ep;		// this item's "container": the struct eventpoll it belongs to
	struct epoll_event 	event;		// structure describing the events of interest and the fd
	atomic_t 			usecnt;		// usage count for this item
	struct list_head 	fllink;		// list head linking this structure into the struct file's list
	struct list_head 	txlink;		// list head linking this structure into the transfer list
	unsigned int 		revents;	// used while delivering events to user space
};

// An ep_pqueue queue node is set up; the function pointer inside it points to ep_ptable_queue_proc
struct ep_pqueue 
{
	poll_table 		pt;		// poll_table holding the queueing callback
	struct epitem 	*epi;	// pointer to the struct epitem
};

struct eppoll_entry 
{
	struct list_head 	llink;	// list head linking this structure into the struct epitem's list
	void 				*base;	// points back to the owning epitem
	wait_queue_t 		wait;	// wait-queue entry to be linked onto the target file's wait-queue head
	wait_queue_head_t 	*whead;	// the wait-queue head this entry is linked on
};

Key functions

// initialize the epoll module
static int __init eventpoll_init(void);

// the kernel implementation of epoll_create
asmlinkage long sys_epoll_create(int size);

// the kernel implementation of epoll_ctl
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event);

// the kernel implementation of epoll_wait
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, int maxevents, int timeout);

// =============================================================================================

// insert a kernel event (epitem) into the struct eventpoll; its other duties are covered below
static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd);

// the callback that writes ready events onto the struct eventpoll's rdllist
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key);

// write the kernel's ready events out to user space
static int ep_events_transfer(struct eventpoll *ep, struct epoll_event __user *events, int maxevents);

Walking through the epoll source

Since epoll is a module, we start with the module entry point, eventpoll_init:

static struct file_system_type eventpoll_fs_type = {
	.name		= "eventpollfs",
	.get_sb		= eventpollfs_get_sb,
	.kill_sb	= kill_anon_super,
};

// eventpoll_init-----------------------------------------------------------------------
static int __init eventpoll_init(void)
{
	int error;

	init_MUTEX(&epsem);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

    // create the kernel cache used to hold struct epitem nodes
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
			NULL, NULL);

    // create the kernel cache used to hold struct eppoll_entry nodes
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0,
			EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

    // register a filesystem in which eventpoll files are stored
	error = register_filesystem(&eventpoll_fs_type);
	if (error)
		goto epanic;

    // The code below may be skipped; interested readers can study it on their own ---------------------------------
	/* Mount the above commented virtual file system */
	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
	error = PTR_ERR(eventpoll_mnt);
	if (IS_ERR(eventpoll_mnt))
		goto epanic;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
			current));
	return 0;

epanic:
	panic("eventpoll_init() failed\n");
}

When this module initializes, it registers a new filesystem called "eventpollfs" (via the eventpoll_fs_type structure) and then mounts it. It also creates two kernel caches (in kernel programming, when small objects must be allocated frequently, you should create a kmem_cache to serve as a "memory pool"), used to hold struct epitem and struct eppoll_entry respectively.
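
The struct epitem and struct eppoll_entry objects allocated later come out of these two caches. The wrapper macros used below (EPI_MEM_ALLOC and friends) look roughly like this in this era of the kernel (reconstructed from memory; treat the definitions as a sketch):

// Plausible allocation wrappers over the two slab caches (sketch)
#define EPI_MEM_ALLOC()	(struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL)
#define EPI_MEM_FREE(p)	kmem_cache_free(epi_cache, p)
#define PWQ_MEM_ALLOC()	(struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL)
#define PWQ_MEM_FREE(p)	kmem_cache_free(pwq_cache, p)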

The very reason epoll_create returns a new fd is that it creates a new file inside this "eventpollfs" filesystem. The kernel implementation of epoll_create is as follows:

// sys_epoll_create----------------------------------------------------------------
asmlinkage long sys_epoll_create(int size)
{
	int error, fd;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/* Sanity check on the size parameter */
	error = -EINVAL;
	if (size <= 0)
		goto eexit_1;

	// Create all the items needed to set up an eventpoll file: the file structure, the inode and a free file descriptor.
	error = ep_getfd(&fd, &inode, &file);
	if (error)
		goto eexit_1;

	// Create a struct eventpoll, initialize it, and point file->private_data at the struct eventpoll
	error = ep_file_init(file);
	if (error)
		goto eexit_2;

	// The code below may be skipped; interested readers can study it on their own ---------------------------------
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

eexit_2:
	sys_close(fd);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}

There are two main functions inside sys_epoll_create: ep_getfd creates a new inode, a new file and a new fd on the first call to epoll_create, while ep_file_init creates a struct eventpoll and stores it in file->private_data.
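
ep_file_init is not reproduced in this post; here is a minimal sketch of what it does, reconstructed from the struct eventpoll fields listed earlier (the field-by-field details are assumptions):

// ep_file_init (sketch)-----------------------------------------------------------------
static int ep_file_init(struct file *file)
{
	struct eventpoll *ep;

	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
		return -ENOMEM;

	memset(ep, 0, sizeof(*ep));
	rwlock_init(&ep->lock);
	init_rwsem(&ep->sem);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;

	// the whole point: hang the eventpoll off the newly created file
	file->private_data = ep;
	return 0;
}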

At this point a question arises: why didn't the epoll developers build one giant in-kernel map to store the epoll handles users create, and have epoll_create return a pointer?

Because hardly any Linux system calls return pointers (note in passing that malloc is not a system call; the brk that malloc calls is). Under Linux everything is a file: input and output are files, sockets are files. "Everything is a file" means programs using this OS can stay very simple, since everything boils down to file operations! Using the filesystem brings another benefit: epoll_create returns an fd rather than a pointer. If a pointer points to the wrong place there is no way to tell, whereas an fd can be checked against current->files->fd_array[] to establish whether it is genuine.

With sys_epoll_create behind us, let's look at the kernel implementation of epoll_ctl:

// sys_epoll_ctl-----------------------------------------------------------------------------
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
    // copy the events the user cares about from user space into the kernel
	if (EP_OP_HASH_EVENT(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto eexit_1;

    // get epfd's struct file
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

    // get the struct file of the target descriptor fd
	tfile = fget(fd);
	if (!tfile)
		goto eexit_2;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto eexit_3;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !IS_FILE_EPOLL(file))
		goto eexit_3;

    // fetch the struct eventpoll from epfd's file->private_data
	ep = file->private_data;

	down_write(&ep->sem);

	// check whether the node being operated on is already in the red-black tree
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:	// add
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;

			error = ep_insert(ep, &epds, tfile, fd);	// the insert; its implementation is discussed below
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:	// delete
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:	// modify
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}

    // The code below may be skipped; interested readers can study it on their own ---------------------------------
	/*
	 * The function ep_find() increments the usage count of the structure
	 * so, if this is not NULL, we need to release it.
	 */
	if (epi)
		ep_release_epitem(epi);

	up_write(&ep->sem);

eexit_3:
	fput(tfile);
eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}

sys_epoll_ctl first calls ep_find. If a struct epitem is found and the user's operation is ADD, it returns -EEXIST; if the operation is DEL, it calls ep_remove. If no struct epitem is found and the operation is ADD, ep_insert creates one and inserts it.
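
From user space, that switch translates into the familiar error codes. A tiny hypothetical demo (not from the post):

// Hypothetical demo: ADD twice -> EEXIST, DEL without a prior ADD -> ENOENT
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create(16);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };

	epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev);	// ep_find misses -> ep_insert
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
		printf("second ADD: %s\n", strerror(errno));	// EEXIST
	// note: kernels before 2.6.9 required a non-NULL event even for DEL
	if (epoll_ctl(epfd, EPOLL_CTL_DEL, STDOUT_FILENO, &ev) < 0)
		printf("DEL without ADD: %s\n", strerror(errno));	// ENOENT
	close(epfd);
	return 0;
}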

Peeking at the rbr member of struct eventpoll shows that epoll keeps all the fds, each wrapped in a struct epitem, on a red-black tree, with rbr as its root. In short: a freshly created epoll file carries a struct eventpoll, that structure carries a red-black tree, and that tree is where the fds from every epoll_ctl call are stored!
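
The tree is keyed by the (struct file *, fd) pair stored in epi->ffd. In the 2.6-era source, the ordering macro looks roughly like this (quoted from memory; treat it as a sketch):

// Compare two epoll_filefd keys: order by file pointer first, then by fd (sketch)
#define EP_CMP_FFD(p1, p2) ((p1)->file > (p2)->file ? +1 : \
			    ((p1)->file < (p2)->file ? -1 : (p1)->fd - (p2)->fd))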

With epoll's data structures understood, on to the core of it all, the kernel implementation of epoll_wait:

// sys_epoll_wait-------------------------------------------------------------------
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	// check that the maxevents parameter is valid
	if (maxevents <= 0)
		return -EINVAL;

	// verify that the region passed by the user is writable
	if ((error = verify_area(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))))
		goto eexit_1;

	// get epfd's struct file
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!IS_FILE_EPOLL(file))
		goto eexit_2;

	// fetch the struct eventpoll from epfd's file->private_data
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}

Reading the source, sys_epoll_wait merely performs a series of checks on its arguments and fetches the struct eventpoll from epfd's file->private_data before calling ep_poll. In short, apart from invoking ep_poll, sys_epoll_wait does nothing of substance.

That being so, let's see what ep_poll actually does:

// ep_poll-----------------------------------------------------------------------------------------------
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value ( -1 )
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that why (t * HZ) / 1000.
	 */
	jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ?
		MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000;
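	/* e.g. with HZ=100 (an assumed config), timeout = 1000 ms gives
	 * jtimeout = (1000*100 + 999) / 1000 = 100 jiffies, i.e. one second */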

retry:
	write_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
        
		// No events are available to return to the caller, so we sleep here; ep_poll_callback() will wake us when events become available.
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
            
            // This line is the focal point: it checks whether ep->rdllist is empty
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			write_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout); // suspend current and let other processes run; resume current once the timeout expires
			write_lock_irqsave(&ep->lock, flags);
		}
		remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth to try to dig for events ? */
	eavail = !list_empty(&ep->rdllist);

	write_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
    // explained in detail later; set aside for now
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

Evidently, apart from checking whether ep->rdllist is empty and sleeping, ep_poll does nothing at all. So how does ep->rdllist ever become non-empty, and who makes it so? The answer: the callback installed during ep_insert.

Now we turn our attention to the ep_insert function used by sys_epoll_ctl:

// ep_insert-------------------------------------------------------------------------------------------
static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
    // allocate space for an epi
	if (!(epi = EPI_MEM_ALLOC()))
		goto eexit_1;

	// initialize the epi node
	EP_RB_INITNODE(&epi->rbn);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->txlink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;						// hand the struct eventpoll to epi
	EP_SET_FFD(&epi->ffd, tfile, fd);	// store the file descriptor and struct file in epi
	epi->event = *event;				// record the events the user cares about in epi
	atomic_set(&epi->usecnt, 1);
	epi->nwait = 0;

	// initialize epq's poll_table with the queueing callback
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	// call the fd's poll(), i.e. invoke ep_ptable_queue_proc, and fetch the current event bits
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * high memory pressure.
	 */
	if (epi->nwait < 0)
		goto eexit_2;

	// put the struct epitem on the struct file's f_ep_links list for easy lookup
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irqsave(&ep->lock, flags);

	// insert the epi node into the red-black tree
	ep_rbtree_insert(ep, epi);

	// if an event of interest has already occurred, put this node on ep->rdllist right away
	if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

    // The code below may be skipped; interested readers can study it on their own ---------------------------------
	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

eexit_2:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have been arrived on some
	 * allocated wait queue.
	 */
	write_lock_irqsave(&ep->lock, flags);
	if (EP_IS_LINKED(&epi->rdllink))
		EP_LIST_DEL(&epi->rdllink);
	write_unlock_irqrestore(&ep->lock, flags);

	EPI_MEM_FREE(epi);
eexit_1:
	return error;
}

Within the insert implementation there is a very familiar stretch of code:

	// initialize epq's poll_table with the queueing callback
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);	// equivalent to epq.pt.qproc = ep_ptable_queue_proc;

	// call the fd's poll(), i.e. invoke ep_ptable_queue_proc, and fetch the current event bits
	revents = tfile->f_op->poll(tfile, &epq.pt);

Similar code appeared in the poll analysis. The snippet is equivalent to epq.pt.qproc = ep_ptable_queue_proc;, after which the fd's poll method is called. That poll simply calls poll_wait (which every device driver supporting poll must call), which in turn ends up calling ep_ptable_queue_proc. Besides this, ep_insert also puts the struct epitem on the struct file's f_ep_links list for easy lookup; the fllink field of struct epitem carries exactly that duty.
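
For reference, the poll_table plumbing in this era of the kernel is tiny; roughly (from include/linux/poll.h, quoted from memory as a sketch):

// A poll_table is just a function pointer to a "queueing" callback (sketch)
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

typedef struct poll_table_struct {
	poll_queue_proc qproc;
} poll_table;

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->qproc = qproc;	// this assignment is all init_poll_funcptr does
}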

Since ep_insert calls ep_ptable_queue_proc, let's see what kind of creature ep_ptable_queue_proc is:

// ep_ptable_queue_proc---------------------------------------------------------------------------
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, poll_table *pt)
{
	struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt);	// get the struct epitem behind pt
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) {
        // set the callback to ep_poll_callback
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
        // join the device's wait queue
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

What ep_ptable_queue_proc mainly does is: create a struct eppoll_entry, set its wakeup callback to ep_poll_callback, then add it to the device's wait queue (whead is the wait queue that every device driver carries). Only this way, when the device becomes ready and wakes the entries on its wait queue, does ep_poll_callback get called.
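
init_waitqueue_func_entry is the piece that swaps the default "wake the task" behaviour for a callback; in this kernel it is roughly (sketch, quoted from memory):

// Build a wait-queue entry that runs func instead of waking a task (sketch)
static inline void init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags = 0;
	q->task = NULL;	// no task to wake ...
	q->func = func;	// ... run ep_poll_callback instead
}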

On every poll system call, the OS must hang current (the calling process) on the wait queue of every device behind every fd. With fds numbering in the thousands, this "hanging" gets quite laborious, and it happens on each call. epoll_wait is free of that fuss: epoll hangs current just once, at epoll_ctl time (that first time is unavoidable), and gives each fd a standing order, "call the callback when ready". When a device has an event, the callback puts the fd on rdllist, and each epoll_wait call merely collects the fds sitting on rdllist. By cleverly exploiting callbacks, epoll achieves a far more efficient event-driven model.

By now we can already tell what ep_poll_callback does: it takes the epitem on the red-black tree (representing an fd) that received an event and inserts it into ep->rdllist, so that when epoll_wait returns, rdllist contains exactly the ready fds.

Here is the concrete implementation of ep_poll_callback:

// ep_poll_callback---------------------------------------------------------------------
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = EP_ITEM_FROM_WAIT(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->file, epi, ep));

	write_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto is_disabled;

	/* If this file is already in the ready list we exit soon */
	if (EP_IS_LINKED(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

is_disabled:
	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}

As you can see, the core of the whole of ep_poll_callback comes down to one line:

list_add_tail(&epi->rdllink, &ep->rdllist);

which puts the struct epitem onto the struct eventpoll's rdllist.

That wraps up epoll's basic flow. The original post illustrates it with a diagram; rendered as text, the epoll call chain looks like this:

eventpoll_init()
sys_epoll_create()
sys_epoll_ctl()  -> ep_insert()  -> f_op->poll() -> ep_ptable_queue_proc()
                 -> ep_remove()                     (registers ep_poll_callback())
                 -> ep_modify()
sys_epoll_wait() -> ep_poll()   <---  ep_poll_callback() fills ep->rdllist when a device fires

epoll's EPOLLET

EPOLLET is a flag unique to the epoll system calls. ET stands for Edge Trigger; with EPOLLET set, repeated events stop popping up to interfere with the program's decisions, which is why it sees such frequent use.

As noted above, epoll hangs a callback on every fd; when an fd's device has news, the fd is placed on the rdllist, so epoll_wait only has to inspect rdllist to learn which fds have events.

Now let's return to the last few lines of ep_poll:

// ep_poll---------------------------------------------------------------------------
	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

The last call is to ep_events_transfer, whose main job is copying the fds on rdllist out to user space. Here is its concrete implementation:

// ep_events_transfer----------------------------------------------------------------------------------
static int ep_events_transfer(struct eventpoll *ep, struct epoll_event __user *events, int maxevents)
{
	int eventcnt = 0;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	down_read(&ep->sem);

	/* Collect/extract ready items */
	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
		/* Build result set in userspace */
		eventcnt = ep_send_events(ep, &txlist, events);

		/* Reinject ready items into the ready list */
		ep_reinject_items(ep, &txlist);
	}

	up_read(&ep->sem);

	return eventcnt;
}

Within this function, only three calls need our attention:

  • ep_collect_ready_items: moves the fds on rdllist over to txlist, leaving rdllist empty (sketched below)
  • ep_send_events: copies the fds on txlist out to user space
  • ep_reinject_items: "returns" a subset of the fds on txlist to rdllist, so they can be discovered there again next time
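
ep_collect_ready_items is not reproduced in this post; here is a minimal sketch of its behaviour, reconstructed from the description above (the details are assumptions, not quoted source):

// ep_collect_ready_items (sketch): splice up to maxevents items from rdllist onto txlist
static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
{
	int nepi = 0;
	unsigned long flags;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);
	while (!list_empty(&ep->rdllist) && nepi < maxevents) {
		epi = list_entry(ep->rdllist.next, struct epitem, rdllink);

		// move the item from the ready list to the transfer list
		EP_LIST_DEL(&epi->rdllink);
		list_add_tail(&epi->txlink, txlist);
		nepi++;
	}
	write_unlock_irqrestore(&ep->lock, flags);

	return nepi;
}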

Let's start with ep_send_events:

// ep_send_events-----------------------------------------------------------------------------------------
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist, struct epoll_event __user *events)
{
	int eventcnt = 0;
	unsigned int revents;
	struct list_head *lnk;
	struct epitem *epi;

	/*
	 * We can loop without lock because this is a task private list.
	 * The test done during the collection loop will guarantee us that
	 * another task will not try to collect this file. Also, items
	 * cannot vanish during the loop because we are holding "sem".
	 */
	list_for_each(lnk, txlist) {
		epi = list_entry(lnk, struct epitem, txlink);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "sem" in read and this will
		 * guarantee that both the file and the item will not vanish.
		 */
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

		/*
		 * Set the return event set for the current file descriptor.
		 * Note that only the task task was successfully able to link
		 * the item to its "txlist" will write this field.
		 */
		epi->revents = revents & epi->event.events;

		if (epi->revents) {
			if (__put_user(epi->revents,
				       &events[eventcnt].events) ||
			    __put_user(epi->event.data,
				       &events[eventcnt].data))
				return -EFAULT;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			eventcnt++;
		}
	}
	return eventcnt;
}

The copying in ep_send_events is all quite simple, but one line inside it is rather interesting:

	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

When calling poll here it passes NULL as the second argument. Why? Before answering that, look at how a device driver typically implements poll:

// scull_p_poll----------------------------------------------------------
static unsigned int scull_p_poll(struct file *filp, poll_table *wait) 
{
	struct scull_pipe *dev = filp->private_data; 
    unsigned int mask = 0;
    
	/*
	 * The buffer is circular; it is considered full 
	 * if "wp" is right behind "rp" and empty if the 
	 * two are equal. 
	 */
	down(&dev->sem); 
    poll_wait(filp, &dev->inq, wait); 
    poll_wait(filp, &dev->outq, wait); 
    if (dev->rp != dev->wp)
        mask |= POLLIN | POLLRDNORM; /* readable */ 
    if (spacefree(dev))
        mask |= POLLOUT | POLLWRNORM; /* writable */
	up(&dev->sem); 
    return mask;
}

The snippet above is taken from Linux Device Drivers (3rd edition), an absolute classic. The device first hangs current (the calling process) on the inq and outq queues (this "hanging" is performed by the wait callback function pointer), then waits for the device to wake it up; once woken, it can obtain the event mask via mask (note the mask variable: it is what carries the event mask out). So what does poll_wait do if wait is NULL?

// poll_wait---------------------------------------------------------------------------------------
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && wait_address)
		p->qproc(filp, wait_address, p);
}

That is the implementation of poll_wait: when the poll_table is NULL, poll_wait does nothing at all. Returning to ep_send_events, our question now has its answer. Passing NULL as the second argument effectively says "I don't want to sleep; I only want the event mask". The mask so obtained is then copied out to user space.

After ep_send_events completes, ep_reinject_items is invoked; here is its concrete implementation:

// ep_reinject_items---------------------------------------------------------------------
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
{
	int ricnt = 0, pwake = 0;
	unsigned long flags;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	while (!list_empty(txlist)) {
		epi = list_entry(txlist->next, struct epitem, txlink);

		/* Unlink the current item from the transfer list */
		EP_LIST_DEL(&epi->txlink);

		/*
		 * If the item is no more linked to the interest set, we don't
		 * have to push it inside the ready list because the following
		 * ep_release_epitem() is going to drop it. Also, if the current
		 * item is set to have an Edge Triggered behaviour, we don't have
		 * to push it back either.
		 */
		if (EP_RB_LINKED(&epi->rbn) && !(epi->event.events & EPOLLET) &&
		    (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ricnt++;
		}
	}

	if (ricnt) {
		/*
		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
		 * wait list.
		 */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);
}

In all of this code, only one small stretch actually matters, namely:

if (EP_RB_LINKED(&epi->rbn) &&
	!(epi->event.events & EPOLLET) &&		// not marked EPOLLET
	(epi->revents & epi->event.events) &&	// the event is still of interest
	!EP_IS_LINKED(&epi->rdllink))
{
	list_add_tail(&epi->rdllink, &ep->rdllist);
	ricnt++;
}

This test shows that ep_reinject_items puts only part of txlist back onto rdllist: precisely those fds that are "not marked EPOLLET" and whose "events are still of interest". Next time around, epoll_wait will of course take the fds on rdllist and copy them to the user again.

A simple example: take a socket that has merely connected, with no data sent or received. Its poll event mask will always contain POLLOUT (see the driver example above), so every call to epoll_wait returns a POLLOUT event (rather tiresome), because the fd keeps landing back on rdllist. Now suppose someone writes a heap of data into this socket until it clogs up (no longer writable): the (epi->revents & epi->event.events) test no longer holds (no more POLLOUT), the fd is not put back on rdllist, and epoll_wait stops reporting POLLOUT to the user. Next, give this socket EPOLLET, connect, and transfer no data: this time the !(epi->event.events & EPOLLET) test fails instead, so epoll_wait reports POLLOUT to the user exactly once (the fd never returns to rdllist), and subsequent epoll_wait calls deliver no event notification at all.
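
In practice this is why edge-triggered code must drain an fd until EAGAIN: the fd will not be put back on rdllist for you. A hedged userspace sketch (conn_fd and its non-blocking setup are assumptions):

// ET read loop: keep reading until the kernel buffer is empty
#include <errno.h>
#include <unistd.h>

// Assumes conn_fd was registered with EPOLLIN | EPOLLET and set O_NONBLOCK
void drain(int conn_fd)
{
	char buf[4096];
	ssize_t n;

	for (;;) {
		n = read(conn_fd, buf, sizeof(buf));
		if (n > 0)
			continue;			// process buf[0..n) here; no second notification will come
		if (n < 0 && errno == EAGAIN)
			break;				// buffer drained; wait for the next edge
		close(conn_fd);				// EOF (n == 0) or a real error
		break;
	}
}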

Finally

Having read through the epoll source, the call relationships among its functions should now be clear; they are exactly the call chain summarized earlier.