
nginx's use of the Linux native AIO mechanism, part 2


As the previous article, nginx's use of the Linux native AIO mechanism, part 1, explained, there are two ways to use the native AIO mechanism on Linux: link against the libaio library, or wrap the raw system calls yourself with syscall(). nginx takes the latter approach, which is both simple for users (no libaio installation required) and sufficient for nginx's own needs.

Let's look at the code:

#if (NGX_HAVE_FILE_AIO)
 
/*
 * We call io_setup(), io_destroy(), io_submit(), and io_getevents() directly
 * as syscalls instead of libaio usage, because the library header file
 * supports eventfd() since 0.3.107 version only.
 *
 * Also we do not use eventfd() in glibc, because glibc supports it
 * since 2.8 version and glibc maps two syscalls eventfd() and eventfd2()
 * into single eventfd() function with different number of parameters.
 */
 
static int
io_setup(u_int nr_reqs, aio_context_t *ctx)
{
    return syscall(SYS_io_setup, nr_reqs, ctx);
}
 
 
static int
io_destroy(aio_context_t ctx)
{
    return syscall(SYS_io_destroy, ctx);
}
 
 
static int
io_getevents(aio_context_t ctx, long min_nr, long nr, struct io_event *events,
    struct timespec *tmo)
{
    return syscall(SYS_io_getevents, ctx, min_nr, nr, events, tmo);
}
 
 
static int
io_submit(aio_context_t ctx, long n, struct iocb **paiocb)
{
    return syscall(SYS_io_submit, ctx, n, paiocb);
}

nginx wraps four key interface functions: io_setup(), io_submit(), io_getevents(), and io_destroy(); they need no further comment. eventfd() is used in only one place, so nginx does not even wrap it, calling syscall() directly with the call number SYS_eventfd (defined by nginx to match the system's __NR_eventfd; the other call numbers are defined similarly).
Note: on Linux, __NR_eventfd and __NR_eventfd2 are essentially the same, differing only in their parameters:

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
...
SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
    return sys_eventfd2(count, 0);
}

In other words, eventfd() behaves like eventfd2() with the flags parameter fixed to 0.
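As an illustration only (this is not nginx's source), a fallback definition of SYS_eventfd in terms of __NR_eventfd, plus a thin wrapper, could look like the sketch below; the function name my_eventfd is made up for the example.

#include <unistd.h>
#include <sys/syscall.h>

/* fallback: map the SYS_ name onto the kernel's __NR_ number when
 * the libc headers do not provide it (illustrative only) */
#if !defined(SYS_eventfd) && defined(__NR_eventfd)
#define SYS_eventfd  __NR_eventfd
#endif

static int
my_eventfd(unsigned int initval)
{
    /* the flags-free variant, exactly as the kernel code above shows */
    return syscall(SYS_eventfd, initval);
}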
On Linux, nginx uses AIO in combination with epoll; let's look at how this works in detail.
Besides the interface functions shown above, nginx also needs to do some initial setup, implemented in ngx_epoll_aio_init(), which is reached via:
ngx_epoll_init() -> ngx_epoll_aio_init()

static void
ngx_epoll_aio_init(ngx_cycle_t *cycle, ngx_epoll_conf_t *epcf)
{
    int                 n;
    struct epoll_event  ee;
 
    ngx_eventfd = syscall(SYS_eventfd, 0);
 
    if (ngx_eventfd == -1) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                      "eventfd() failed");
        ngx_file_aio = 0;
        return;
    }
...
    n = 1;
    if (ioctl(ngx_eventfd, FIONBIO, &n) == -1) {
...
    if (io_setup(epcf->aio_requests, &ngx_aio_ctx) == -1) {
...
    ngx_eventfd_event.data = &ngx_eventfd_conn;
    ngx_eventfd_event.handler = ngx_epoll_eventfd_handler;
    ngx_eventfd_event.log = cycle->log;
    ngx_eventfd_event.active = 1;
    ngx_eventfd_conn.fd = ngx_eventfd;
    ngx_eventfd_conn.read = &ngx_eventfd_event;
    ngx_eventfd_conn.log = cycle->log;
    ee.events = EPOLLIN|EPOLLET;
    ee.data.ptr = &ngx_eventfd_conn;
    if (epoll_ctl(ep, EPOLL_CTL_ADD, ngx_eventfd, &ee) != -1) {
        return;
    }

Looking only at the normal path, several things happen here: the global descriptor ngx_eventfd is created and set non-blocking; the AIO context ngx_aio_ctx (another global) is created; ngx_eventfd_event and ngx_eventfd_conn (both globals) are initialized, wrapping the eventfd in nginx's standard connection and event structures so that eventfd and AIO plug into nginx's overall event logic; finally, the AIO-related descriptor ngx_eventfd is added to epoll, completing the link between eventfd and epoll.
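The same steps can be reproduced in a minimal standalone program. This is only a sketch of the mechanism, not nginx code; error handling is abbreviated and the context size of 64 is arbitrary.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int
main(void)
{
    int                 efd, ep, n;
    aio_context_t       ctx = 0;           /* must be zeroed before io_setup() */
    struct epoll_event  ee;

    efd = syscall(SYS_eventfd, 0);         /* completion notification fd */
    if (efd == -1) {
        perror("eventfd");
        return 1;
    }

    n = 1;
    if (ioctl(efd, FIONBIO, &n) == -1) {   /* non-blocking, as nginx does */
        perror("ioctl(FIONBIO)");
        return 1;
    }

    if (syscall(SYS_io_setup, 64, &ctx) == -1) {   /* create the AIO context */
        perror("io_setup");
        return 1;
    }

    ep = epoll_create(1024);
    if (ep == -1) {
        perror("epoll_create");
        return 1;
    }

    ee.events = EPOLLIN|EPOLLET;           /* edge-triggered read events */
    ee.data.ptr = NULL;

    if (epoll_ctl(ep, EPOLL_CTL_ADD, efd, &ee) == -1) {
        perror("epoll_ctl");
        return 1;
    }

    printf("eventfd %d registered with epoll instance %d\n", efd, ep);
    return 0;
}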

When an AIO request is submitted, ngx_eventfd is stored into the request itself, which ties the eventfd to AIO:

ssize_t
ngx_file_aio_read(ngx_file_t *file, u_char *buf, size_t size, off_t offset,
    ngx_pool_t *pool)
{
...
    aio->aiocb.aio_flags = IOCB_FLAG_RESFD;
    aio->aiocb.aio_resfd = ngx_eventfd;
    ev->handler = ngx_file_aio_event_handler;
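Behind those two fields is a plain struct iocb submitted via io_submit(). The following helper (submit_read is hypothetical, not an nginx function) sketches a complete eventfd-notified read submission using the same flags:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static int
submit_read(aio_context_t ctx, int fd, void *buf, size_t size,
    off_t offset, int efd, void *tag)
{
    struct iocb   cb;
    struct iocb  *list[1];

    memset(&cb, 0, sizeof(struct iocb));

    cb.aio_data = (uint64_t) (uintptr_t) tag;   /* comes back in io_event.data */
    cb.aio_lio_opcode = IOCB_CMD_PREAD;         /* positioned asynchronous read */
    cb.aio_fildes = fd;
    cb.aio_buf = (uint64_t) (uintptr_t) buf;
    cb.aio_nbytes = size;
    cb.aio_offset = offset;

    cb.aio_flags = IOCB_FLAG_RESFD;             /* notify completion via ... */
    cb.aio_resfd = efd;                         /* ... this eventfd */

    list[0] = &cb;

    /* the kernel copies the iocb during io_submit(), so a stack
     * variable is safe here */
    return syscall(SYS_io_submit, ctx, 1, list);
}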

Given nginx's own use case (a web server: many reads, few writes), only reads currently go through native AIO. Whether a given request actually uses a native AIO request also depends on the nginx configuration and on the file being served, as briefly analyzed in the earlier article on configuration.
When an AIO request completes, the descriptor ngx_eventfd becomes readable, the epoll_wait() call where the worker blocks returns, and execution takes something like the following path:

static ngx_int_t
ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
{
...
    events = epoll_wait(ep, event_list, (int) nevents, timer);
...
    for (i = 0; i < events; i++) {
        c = event_list[i].data.ptr;
        /* the lowest bit of the stored pointer carries nginx's
         * "instance" flag, used to detect stale events */
        instance = (uintptr_t) c & 1;
        c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);
        rev = c->read;
...
        if ((revents & EPOLLIN) && rev->active) {
...
                rev->handler(rev);
...
        }
...

The path above is only "something like" the real one because it varies with the configuration, but in any case the rev->handler callback shown above eventually lands in ngx_epoll_eventfd_handler(), whose logic is as follows:

static void
ngx_epoll_eventfd_handler(ngx_event_t *ev)
{
...
    n = read(ngx_eventfd, &ready, 8);   /* the eventfd counter is a 64-bit value */
...
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    while (ready) {
        events = io_getevents(ngx_aio_ctx, 1, 64, event, &ts);
...
        if (events > 0) {
            ready -= events;
            for (i = 0; i < events; i++) {
...
                e = (ngx_event_t *) (uintptr_t) event[i].data;
                e->complete = 1;
                e->active = 0;
                e->ready = 1;
                aio = e->data;
                aio->res = event[i].res;
                ngx_post_event(e, &ngx_posted_events);
            }
            continue;
        }
...

Ignoring the error paths: the handler first read()s the descriptor ngx_eventfd to obtain ready, the number of completed AIO requests, then loops calling io_getevents() to fetch all of those completions, wrapping each individual AIO result in its event object and posting it to the ngx_posted_events queue.
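Condensed into a standalone sketch (names such as drain_completions are made up; this is not nginx source), the same drain pattern looks like this:

#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static void
drain_completions(int efd, aio_context_t ctx)
{
    long             n;
    uint64_t         ready;
    struct timespec  ts = { 0, 0 };        /* zero timeout: do not block */
    struct io_event  events[64];

    /* reading the eventfd returns the counter (completions so far)
     * and resets it to zero */
    if (read(efd, &ready, sizeof(ready)) != sizeof(ready)) {
        return;
    }

    while (ready > 0) {
        n = syscall(SYS_io_getevents, ctx, 1, 64, events, &ts);
        if (n <= 0) {
            break;
        }

        ready -= n;
        /* each events[i].data holds the pointer stored in iocb.aio_data;
         * nginx turns it back into an ngx_event_t and posts it */
    }
}

The posted events are then actually executed in the next round of nginx's event processing loop: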

void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
...
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
 
void
ngx_event_process_posted(ngx_cycle_t *cycle,
    ngx_thread_volatile ngx_event_t **posted)
{
    ngx_event_t  *ev;
    for ( ;; ) {
        ev = (ngx_event_t *) *posted;
...
        ngx_delete_posted_event(ev);
        ev->handler(ev);
    }
}

ngx_event_process_posted() invokes each event object's handler callback, which, per the setup done when the AIO request was created, is ngx_file_aio_event_handler():

static void
ngx_file_aio_event_handler(ngx_event_t *ev)
{
    ngx_event_aio_t  *aio;
    aio = ev->data;
    ngx_log_debug2(NGX_LOG_DEBUG_CORE, ev->log, 0,
                   "aio event handler fd:%d %V", aio->fd, &aio->file->name);
    aio->handler(ev);
}

The aio->handler callback points to ngx_http_copy_aio_event_handler(), and the remaining flow mainly sends the data that AIO read from disk into buffers on to the client:
ngx_http_copy_aio_event_handler() -> ngx_http_request_handler() -> ngx_http_writer() -> ngx_http_output_filter() -> ngx_http_top_body_filter()

 
