libuv Study Notes (13)

The uv_tcp_t data structure and related functions (2)

This post covers listen, accept and write, which the previous post did not get to.

Related functions

uv_listen is an exported function, declared in uv.h and defined in stream.c:
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
  int err;
  err = ERROR_INVALID_PARAMETER;
  switch (stream->type) {
    case UV_TCP:
      err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
      break;
    case UV_NAMED_PIPE: // pipe
      err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
      break;
    default:
      assert(0);
  }
  return uv_translate_sys_error(err);
}

For a TCP stream it calls uv_tcp_listen:

int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
  uv_loop_t* loop = handle->loop;
  unsigned int i, simultaneous_accepts;
  uv_tcp_accept_t* req;
  int err;
  assert(backlog > 0);
  // Already listening: just update the connection callback
  if (handle->flags & UV_HANDLE_LISTENING) {
    handle->stream.serv.connection_cb = cb;
  }
  // Already reading data: return an error
  if (handle->flags & UV_HANDLE_READING) {
    return WSAEISCONN;
  }
  // An error occurred during bind
  if (handle->delayed_error) {
    return handle->delayed_error;
  }
  // Not bound yet
  if (!(handle->flags & UV_HANDLE_BOUND)) { // bind to the default address and port
    err = uv_tcp_try_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip4_any_,
                          sizeof(uv_addr_ip4_any_),
                          0);
    if (err)
      return err;
    if (handle->delayed_error)
      return handle->delayed_error;
  }
  // Obtain the AcceptEx function pointer
  if (!handle->tcp.serv.func_acceptex) {
    if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
      return WSAEAFNOSUPPORT;
    }
  }

  if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
      listen(handle->socket, backlog) == SOCKET_ERROR) {
    return WSAGetLastError();
  }
  // Update state
  handle->flags |= UV_HANDLE_LISTENING;
  handle->stream.serv.connection_cb = cb;
  INCREASE_ACTIVE_COUNT(loop, handle); // increase the handle's active counter

  simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1
    : uv_simultaneous_server_accepts;
  // Initialize the internal accept requests
  if(!handle->tcp.serv.accept_reqs) {
    handle->tcp.serv.accept_reqs = (uv_tcp_accept_t*)
      uv__malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t));
    if (!handle->tcp.serv.accept_reqs) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
    }

    for (i = 0; i < simultaneous_accepts; i++) {
      req = &handle->tcp.serv.accept_reqs[i];
      uv_req_init(loop, (uv_req_t*)req);
      req->type = UV_ACCEPT;
      req->accept_socket = INVALID_SOCKET;
      req->data = handle;

      req->wait_handle = INVALID_HANDLE_VALUE;
      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
        // When emulating IOCP, an event has to be created
        req->event_handle = CreateEvent(NULL, 0, 0, NULL);
        if (!req->event_handle) {
          uv_fatal_error(GetLastError(), "CreateEvent");
        }
      } else {
        req->event_handle = NULL;
      }
      // Queue an accept request
      uv_tcp_queue_accept(handle, req);
    }
    // Initialize the remaining unused requests, since uv_tcp_endgame does not know how many were initialized and simply cleans up all of them
    for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) {
      req = &handle->tcp.serv.accept_reqs[i];
      uv_req_init(loop, (uv_req_t*) req);
      req->type = UV_ACCEPT;
      req->accept_socket = INVALID_SOCKET;
      req->data = handle;
      req->wait_handle = INVALID_HANDLE_VALUE;
      req->event_handle = NULL;
    }
  }
  return 0;
}

Queueing an accept request:

static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
  uv_loop_t* loop = handle->loop;
  BOOL success;
  DWORD bytes;
  SOCKET accept_socket;
  short family;
  assert(handle->flags & UV_HANDLE_LISTENING);
  assert(req->accept_socket == INVALID_SOCKET);
  // Choose the address family
  if (handle->flags & UV_HANDLE_IPV6) {
    family = AF_INET6;
  } else {
    family = AF_INET;
  }
  // Create a new socket to receive the accepted connection
  accept_socket = socket(family, SOCK_STREAM, 0);
  if (accept_socket == INVALID_SOCKET) {
    // On error, put the request straight onto the loop's pending-request queue so the error is handled there
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    return;
  }
  // Make the socket non-inheritable
  if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
    SET_REQ_ERROR(req, GetLastError());
    uv_insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    closesocket(accept_socket);
    return;
  }
  // Prepare the OVERLAPPED structure
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) { // emulating IOCP: use the event created earlier
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
  }
  // Asynchronous accept
  success = handle->tcp.serv.func_acceptex(handle->socket,
                                          accept_socket,
                                          (void*)req->accept_buffer,
                                          0,
                                          sizeof(struct sockaddr_storage),
                                          sizeof(struct sockaddr_storage),
                                          &bytes,
                                          &req->u.io.overlapped);

  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
    // The request completed immediately: put it straight onto the loop's pending-request queue
    req->accept_socket = accept_socket;
    handle->reqs_pending++;
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
    // Will be handled through IOCP
    req->accept_socket = accept_socket;
    handle->reqs_pending++;
    // When emulating IOCP, register a wait on event_handle
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        req->wait_handle == INVALID_HANDLE_VALUE &&
        !RegisterWaitForSingleObject(&req->wait_handle,
          req->event_handle, post_completion, (void*) req,
          INFINITE, WT_EXECUTEINWAITTHREAD)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*)req);
      handle->reqs_pending++;
      return;
    }
  } else {
    // Record the error and put the request straight onto the loop's pending-request queue
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    // Close the socket
    closesocket(accept_socket);
    // Destroy the event handle
    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
      CloseHandle(req->u.io.overlapped.hEvent);
      req->event_handle = NULL;
    }
  }
}

In uv_run, uv_process_reqs dispatches the accept request:

case UV_ACCEPT:
        DELEGATE_STREAM_REQ(loop, req, accept, data);
        break;

which ultimately calls uv_process_tcp_accept_req:

void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* raw_req) {
  uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
  int err;
  assert(handle->type == UV_TCP);
  // If req->accept_socket is not valid, uv_tcp_queue_accept failed; invoke
  // connection_cb with the error
  if (req->accept_socket == INVALID_SOCKET) {
    if (handle->flags & UV_HANDLE_LISTENING) {
      handle->flags &= ~UV_HANDLE_LISTENING;
      DECREASE_ACTIVE_COUNT(loop, handle); // decrease the handle's active counter
      if (handle->stream.serv.connection_cb) {
        err = GET_REQ_SOCK_ERROR(req);
        handle->stream.serv.connection_cb((uv_stream_t*)handle,
                                      uv_translate_sys_error(err));
      }
    }
  } else if (REQ_SUCCESS(req) &&
      setsockopt(req->accept_socket,
                  SOL_SOCKET,
                  SO_UPDATE_ACCEPT_CONTEXT,
                  (char*)&handle->socket,
                  sizeof(handle->socket)) == 0) {
    // Success
    req->next_pending = handle->tcp.serv.pending_accepts;
    handle->tcp.serv.pending_accepts = req;
    // Invoke the callback
    if (handle->stream.serv.connection_cb) {
      handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
    }
  } else {
    // Errors on the accepted socket are ignored, because the server socket may still be fine
    closesocket(req->accept_socket);
    req->accept_socket = INVALID_SOCKET;
    if (handle->flags & UV_HANDLE_LISTENING) {
      uv_tcp_queue_accept(handle, req); // keep accepting
    }
  }
  DECREASE_PENDING_REQ_COUNT(handle); // decrement the handle's pending-request count
}
uv_accept is an exported function, declared in uv.h and defined in stream.c.

uv_accept is normally called inside the uv_listen connection callback; for TCP it ends up calling uv_tcp_accept:

int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
  uv_loop_t* loop = server->loop;
  int err = 0;
  int family;
  uv_tcp_accept_t* req = server->tcp.serv.pending_accepts;
  // No pending accept request: return an error (for example uv_process_tcp_accept_req hit an error)
  if (!req) {
    return WSAEWOULDBLOCK;
  }
  // The pending request carries no usable socket
  if (req->accept_socket == INVALID_SOCKET) {
    return WSAENOTCONN;
  }
  if (server->flags & UV_HANDLE_IPV6) {
    family = AF_INET6;
  } else {
    family = AF_INET;
  }
  // Associate the accepted socket with the loop's IOCP port and with the client handle
  err = uv_tcp_set_socket(client->loop,
                          client,
                          req->accept_socket,
                          family,
                          0);
  if (err) {
    closesocket(req->accept_socket);
  } else {
    // Initialize the accepted socket as a connection socket, i.e. set up the parts a connection
    // uses, mainly the write- and shutdown-related request state
    uv_connection_init((uv_stream_t*) client);
    // AcceptEx() has already bound the accepted socket's address
    client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
  }
  // Prepare for the next AcceptEx
  server->tcp.serv.pending_accepts = req->next_pending;
  req->next_pending = NULL;
  req->accept_socket = INVALID_SOCKET;
  if (!(server->flags & UV__HANDLE_CLOSING)) {
    // The accept behaviour is not being changed
    if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
      uv_tcp_queue_accept(server, req); // keep accepting
    } else {
      assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
      server->tcp.serv.processed_accepts++;
      if (server->tcp.serv.processed_accepts >= uv_simultaneous_server_accepts) {
        server->tcp.serv.processed_accepts = 0;
        // All previously issued accept requests have been processed; from now on only post one
        uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
        server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
        server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
      }
    }
  }
  loop->active_tcp_streams++; // one more active TCP stream
  return err;
}

Unlike the usual socket flow, libuv already issues AcceptEx internally at listen time; uv_accept is normally used inside the listen callback to pick up the accepted socket, apply a few settings to it, and wrap it in the client handle you pass in.
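
To see how this fits together from the user's side, here is a minimal usage sketch (my own illustration, not code from the post or the libuv documentation; names such as on_connection, on_alloc, on_read and on_close are made up): uv_listen posts the AcceptEx requests, and the connection callback then calls uv_accept to hand one already-accepted socket to a fresh client handle.

#include <stdlib.h>
#include <uv.h>

static void on_close(uv_handle_t* handle) {
  free(handle);                                       // release the heap-allocated client handle
}

static void on_alloc(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
  *buf = uv_buf_init((char*) malloc(suggested_size), (unsigned int) suggested_size);
}

static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread < 0)                                      // error or EOF: drop the client
    uv_close((uv_handle_t*) stream, on_close);
  free(buf->base);
}

static void on_connection(uv_stream_t* server, int status) {
  uv_tcp_t* client;
  if (status < 0)                                     // the error uv_process_tcp_accept_req reported
    return;
  client = (uv_tcp_t*) malloc(sizeof(*client));
  uv_tcp_init(server->loop, client);
  if (uv_accept(server, (uv_stream_t*) client) == 0)  // takes one entry off pending_accepts
    uv_read_start((uv_stream_t*) client, on_alloc, on_read);
  else
    uv_close((uv_handle_t*) client, on_close);
}

int main(void) {
  uv_tcp_t server;
  struct sockaddr_in addr;
  uv_tcp_init(uv_default_loop(), &server);
  uv_ip4_addr("0.0.0.0", 7000, &addr);
  uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);
  uv_listen((uv_stream_t*) &server, 128, on_connection); // the AcceptEx requests are queued here
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}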

uv_write is an exported function, declared in uv.h and defined in stream.c.
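
A minimal usage sketch before looking at the implementation (my own illustration; send_hello and on_write are made-up names, and client is assumed to be an already-connected uv_stream_t*): both the uv_write_t request and the buffer memory must stay valid until the write callback runs.

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_write(uv_write_t* req, int status) {
  if (status < 0)
    fprintf(stderr, "write error: %s\n", uv_strerror(status));
  free(req);                                  // the request has to outlive the write
}

static void send_hello(uv_stream_t* client) {
  uv_write_t* req = (uv_write_t*) malloc(sizeof(*req));
  uv_buf_t buf = uv_buf_init("hello\n", 6);   // static storage, so it safely outlives the write
  uv_write(req, client, &buf, 1, on_write);
}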

Internally it calls uv_tcp_write:

int uv_tcp_write(uv_loop_t* loop,
                 uv_write_t* req,
                 uv_tcp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 uv_write_cb cb) {
  int result;
  DWORD bytes;
  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_WRITE;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;
  // Prepare the OVERLAPPED structure
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
    if (!req->event_handle) {
      uv_fatal_error(GetLastError(), "CreateEvent");
    }
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    req->wait_handle = INVALID_HANDLE_VALUE;
  }
  // Asynchronous send
  result = WSASend(handle->socket,
                   (WSABUF*) bufs,
                   nbufs,
                   &bytes,
                   0,
                   &req->u.io.overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    // The request completed immediately: put it straight onto the loop's pending-request queue
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    // Will be handled through IOCP
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    handle->write_queue_size += req->u.io.queued_bytes;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP && // emulating IOCP
        !RegisterWaitForSingleObject(&req->wait_handle,
          req->event_handle, post_write_completion, (void*) req,
          INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*)req);
    }
  } else {
    // Send error: put the request, carrying the error code, straight onto the loop's pending-request queue
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*) req);
  }
  return 0;
}

Handling the write request:

void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_write_t* req) {
  int err;
  assert(handle->type == UV_TCP);
  assert(handle->write_queue_size >= req->u.io.queued_bytes);
  handle->write_queue_size -= req->u.io.queued_bytes; // data still waiting to be sent
  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle); // stop waiting on the event
      req->wait_handle = INVALID_HANDLE_VALUE;
    }
    if (req->event_handle) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }
  if (req->cb) {
    err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req));
    if (err == UV_ECONNABORTED) {
      err = UV_ECANCELED;
    }
    req->cb(req, err); // invoke the callback
  }
  handle->stream.conn.write_reqs_pending--;
  if (handle->stream.conn.shutdown_req != NULL &&
      handle->stream.conn.write_reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*)handle);
  }
  DECREASE_PENDING_REQ_COUNT(handle);
}
uv_shutdown is declared in uv.h and defined in stream.c.

It closes the stream from the write side:

int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  uv_loop_t* loop = handle->loop;
  // Only writable sockets are handled, i.e. only TCP handles with an established connection; a handle that is merely accepting will not do
  if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    return UV_EPIPE;
  }
  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_SHUTDOWN;
  req->handle = handle;
  req->cb = cb;
  handle->flags &= ~UV_HANDLE_WRITABLE; // mark as no longer writable
  handle->stream.conn.shutdown_req = req;
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle, req);
  uv_want_endgame(loop, (uv_handle_t*)handle); // add the handle to the loop's endgame queue
  return 0;
}
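
A minimal sketch of the typical call pattern (my own illustration; finish_writing and on_shutdown are made-up names): uv_shutdown lets pending writes finish, half-closes the write side, and only then is the handle closed in the shutdown callback. Reads on the stream can still complete in the meantime.

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_shutdown(uv_shutdown_t* req, int status) {
  // Runs from uv_tcp_endgame once write_reqs_pending has dropped to 0
  if (status < 0)
    fprintf(stderr, "shutdown error: %s\n", uv_strerror(status));
  uv_close((uv_handle_t*) req->handle, NULL);   // full teardown once the half-close is done
  free(req);
}

static void finish_writing(uv_stream_t* client) {
  uv_shutdown_t* req = (uv_shutdown_t*) malloc(sizeof(*req));
  uv_shutdown(req, client, on_shutdown);
}
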
uv_close closes a TCP handle.

It ultimately calls uv_tcp_close:

void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
  int close_socket = 1;
  if (tcp->flags & UV_HANDLE_READ_PENDING) { // a read is currently pending
    if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
      shutdown(tcp->socket, SD_SEND); // shut down the send side
    } else if (uv_tcp_try_cancel_io(tcp) == 0) {
      // I/O cancelled; the socket will be closed while the final read request is processed
      close_socket = 0;
    } else {
      // cancellation failed
    }
  } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
             tcp->tcp.serv.accept_reqs != NULL) {
    // shared socket that is currently listening
    if (uv_tcp_try_cancel_io(tcp) != 0) { // try to cancel the I/O
      // Cancellation failed: close the sockets that were already accepted but whose IOCP round-trip
      // has not finished yet. In uv_accept, if accept_socket is invalid, no further accept is queued.
      unsigned int i;
      for (i = 0; i < uv_simultaneous_server_accepts; i++) {
        uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
        if (req->accept_socket != INVALID_SOCKET &&
            !HasOverlappedIoCompleted(&req->u.io.overlapped)) {
          closesocket(req->accept_socket);
          req->accept_socket = INVALID_SOCKET;
        }
      }
    }
  }

  if (tcp->flags & UV_HANDLE_READING) {
    tcp->flags &= ~UV_HANDLE_READING;
    DECREASE_ACTIVE_COUNT(loop, tcp);
  }

  if (tcp->flags & UV_HANDLE_LISTENING) {
    tcp->flags &= ~UV_HANDLE_LISTENING;
    DECREASE_ACTIVE_COUNT(loop, tcp);
  }

  if (close_socket) {
    closesocket(tcp->socket);
    tcp->socket = INVALID_SOCKET;
    tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
  }

  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  uv__handle_closing(tcp);

  if (tcp->reqs_pending == 0) {
    uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
  }
}

The flow difference between shutdown and closing the handle:
uv_shutdown -> uv_want_endgame -> uv_tcp_endgame

closehandle -> uv_tcp_close -> uv_tcp_endgame, once there are no more active requests on the tcp handle

uv_tcp_endgame

void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
  int err;
  unsigned int i;
  uv_tcp_accept_t* req;
  // There is a pending shutdown request. UV_HANDLE_CONNECTION means this socket was obtained
  // via accept or actively connected to a server
  if (handle->flags & UV_HANDLE_CONNECTION &&
      handle->stream.conn.shutdown_req != NULL &&
      handle->stream.conn.write_reqs_pending == 0) {
    UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
    err = 0;
    if (handle->flags & UV__HANDLE_CLOSING) {
      err = ERROR_OPERATION_ABORTED;
    } else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
      err = WSAGetLastError();
    }

    if (handle->stream.conn.shutdown_req->cb) {
      handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req,
                               uv_translate_sys_error(err));
    }

    handle->stream.conn.shutdown_req = NULL;
    DECREASE_PENDING_REQ_COUNT(handle);
    return;
  }
  // The closing flag is set and the pending-request count is 0, so release the resources; otherwise
  // this has to wait until the last request is processed and uv_tcp_endgame is called again
  // (every request handler runs the DECREASE_PENDING_REQ_COUNT macro)
  if (handle->flags & UV__HANDLE_CLOSING &&
      handle->reqs_pending == 0) {
    assert(!(handle->flags & UV_HANDLE_CLOSED));
    if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
      closesocket(handle->socket);
      handle->socket = INVALID_SOCKET;
      handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
    }
    if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
        for (i = 0; i < uv_simultaneous_server_accepts; i++) {
          req = &handle->tcp.serv.accept_reqs[i];
          if (req->wait_handle != INVALID_HANDLE_VALUE) {
            UnregisterWait(req->wait_handle);
            req->wait_handle = INVALID_HANDLE_VALUE;
          }
          if (req->event_handle) {
            CloseHandle(req->event_handle);
            req->event_handle = NULL;
          }
        }
      }
      uv__free(handle->tcp.serv.accept_reqs);
      handle->tcp.serv.accept_reqs = NULL;
    }
    if (handle->flags & UV_HANDLE_CONNECTION &&
        handle->flags & UV_HANDLE_EMULATE_IOCP) {
      if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
        UnregisterWait(handle->read_req.wait_handle);
        handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
      }
      if (handle->read_req.event_handle) {
        CloseHandle(handle->read_req.event_handle);
        handle->read_req.event_handle = NULL;
      }
    }
    uv__handle_close(handle);
    loop->active_tcp_streams--;
  }
}

Other notes

1. The difference between uv_shutdown and uv_close

uv_shutdown only shuts down the write side of a socket carrying the UV_HANDLE_CONNECTION flag; it does not set the closing flag and does not touch accept or read requests.

uv_close calls uv_tcp_close and does a full cleanup: it stops accept and read (though not immediately), cancels all outstanding I/O, and marks the handle with the closing flag. That causes uv_want_endgame to run when the last pending request is processed, which in turn eventually invokes the uv_close callback.

2.

uv_tcp_t contains a union; expanded it looks like this:

  union {                                                                 
    struct { 
      uv_tcp_accept_t* accept_reqs;   // accept requests
      // count of accepts already processed; used when finally closing, since the handle can only be
      // torn down after the results of all outstanding AcceptEx calls have come back through the IOCP
      unsigned int processed_accepts;                                       
      uv_tcp_accept_t* pending_accepts; // accepted connections not yet picked up by uv_accept
      LPFN_ACCEPTEX func_acceptex; // AcceptEx function pointer
    } serv;                                    
    struct { 
      uv_buf_t read_buffer; // buffer for incoming data
      LPFN_CONNECTEX func_connectex; // ConnectEx function pointer
    } conn;                               
  } tcp;

serv holds the state used on the server side, i.e. listen/accept.
conn holds the state used by connections: sockets that actively connect to a server as well as sockets obtained through accept.

3. The three completion-handling modes

1. The first, UV_SUCCEEDED_WITHOUT_IOCP: the request does not go through the IOCP at all and is inserted straight into the loop's pending-request queue. I have not run into this case myself; presumably it corresponds to I/O that completes synchronously, somewhat like blocking I/O (see the macro sketch after this list).
2. The second, UV_HANDLE_EMULATE_IOCP: IOCP is emulated. RegisterWaitForSingleObject registers a wait on an event that libuv created itself; when the corresponding I/O completes, the event is signalled, the wait fires, and its callback posts a signal to the IOCP port. For TCP this is the case for uv_tcp_t handles initialized via uv_tcp_open when associating them with the IOCP port fails (see uv_tcp_set_socket).
3. The third: plain IOCP. Once the I/O operation completes, the notification can be picked up directly from the completion port.
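
For reference, the two macros that distinguish the first two outcomes look roughly like this in libuv's Windows internals (paraphrased from memory, so treat it as a sketch rather than an exact quote):

// UV_HANDLE_SYNC_BYPASS_IOCP is set by uv_tcp_set_socket when
// SetFileCompletionNotificationModes(FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)
// succeeds for the socket: operations that complete synchronously then never
// post a packet to the completion port, so libuv queues the request itself.
#define UV_SUCCEEDED_WITHOUT_IOCP(result)                                     \
    ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))

// Either the call succeeded outright or it is pending; in both cases the
// completion will arrive through the IOCP (or the emulated event).
#define UV_SUCCEEDED_WITH_IOCP(result)                                        \
    ((result) || (GetLastError() == ERROR_IO_PENDING))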
