Commit 634bdfb1 authored by Ondřej Surý's avatar Ondřej Surý
Browse files

Refactor netmgr and add more unit tests

This is part of the work that intends to make the netmgr stable,
testable, maintainable and tested.  It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a
complete whole.

NOTE: There's quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject of refactoring in the future.

The changes that are included in this commit are listed here
(extensively, but not exclusively):

* The netmgr_test unit test was split into individual tests (udp_test,
  tcp_test, tcpdns_test and newly added tcp_quota_test)

* The udp_test and tcp_test have been extended to allow programmatic
  failures from the libuv API.  Unfortunately, we can't use cmocka
  mock() and will_return(), so we emulate the behaviour with #define and
  including the netmgr/{udp,tcp}.c source file directly.

* The netievents that we put on the nm queue have variable num...
parent 3a366622
......@@ -106,6 +106,9 @@
(list
"--enable=all"
"--suppress=missingIncludeSystem"
"--suppress=nullPointerRedundantCheck"
(concat "--suppressions-list=" (expand-file-name
(concat directory-of-current-dir-locals-file "util/suppressions.txt")))
(concat "-include=" (expand-file-name
(concat directory-of-current-dir-locals-file "config.h")))
)
......
......@@ -3232,7 +3232,10 @@ tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
REQUIRE(DIG_VALID_QUERY(query));
REQUIRE(query->handle == NULL);
REQUIRE(!free_now);
INSIST(!free_now);
debug("tcp_connected(%p, %s, %p)", handle, isc_result_totext(eresult),
query);
LOCK_LOOKUP;
lookup_attach(query->lookup, &l);
......@@ -3303,7 +3306,10 @@ tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
launch_next_query(query);
query_detach(&query);
isc_nmhandle_detach(&handle);
if (l->tls_mode) {
/* FIXME: This is an accounting bug in TLSDNS */
isc_nmhandle_detach(&handle);
}
lookup_detach(&l);
UNLOCK_LOOKUP;
}
......
......@@ -8621,8 +8621,7 @@ load_configuration(const char *filename, named_server_t *server,
advertised = MAX_TCP_TIMEOUT;
}
isc_nm_tcp_settimeouts(named_g_nm, initial, idle, keepalive,
advertised);
isc_nm_settimeouts(named_g_nm, initial, idle, keepalive, advertised);
/*
* Configure sets of UDP query source ports.
......@@ -15950,8 +15949,8 @@ named_server_tcptimeouts(isc_lex_t *lex, isc_buffer_t **text) {
return (ISC_R_UNEXPECTEDEND);
}
isc_nm_tcp_gettimeouts(named_g_nm, &initial, &idle, &keepalive,
&advertised);
isc_nm_gettimeouts(named_g_nm, &initial, &idle, &keepalive,
&advertised);
/* Look for optional arguments. */
ptr = next_token(lex, NULL);
......@@ -16000,8 +15999,8 @@ named_server_tcptimeouts(isc_lex_t *lex, isc_buffer_t **text) {
result = isc_task_beginexclusive(named_g_server->task);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
isc_nm_tcp_settimeouts(named_g_nm, initial, idle, keepalive,
advertised);
isc_nm_settimeouts(named_g_nm, initial, idle, keepalive,
advertised);
isc_task_endexclusive(named_g_server->task);
}
......
......@@ -961,8 +961,6 @@ xfrin_connect_done(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
CHECK(xfrin_send_request(xfr));
failure:
isc_nmhandle_detach(&handle);
if (result != ISC_R_SUCCESS && result != ISC_R_SHUTTINGDOWN) {
xfrin_fail(xfr, result, "failed to connect");
}
......
......@@ -127,6 +127,7 @@ libisc_la_SOURCES = \
netmgr/netmgr.c \
netmgr/tcp.c \
netmgr/tcpdns.c \
netmgr/tlsdns.c \
netmgr/tls.c \
netmgr/udp.c \
netmgr/uv-compat.c \
......
......@@ -18,6 +18,7 @@
#endif /* HAVE_UCHAR_H */
#include <isc/mutex.h>
#include <isc/util.h>
#if !defined(__has_feature)
#define __has_feature(x) 0
......
......@@ -111,10 +111,22 @@ isc_nmsocket_close(isc_nmsocket_t **sockp);
* sockets with active handles, the socket will be closed.
*/
#ifdef NETMGR_TRACE
#define isc_nmhandle_attach(handle, dest) \
isc__nmhandle_attach(handle, dest, __FILE__, __LINE__, __func__)
#define isc_nmhandle_detach(handlep) \
isc__nmhandle_detach(handlep, __FILE__, __LINE__, __func__)
#define FLARG , const char *file, unsigned int line, const char *func
#else
#define isc_nmhandle_attach(handle, dest) isc__nmhandle_attach(handle, dest)
#define isc_nmhandle_detach(handlep) isc__nmhandle_detach(handlep)
#define FLARG
#endif
void
isc_nmhandle_attach(isc_nmhandle_t *handle, isc_nmhandle_t **dest);
isc__nmhandle_attach(isc_nmhandle_t *handle, isc_nmhandle_t **dest FLARG);
void
isc_nmhandle_detach(isc_nmhandle_t **handlep);
isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG);
/*%<
* Increment/decrement the reference counter in a netmgr handle,
* but (unlike the attach/detach functions) do not change the pointer
......@@ -127,6 +139,7 @@ isc_nmhandle_detach(isc_nmhandle_t **handlep);
* otherwise know that the handle was in use and might free it, along
* with the client.)
*/
#undef FLARG
void *
isc_nmhandle_getdata(isc_nmhandle_t *handle);
......@@ -302,9 +315,6 @@ isc_nm_listentcp(isc_nm_t *mgr, isc_nmiface_t *iface,
* If 'quota' is not NULL, then the socket is attached to the specified
* quota. This allows us to enforce TCP client quota limits.
*
* NOTE: This is currently only called inside isc_nm_listentcpdns(), which
* creates a 'wrapper' socket that sends and receives DNS messages
* prepended with a two-byte length field, and handles buffering.
*/
isc_result_t
......@@ -326,10 +336,11 @@ isc_nm_tcpconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
*/
isc_result_t
isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
void *cbarg, isc_nm_accept_cb_t accept_cb,
void *accept_cbarg, size_t extrahandlesize, int backlog,
isc_quota_t *quota, isc_nmsocket_t **sockp);
isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
isc_nm_recv_cb_t recv_cb, void *recv_cbarg,
isc_nm_accept_cb_t accept_cb, void *accept_cbarg,
size_t extrahandlesize, int backlog, isc_quota_t *quota,
isc_nmsocket_t **sockp);
/*%<
* Start listening for DNS messages over the TCP interface 'iface', using
* net manager 'mgr'.
......@@ -391,8 +402,35 @@ isc_nm_tcpdns_keepalive(isc_nmhandle_t *handle, bool value);
*/
void
isc_nm_tcp_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
uint32_t keepalive, uint32_t advertised);
isc_nm_tlsdns_sequential(isc_nmhandle_t *handle);
/*%<
* Disable pipelining on this connection. Each DNS packet will be only
* processed after the previous completes.
*
The socket must be unpaused after the query is processed. This is done
when the response is sent, or if we're dropping the query, it will be done
* when a handle is fully dereferenced by calling the socket's
* closehandle_cb callback.
*
* Note: This can only be run while a message is being processed; if it is
* run before any messages are read, no messages will be read.
*
* Also note: once this has been set, it cannot be reversed for a given
* connection.
*/
void
isc_nm_tlsdns_keepalive(isc_nmhandle_t *handle, bool value);
/*%<
* Enable/disable keepalive on this connection by setting it to 'value'.
*
* When keepalive is active, we switch to using the keepalive timeout
* to determine when to close a connection, rather than the idle timeout.
*/
void
isc_nm_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
uint32_t keepalive, uint32_t advertised);
/*%<
* Sets the initial, idle, and keepalive timeout values to use for
* TCP connections, and the timeout value to advertise in responses using
......@@ -404,8 +442,8 @@ isc_nm_tcp_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
*/
void
isc_nm_tcp_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
uint32_t *keepalive, uint32_t *advertised);
isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
uint32_t *keepalive, uint32_t *advertised);
/*%<
* Gets the initial, idle, keepalive, or advertised timeout values,
* in tenths of seconds.
......
......@@ -31,6 +31,7 @@
#include <isc/atomic.h>
#include <isc/lang.h>
#include <isc/magic.h>
#include <isc/mutex.h>
#include <isc/types.h>
......@@ -44,6 +45,7 @@ ISC_LANG_BEGINDECLS
typedef struct isc_quota_cb isc_quota_cb_t;
typedef void (*isc_quota_cb_func_t)(isc_quota_t *quota, void *data);
struct isc_quota_cb {
int magic;
isc_quota_cb_func_t cb_func;
void * data;
ISC_LINK(isc_quota_cb_t) link;
......@@ -51,6 +53,7 @@ struct isc_quota_cb {
/*% isc_quota structure */
struct isc_quota {
int magic;
atomic_uint_fast32_t max;
atomic_uint_fast32_t used;
atomic_uint_fast32_t soft;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -91,8 +91,7 @@ tls_senddone(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) {
static void
async_tls_do_bio(isc_nmsocket_t *sock) {
isc__netievent_tlsdobio_t *ievent =
isc__nm_get_ievent(sock->mgr, netievent_tlsdobio);
ievent->sock = sock;
isc__nm_get_netievent_tlsdobio(sock->mgr, sock);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent);
}
......@@ -314,10 +313,11 @@ initialize_tls(isc_nmsocket_t *sock, bool server) {
static isc_result_t
tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
REQUIRE(VALID_NMSOCK(cbarg));
isc_nmsocket_t *tlslistensock = (isc_nmsocket_t *)cbarg;
isc_nmsocket_t *tlssock = NULL;
int r;
REQUIRE(VALID_NMSOCK(tlslistensock));
REQUIRE(tlslistensock->type == isc_nm_tlslistener);
/* If accept() was unsuccessful we can't do anything */
......@@ -350,8 +350,10 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
return (ISC_R_TLSERROR);
}
uv_timer_init(&tlssock->mgr->workers[isc_nm_tid()].loop,
&tlssock->timer);
r = uv_timer_init(&tlssock->mgr->workers[isc_nm_tid()].loop,
&tlssock->timer);
RUNTIME_CHECK(r == 0);
tlssock->timer.data = tlssock;
tlssock->timer_initialized = true;
tlssock->tls.ctx = tlslistensock->tls.ctx;
......@@ -410,7 +412,8 @@ isc__nm_async_tlssend(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__nm_uvreq_t *req = ievent->req;
ievent->req = NULL;
REQUIRE(VALID_UVREQ(req));
REQUIRE(worker->id == sock->tid);
REQUIRE(sock->tid == isc_nm_tid());
UNUSED(worker);
if (inactive(sock)) {
req->cb.send(req->handle, ISC_R_CANCELED, req->cbarg);
......@@ -449,7 +452,7 @@ isc__nm_async_tlssend(isc__networker_t *worker, isc__netievent_t *ev0) {
void
isc__nm_tls_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
void *cbarg) {
isc__netievent_tcpsend_t *ievent = NULL;
isc__netievent_tlssend_t *ievent = NULL;
isc__nm_uvreq_t *uvreq = NULL;
isc_nmsocket_t *sock = NULL;
REQUIRE(VALID_NMHANDLE(handle));
......@@ -475,60 +478,61 @@ isc__nm_tls_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
/*
* We need to create an event and pass it using async channel
*/
ievent = isc__nm_get_ievent(sock->mgr, netievent_tlssend);
ievent->sock = sock;
ievent->req = uvreq;
ievent = isc__nm_get_netievent_tlssend(sock->mgr, sock, uvreq);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent);
}
void
isc__nm_async_tls_startread(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_startread_t *ievent = (isc__netievent_startread_t *)ev0;
isc__nm_async_tlsstartread(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_tlsstartread_t *ievent =
(isc__netievent_tlsstartread_t *)ev0;
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(worker->id == isc_nm_tid());
REQUIRE(sock->tid == isc_nm_tid());
UNUSED(worker);
tls_do_bio(sock);
}
void
isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
isc_nmsocket_t *sock = NULL;
isc__netievent_startread_t *ievent = NULL;
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
REQUIRE(handle->sock->statichandle == handle);
REQUIRE(handle->sock->tid == isc_nm_tid());
sock = handle->sock;
REQUIRE(sock->statichandle == handle);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->recv_cb == NULL);
REQUIRE(sock->tid == isc_nm_tid());
isc__netievent_tlsstartread_t *ievent = NULL;
isc_nmsocket_t *sock = handle->sock;
if (inactive(sock)) {
cb(handle, ISC_R_NOTCONNECTED, NULL, cbarg);
return;
}
sock = handle->sock;
sock->recv_cb = cb;
sock->recv_cbarg = cbarg;
ievent = isc__nm_get_ievent(sock->mgr, netievent_tlsstartread);
ievent->sock = sock;
ievent = isc__nm_get_netievent_tlsstartread(sock->mgr, sock);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent);
}
void
isc__nm_tls_pauseread(isc_nmsocket_t *sock) {
isc__nm_tls_pauseread(isc_nmhandle_t *handle) {
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
isc_nmsocket_t *sock = handle->sock;
atomic_store(&sock->readpaused, true);
}
void
isc__nm_tls_resumeread(isc_nmsocket_t *sock) {
isc__nm_tls_resumeread(isc_nmhandle_t *handle) {
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
isc_nmsocket_t *sock = handle->sock;
atomic_store(&sock->readpaused, false);
async_tls_do_bio(sock);
}
......@@ -536,12 +540,12 @@ isc__nm_tls_resumeread(isc_nmsocket_t *sock) {
static void
timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = (isc_nmsocket_t *)uv_handle_get_data(handle);
INSIST(VALID_NMSOCK(sock));
tls_close_direct(sock);
}
static void
tls_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid());
if (sock->timer_running) {
......@@ -602,9 +606,7 @@ isc__nm_tls_close(isc_nmsocket_t *sock) {
tls_close_direct(sock);
} else {
isc__netievent_tlsclose_t *ievent =
isc__nm_get_ievent(sock->mgr, netievent_tlsclose);
ievent->sock = sock;
isc__nm_get_netievent_tlsclose(sock->mgr, sock);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent);
}
......@@ -614,7 +616,8 @@ void
isc__nm_async_tlsclose(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_tlsclose_t *ievent = (isc__netievent_tlsclose_t *)ev0;
REQUIRE(worker->id == ievent->sock->tid);
REQUIRE(ievent->sock->tid == isc_nm_tid());
UNUSED(worker);
tls_close_direct(ievent->sock);
}
......@@ -644,7 +647,7 @@ isc_result_t
isc_nm_tlsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
isc_nm_cb_t cb, void *cbarg, SSL_CTX *ctx,
unsigned int timeout, size_t extrahandlesize) {
isc_nmsocket_t *nsock = NULL, *tmp = NULL;
isc_nmsocket_t *nsock = NULL;
isc__netievent_tlsconnect_t *ievent = NULL;
isc_result_t result = ISC_R_SUCCESS;
......@@ -653,7 +656,7 @@ isc_nm_tlsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
nsock = isc_mem_get(mgr->mctx, sizeof(*nsock));
isc__nmsocket_init(nsock, mgr, isc_nm_tlssocket, local);
nsock->extrahandlesize = extrahandlesize;
atomic_init(&nsock->result, ISC_R_SUCCESS);
nsock->result = ISC_R_SUCCESS;
nsock->connect_cb = cb;
nsock->connect_cbarg = cbarg;
nsock->connect_timeout = timeout;
......@@ -667,31 +670,22 @@ isc_nm_tlsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
return (ISC_R_TLSERROR);
}
ievent = isc__nm_get_ievent(mgr, netievent_tlsconnect);
ievent->sock = nsock;
ievent = isc__nm_get_netievent_tlsconnect(mgr, nsock);
ievent->local = local->addr;
ievent->peer = peer->addr;
ievent->ctx = ctx;
/*
* Async callbacks can dereference the socket in the meantime,
* we need to hold an additional reference to it.
*/
isc__nmsocket_attach(nsock, &tmp);
if (isc__nm_in_netthread()) {
nsock->tid = isc_nm_tid();
isc__nm_async_tlsconnect(&mgr->workers[nsock->tid],
(isc__netievent_t *)ievent);
isc__nm_put_ievent(mgr, ievent);
isc__nm_put_netievent_tlsconnect(mgr, ievent);
} else {
nsock->tid = isc_random_uniform(mgr->nworkers);
isc__nm_enqueue_ievent(&mgr->workers[nsock->tid],
(isc__netievent_t *)ievent);
}
isc__nmsocket_detach(&tmp);
return (result);
}
......@@ -703,8 +697,9 @@ tls_connect_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
if (result != ISC_R_SUCCESS) {
tlssock->connect_cb(handle, result, tlssock->connect_cbarg);
atomic_store(&tlssock->result, result);
atomic_store(&tlssock->connect_error, true);
LOCK(&tlssock->parent->lock);
tlssock->parent->result = result;
UNLOCK(&tlssock->parent->lock);
tls_close_direct(tlssock);
return;
}
......@@ -716,8 +711,9 @@ tls_connect_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
result = initialize_tls(tlssock, false);
if (result != ISC_R_SUCCESS) {
tlssock->connect_cb(handle, result, tlssock->connect_cbarg);
atomic_store(&tlssock->result, result);
atomic_store(&tlssock->connect_error, true);
LOCK(&tlssock->parent->lock);
tlssock->parent->result = result;
UNLOCK(&tlssock->parent->lock);
tls_close_direct(tlssock);
return;
}
......@@ -728,12 +724,15 @@ isc__nm_async_tlsconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
(isc__netievent_tlsconnect_t *)ev0;
isc_nmsocket_t *tlssock = ievent->sock;
isc_result_t result;
int r;
UNUSED(worker);
tlssock->tid = isc_nm_tid();
uv_timer_init(&tlssock->mgr->workers[isc_nm_tid()].loop,
&tlssock->timer);
r = uv_timer_init(&tlssock->mgr->workers[isc_nm_tid()].loop,
&tlssock->timer);
RUNTIME_CHECK(r == 0);
tlssock->timer.data = tlssock;
tlssock->timer_initialized = true;
tlssock->tls.state = TLS_INIT;
......@@ -745,15 +744,16 @@ isc__nm_async_tlsconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
if (result != ISC_R_SUCCESS) {
/* FIXME: We need to pass valid handle */
tlssock->connect_cb(NULL, result, tlssock->connect_cbarg);
atomic_store(&tlssock->result, result);
atomic_store(&tlssock->connect_error, true);
LOCK(&tlssock->parent->lock);
tlssock->parent->result = result;
UNLOCK(&tlssock->parent->lock);
tls_close_direct(tlssock);
return;
}
}
void
isc__nm_async_tls_do_bio(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__nm_async_tlsdobio(isc__networker_t *worker, isc__netievent_t *ev0) {
UNUSED(worker);
isc__netievent_tlsdobio_t *ievent = (isc__netievent_tlsdobio_t *)ev0;
tls_do_bio(ievent->sock);
......
This diff is collapsed.
This diff is collapsed.
......@@ -14,179 +14,7 @@
#include <isc/util.h>
#ifndef HAVE_UV_IMPORT
/*
* XXXWPK: This code goes into libuv internals and it's platform dependent.
* It's ugly, we shouldn't do it, but the alternative with passing sockets
over IPC sockets is even worse, and causes all kinds of different
* problems. We should try to push these things upstream.
*/
#ifdef WIN32
/* This code is adapted from libuv/src/win/internal.h */
/*
 * Transfer modes recognized by libuv's internal socket-transfer code
 * (mirrored here from libuv/src/win/internal.h; must stay in sync).
 */
typedef enum {
UV__IPC_SOCKET_XFER_NONE = 0,
UV__IPC_SOCKET_XFER_TCP_CONNECTION,
UV__IPC_SOCKET_XFER_TCP_SERVER
} uv__ipc_socket_xfer_type_t;
/*
 * Mirror of libuv's internal per-socket transfer record: the Winsock
 * protocol information used to duplicate the socket, plus a pending
 * error code (presumably a connect error deferred on the original
 * handle — confirm against the matching libuv version).
 */
typedef struct {
WSAPROTOCOL_INFOW socket_info;
uint32_t delayed_error;
} uv__ipc_socket_xfer_info_t;
/*
* Needed to make sure that the internal structure that we pulled out of
* libuv hasn't changed.
*/
int
uv__tcp_xfer_import(uv_tcp_t *tcp, uv__ipc_socket_xfer_type_t xfer_type,
uv__ipc_socket_xfer_info_t *xfer_info);
int
uv__tcp_xfer_export(uv_tcp_t *handle, int target_pid,
uv__ipc_socket_xfer_type_t *xfer_type,
uv__ipc_socket_xfer_info_t *xfer_info);
/*
 * Export the underlying OS socket of the connected TCP 'stream' into
 * 'info' so that it can later be re-created with isc_uv_import()
 * (WIN32 only; relies on libuv's private uv__tcp_xfer_export()).
 *
 * Returns 0 on success, -1 if 'stream' is not a TCP stream, the
 * uv__tcp_xfer_export() error code on failure, or the delayed error
 * reported by libuv for the socket.
 */
int
isc_uv_export(uv_stream_t *stream, isc_uv_stream_info_t *info) {
uv__ipc_socket_xfer_info_t xfer_info;
uv__ipc_socket_xfer_type_t xfer_type = UV__IPC_SOCKET_XFER_NONE;
/*
 * Needed to make sure that the internal structure that we pulled
 * out of libuv hasn't changed.  632 is the expected
 * sizeof(uv__ipc_socket_xfer_info_t) for the libuv version this
 * was written against; a mismatch means the mirrored definition
 * above is stale.
 */
RUNTIME_CHECK(sizeof(uv__ipc_socket_xfer_info_t) == 632);
if (stream->type != UV_TCP) {
return (-1);
}
int r = uv__tcp_xfer_export((uv_tcp_t *)stream, GetCurrentProcessId(),
&xfer_type, &xfer_info);
if (r != 0) {
return (r);
}
if (xfer_info.delayed_error != 0) {
return (xfer_info.delayed_error);
}
/* We only ever export established connections, never listeners. */
INSIST(xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
info->type = UV_TCP;
info->socket_info = xfer_info.socket_info;
return (0);
}
/*
 * Counterpart of isc_uv_export(): re-create the libuv TCP stream
 * from the socket information previously saved in 'info' (WIN32
 * only; relies on libuv's private uv__tcp_xfer_import()).
 *
 * Returns -1 if either 'stream' or 'info' is not of TCP type,
 * otherwise the return value of uv__tcp_xfer_import() (0 on
 * success).
 */
int
isc_uv_import(uv_stream_t *stream, isc_uv_stream_info_t *info) {
if (stream->type != UV_TCP || info->type != UV_TCP) {
return (-1);
}
return (uv__tcp_xfer_import(
(uv_tcp_t *)stream, UV__IPC_SOCKET_XFER_TCP_CONNECTION,
&(uv__ipc_socket_xfer_info_t){
.socket_info = info->socket_info }));
}
#else /* WIN32 */
/* Adapted from libuv/src/unix/internal.h */
#include <fcntl.h>
#include <sys/ioctl.h>
static int
isc_uv__cloexec(int fd, int set) {
int r;
/*
* This #ifdef is taken directly from the libuv sources.
* We use FIOCLEX and FIONCLEX ioctl() calls when possible,
* but on some platforms are not implemented, or defined but
* not implemented correctly. On those, we use the FD_CLOEXEC
* fcntl() call, which adds extra system call overhead, but
* works.
*/
#if defined(_AIX) || defined(__APPLE__) || defined(__DragonFly__) || \
defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
defined(__linux__) || defined(__OpenBSD__) || defined(__NetBSD__)
do {
r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
} while (r == -1 && errno == EINTR);
#else /* FIOCLEX/FIONCLEX unsupported */