client.c 111 KB
Newer Older
Bob Halley's avatar
add  
Bob Halley committed
1
/*
2
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
3
 *
4 5 6
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 8 9
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
Bob Halley's avatar
add  
Bob Halley committed
10 11 12 13
 */

#include <config.h>

14
#include <inttypes.h>
15
#include <stdbool.h>
16

17
#include <isc/aes.h>
18
#include <isc/formatcheck.h>
19
#include <isc/hmacsha.h>
Brian Wellington's avatar
Brian Wellington committed
20
#include <isc/mutex.h>
21
#include <isc/once.h>
22
#include <isc/platform.h>
Andreas Gustafsson's avatar
Andreas Gustafsson committed
23
#include <isc/print.h>
24
#include <isc/queue.h>
25
#include <isc/random.h>
26
#include <isc/safe.h>
27
#include <isc/serial.h>
28
#include <isc/stats.h>
29
#include <isc/stdio.h>
30
#include <isc/string.h>
Andreas Gustafsson's avatar
Andreas Gustafsson committed
31
#include <isc/task.h>
Bob Halley's avatar
add  
Bob Halley committed
32
#include <isc/timer.h>
Michael Graff's avatar
Michael Graff committed
33
#include <isc/util.h>
Bob Halley's avatar
add  
Bob Halley committed
34

35
#include <dns/adb.h>
Evan Hunt's avatar
Evan Hunt committed
36
#include <dns/badcache.h>
37
#include <dns/db.h>
Bob Halley's avatar
add  
Bob Halley committed
38
#include <dns/dispatch.h>
Evan Hunt's avatar
Evan Hunt committed
39
#include <dns/dnstap.h>
40
#include <dns/cache.h>
41
#include <dns/edns.h>
Bob Halley's avatar
add  
Bob Halley committed
42 43
#include <dns/events.h>
#include <dns/message.h>
44
#include <dns/peer.h>
45
#include <dns/rcode.h>
Bob Halley's avatar
EDNS0  
Bob Halley committed
46
#include <dns/rdata.h>
47
#include <dns/rdataclass.h>
Bob Halley's avatar
EDNS0  
Bob Halley committed
48 49
#include <dns/rdatalist.h>
#include <dns/rdataset.h>
50
#include <dns/resolver.h>
51
#include <dns/stats.h>
52
#include <dns/tsig.h>
53
#include <dns/view.h>
54
#include <dns/zone.h>
Bob Halley's avatar
add  
Bob Halley committed
55

56
#include <named/fuzz.h>
57
#include <named/interfacemgr.h>
58
#include <named/log.h>
59
#include <named/notify.h>
60
#include <named/os.h>
61
#include <named/server.h>
62
#include <named/update.h>
63 64 65 66 67

/***
 *** Client
 ***/

68 69 70
/*! \file
 * Client Routines
 *
71 72 73 74 75 76 77 78 79
 * Important note!
 *
 * All client state changes, other than that from idle to listening, occur
 * as a result of events.  This guarantees serialization and avoids the
 * need for locking.
 *
 * If a routine is ever created that allows someone other than the client's
 * task to change the client, then the client will have to be locked.
 */
Bob Halley's avatar
add  
Bob Halley committed
80 81 82

#define NS_CLIENT_TRACE
#ifdef NS_CLIENT_TRACE
83
#define CTRACE(m)	ns_client_log(client, \
84 85
				      NS_LOGCATEGORY_CLIENT, \
				      NS_LOGMODULE_CLIENT, \
Bob Halley's avatar
Bob Halley committed
86
				      ISC_LOG_DEBUG(3), \
87
				      "%s", (m))
88 89 90
#define MTRACE(m)	isc_log_write(ns_g_lctx, \
				      NS_LOGCATEGORY_GENERAL, \
				      NS_LOGMODULE_CLIENT, \
Bob Halley's avatar
Bob Halley committed
91
				      ISC_LOG_DEBUG(3), \
92
				      "clientmgr @%p: %s", manager, (m))
93 94 95
#else
#define CTRACE(m)	((void)(m))
#define MTRACE(m)	((void)(m))
Bob Halley's avatar
add  
Bob Halley committed
96 97
#endif

Bob Halley's avatar
Bob Halley committed
98 99
#define TCP_CLIENT(c)	(((c)->attributes & NS_CLIENTATTR_TCP) != 0)

100
#define TCP_BUFFER_SIZE			(65535 + 2)
101 102
#define SEND_BUFFER_SIZE		4096
#define RECV_BUFFER_SIZE		4096
Bob Halley's avatar
add  
Bob Halley committed
103

104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
#ifdef ISC_PLATFORM_USETHREADS
#define NMCTXS				100
/*%<
 * Number of 'mctx pools' for clients. (Should this be configurable?)
 * When enabling threads, we use a pool of memory contexts shared by
 * client objects, since concurrent access to a shared context would cause
 * heavy contentions.  The above constant is expected to be enough for
 * completely avoiding contentions among threads for an authoritative-only
 * server.
 */
#else
#define NMCTXS				0
/*%<
 * If named with built without thread, simply share manager's context.  Using
 * a separate context in this case would simply waste memory.
 */
#endif

122
#define COOKIE_SIZE 24U /* 8 + 4 + 4 + 8 */
Evan Hunt's avatar
Evan Hunt committed
123
#define ECS_SIZE 20U /* 2 + 1 + 1 + [0..16] */
124

125 126 127
#define WANTNSID(x) (((x)->attributes & NS_CLIENTATTR_WANTNSID) != 0)
#define WANTEXPIRE(x) (((x)->attributes & NS_CLIENTATTR_WANTEXPIRE) != 0)

128
/*% nameserver client manager structure */
struct ns_clientmgr {
	/* Unlocked. */
	unsigned int			magic;	/*%< MANAGER_MAGIC when valid */

	/* The queue object has its own locks */
	client_queue_t			inactive;     /*%< To be recycled */

	/* Unlocked after creation; set once at manager creation time. */
	isc_mem_t *			mctx;
	isc_taskmgr_t *			taskmgr;
	isc_timermgr_t *		timermgr;

	/* Lock covers manager state. */
	isc_mutex_t			lock;
	bool			exiting;	/*%< Manager is shutting down */

	/* Lock covers the clients list */
	isc_mutex_t			listlock;
	client_list_t			clients;      /*%< All active clients */

	/* Lock covers the recursing list */
	isc_mutex_t			reclock;
	client_list_t			recursing;    /*%< Recursing clients */

#if NMCTXS > 0
	/*%< mctx pool for clients. */
	unsigned int			nextmctx;	/* round-robin index into mctxpool */
	isc_mem_t *			mctxpool[NMCTXS];
#endif
};

159 160
#define MANAGER_MAGIC			ISC_MAGIC('N', 'S', 'C', 'm')
#define VALID_MANAGER(m)		ISC_MAGIC_VALID(m, MANAGER_MAGIC)
Bob Halley's avatar
add  
Bob Halley committed
161

Automatic Updater's avatar
Automatic Updater committed
162
/*!
163 164 165 166 167 168 169 170 171 172
 * Client object states.  Ordering is significant: higher-numbered
 * states are generally "more active", meaning that the client can
 * have more dynamically allocated data, outstanding events, etc.
 * In the list below, any such properties listed for state N
 * also apply to any state > N.
 *
 * To force the client into a less active state, set client->newstate
 * to that state and call exit_check().  This will cause any
 * activities defined for higher-numbered states to be aborted.
 */
Bob Halley's avatar
add  
Bob Halley committed
173

174
#define NS_CLIENTSTATE_FREED    0
175
/*%<
176 177
 * The client object no longer exists.
 */
Bob Halley's avatar
add  
Bob Halley committed
178

179
#define NS_CLIENTSTATE_INACTIVE 1
180
/*%<
181
 * The client object exists and has a task and timer.
182 183 184 185
 * Its "query" struct and sendbuf are initialized.
 * It is on the client manager's list of inactive clients.
 * It has a message and OPT, both in the reset state.
 */
Bob Halley's avatar
add  
Bob Halley committed
186

187
#define NS_CLIENTSTATE_READY    2
188
/*%<
189 190 191
 * The client object is either a TCP or a UDP one, and
 * it is associated with a network interface.  It is on the
 * client manager's list of active clients.
Bob Halley's avatar
add  
Bob Halley committed
192
 *
193
 * If it is a TCP client object, it has a TCP listener socket
194
 * and an outstanding TCP listen request.
Bob Halley's avatar
add  
Bob Halley committed
195
 *
196 197
 * If it is a UDP client object, it has a UDP listener socket
 * and an outstanding UDP receive request.
Bob Halley's avatar
add  
Bob Halley committed
198 199
 */

200
#define NS_CLIENTSTATE_READING  3
201
/*%<
202
 * The client object is a TCP client object that has received
203
 * a connection.  It has a tcpsocket, tcpmsg, TCP quota, and an
204 205 206 207 208
 * outstanding TCP read request.  This state is not used for
 * UDP client objects.
 */

#define NS_CLIENTSTATE_WORKING  4
209
/*%<
210
 * The client object has received a request and is working
Andreas Gustafsson's avatar
spacing  
Andreas Gustafsson committed
211
 * on it.  It has a view, and it may have any of a non-reset OPT,
212
 * recursion quota, and an outstanding write request.
213
 */
214

215 216 217 218 219 220
#define NS_CLIENTSTATE_RECURSING  5
/*%<
 * The client object is recursing.  It will be on the 'recursing'
 * list.
 */

221
#define NS_CLIENTSTATE_MAX      9
222
/*%<
223 224 225 226 227
 * Sentinel value used to indicate "no state".  When client->newstate
 * has this value, we are not attempting to exit the current state.
 * Must be greater than any valid state.
 */

228 229 230 231 232 233 234
/*
 * Enable ns_client_dropport() by default.
 */
#ifndef NS_CLIENT_DROPPORT
#define NS_CLIENT_DROPPORT 1
#endif

235
unsigned int ns_client_requests;
236 237 238

static void client_read(ns_client_t *client);
static void client_accept(ns_client_t *client);
239
static void client_udprecv(ns_client_t *client);
240
static void clientmgr_destroy(ns_clientmgr_t *manager);
241
static bool exit_check(ns_client_t *client);
242
static void ns_client_endrequest(ns_client_t *client);
243 244
static void client_start(isc_task_t *task, isc_event_t *event);
static void client_request(isc_task_t *task, isc_event_t *event);
245
static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
246
static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
247
			       dns_dispatch_t *disp, bool tcp);
248
static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
249
			       isc_socket_t *sock, ns_client_t *oldclient);
250
static inline bool
251 252
allowed(isc_netaddr_t *addr, dns_name_t *signer,
	isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
253
	uint8_t *ecs_scope, dns_acl_t *acl);
254 255
static void compute_cookie(ns_client_t *client, uint32_t when,
			   uint32_t nonce, const unsigned char *secret,
256
			   isc_buffer_t *buf);
257

258
void
259
ns_client_recursing(ns_client_t *client) {
260
	REQUIRE(NS_CLIENT_VALID(client));
261
	REQUIRE(client->state == NS_CLIENTSTATE_WORKING);
262

263
	LOCK(&client->manager->reclock);
264
	client->newstate = client->state = NS_CLIENTSTATE_RECURSING;
265
	ISC_LIST_APPEND(client->manager->recursing, client, rlink);
266
	UNLOCK(&client->manager->reclock);
267 268
}

269 270 271 272 273
/*%
 * Cancel the query of the longest-recursing client, if any.  The victim
 * is unlinked from the recursing list while the list lock is held; the
 * (potentially slow) cancellation itself runs after the lock is dropped.
 */
void
ns_client_killoldestquery(ns_client_t *client) {
	ns_client_t *victim;

	REQUIRE(NS_CLIENT_VALID(client));

	LOCK(&client->manager->reclock);
	victim = ISC_LIST_HEAD(client->manager->recursing);
	if (victim != NULL) {
		ISC_LIST_UNLINK(client->manager->recursing, victim, rlink);
	}
	UNLOCK(&client->manager->reclock);

	if (victim != NULL) {
		ns_query_cancel(victim);
	}
}

284 285
/*%
 * (Re)arm the client's one-shot timer to fire after 'seconds'.
 * A reset failure is logged but otherwise ignored; request
 * processing continues without the timeout.
 */
void
ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
	isc_interval_t interval;
	isc_result_t result;

	isc_interval_set(&interval, seconds, 0);
	result = isc_timer_reset(client->timer, isc_timertype_once, NULL,
				 &interval, false);
	client->timerset = true;
	if (result == ISC_R_SUCCESS) {
		return;
	}

	ns_client_log(client, NS_LOGCATEGORY_CLIENT,
		      NS_LOGMODULE_CLIENT, ISC_LOG_ERROR,
		      "setting timeout: %s",
		      isc_result_totext(result));
	/* Continue anyway. */
}

302
/*%
303 304 305 306 307
 * Allocate a reference-counted object that will maintain a single pointer to
 * the (also reference-counted) TCP client quota, shared between all the
 * clients processing queries on a single TCP connection, so that all
 * clients sharing the one socket will together consume only one slot in
 * the 'tcp-clients' quota.
308
 */
309 310 311 312 313
static isc_result_t
tcpconn_init(ns_client_t *client, bool force) {
	isc_result_t result;
	isc_quota_t *quota = NULL;
	ns_tcpconn_t *tconn = NULL;
314

315 316 317 318 319 320 321 322 323 324 325 326 327 328
	REQUIRE(client->tcpconn == NULL);

	/*
	 * Try to attach to the quota first, so we won't pointlessly
	 * allocate memory for a tcpconn object if we can't get one.
	 */
	if (force) {
		result = isc_quota_force(&ns_g_server->tcpquota, &quota);
	} else {
		result = isc_quota_attach(&ns_g_server->tcpquota, &quota);
	}
	if (result != ISC_R_SUCCESS) {
		return (result);
	}
329 330 331 332 333 334 335 336 337

	/*
	 * A global memory context is used for the allocation as different
	 * client structures may have different memory contexts assigned and a
	 * reference counter allocated here might need to be freed by a
	 * different client.  The performance impact caused by memory context
	 * contention here is expected to be negligible, given that this code
	 * is only executed for TCP connections.
	 */
338 339 340 341 342 343 344 345 346 347
	tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));

	isc_refcount_init(&tconn->refs, 1);
	tconn->tcpquota = quota;
	quota = NULL;
	tconn->pipelined = false;

	client->tcpconn = tconn;

	return (ISC_R_SUCCESS);
348 349 350
}

/*%
351 352 353
 * Increase the count of client structures sharing the TCP connection
 * that 'source' is associated with; add a pointer to the same tcpconn
 * to 'target', thus associating it with the same TCP connection.
354 355
 */
static void
356
tcpconn_attach(ns_client_t *source, ns_client_t *target) {
357
	int refs;
358

359 360 361
	REQUIRE(source->tcpconn != NULL);
	REQUIRE(target->tcpconn == NULL);
	REQUIRE(source->tcpconn->pipelined);
362

363
	isc_refcount_increment(&source->tcpconn->refs, &refs);
364
	INSIST(refs > 1);
365
	target->tcpconn = source->tcpconn;
366 367 368
}

/*%
369
 * Decrease the count of client structures sharing the TCP connection that
370
 * 'client' is associated with.  If this is the last client using this TCP
371 372
 * connection, we detach from the TCP quota and free the tcpconn
 * object. Either way, client->tcpconn is set to NULL.
373
 */
374 375 376
static void
tcpconn_detach(ns_client_t *client) {
	ns_tcpconn_t *tconn = NULL;
377
	int refs;
378

379
	REQUIRE(client->tcpconn != NULL);
380

381 382
	tconn = client->tcpconn;
	client->tcpconn = NULL;
383

384
	isc_refcount_decrement(&tconn->refs, &refs);
385
	if (refs == 0) {
386 387
		isc_quota_detach(&tconn->tcpquota);
		isc_mem_free(ns_g_mctx, tconn);
388 389 390
	}
}

391 392 393 394 395 396
/*%
 * Mark a client as active and increment the interface's 'ntcpactive'
 * counter, as a signal that there is at least one client servicing
 * TCP queries for the interface. If we reach the TCP client quota at
 * some point, this will be used to determine whether a quota overrun
 * should be permitted.
397
 *
398 399 400
 * Marking the client active with the 'tcpactive' flag ensures proper
 * accounting, by preventing us from incrementing or decrementing
 * 'ntcpactive' more than once per client.
401 402
 */
static void
403 404 405 406 407 408 409 410 411
mark_tcp_active(ns_client_t *client, bool active) {
	if (active && !client->tcpactive) {
		isc_atomic_xadd(&client->interface->ntcpactive, 1);
		client->tcpactive = active;
	} else if (!active && client->tcpactive) {
		uint32_t old =
			isc_atomic_xadd(&client->interface->ntcpactive, -1);
		INSIST(old > 0);
		client->tcpactive = active;
412 413 414
	}
}

415
/*%
416
 * Check for a deactivation or shutdown request and take appropriate
417
 * action.  Returns true if either is in progress; in this case
418 419 420
 * the caller must no longer use the client object as it may have been
 * freed.
 */
421
static bool
422
exit_check(ns_client_t *client) {
423
	bool destroy_manager = false;
424
	ns_clientmgr_t *manager = NULL;
425

426
	REQUIRE(NS_CLIENT_VALID(client));
427
	manager = client->manager;
428 429

	if (client->state <= client->newstate)
430
		return (false); /* Business as usual. */
431

432
	INSIST(client->newstate < NS_CLIENTSTATE_RECURSING);
433

434 435 436 437 438 439
	/*
	 * We need to detach from the view early when shutting down
	 * the server to break the following vicious circle:
	 *
	 *  - The resolver will not shut down until the view refcount is zero
	 *  - The view refcount does not go to zero until all clients detach
440 441
	 *  - The client does not detach from the view until references is zero
	 *  - references does not go to zero until the resolver has shut down
442
	 *
443
	 * Keep the view attached until any outstanding updates complete.
444
	 */
Automatic Updater's avatar
Automatic Updater committed
445
	if (client->nupdates == 0 &&
446
	    client->newstate == NS_CLIENTSTATE_FREED && client->view != NULL)
447
		dns_view_detach(&client->view);
448

449 450 451
	if (client->state == NS_CLIENTSTATE_WORKING ||
	    client->state == NS_CLIENTSTATE_RECURSING)
	{
452
		INSIST(client->newstate <= NS_CLIENTSTATE_READING);
453 454 455 456
		/*
		 * Let the update processing complete.
		 */
		if (client->nupdates > 0)
457
			return (true);
458

459 460 461 462
		/*
		 * We are trying to abort request processing.
		 */
		if (client->nsends > 0) {
463
			isc_socket_t *sock;
464
			if (TCP_CLIENT(client))
465
				sock = client->tcpsocket;
466
			else
467 468
				sock = client->udpsocket;
			isc_socket_cancel(sock, client->task,
469 470
					  ISC_SOCKCANCEL_SEND);
		}
471

472 473 474
		if (! (client->nsends == 0 && client->nrecvs == 0 &&
		       client->references == 0))
		{
475 476 477 478
			/*
			 * Still waiting for I/O cancel completion.
			 * or lingering references.
			 */
479
			return (true);
480
		}
481

482 483
		/*
		 * I/O cancel is complete.  Burn down all state
484
		 * related to the current request.  Ensure that
485
		 * the client is no longer on the recursing list.
486 487 488 489
		 *
		 * We need to check whether the client is still linked,
		 * because it may already have been removed from the
		 * recursing list by ns_client_killoldestquery()
490
		 */
491 492
		if (client->state == NS_CLIENTSTATE_RECURSING) {
			LOCK(&manager->reclock);
493 494 495
			if (ISC_LINK_LINKED(client, rlink))
				ISC_LIST_UNLINK(manager->recursing,
						client, rlink);
496
			UNLOCK(&manager->reclock);
497
		}
498 499 500
		ns_client_endrequest(client);

		client->state = NS_CLIENTSTATE_READING;
501
		INSIST(client->recursionquota == NULL);
502

503
		if (NS_CLIENTSTATE_READING == client->newstate) {
504 505
			INSIST(client->tcpconn != NULL);
			if (!client->tcpconn->pipelined) {
506 507
				client_read(client);
				client->newstate = NS_CLIENTSTATE_MAX;
508
				return (true); /* We're done. */
509 510
			} else if (client->mortal) {
				client->newstate = NS_CLIENTSTATE_INACTIVE;
511
			} else
512
				return (false);
513 514 515 516 517 518 519 520
		}
	}

	if (client->state == NS_CLIENTSTATE_READING) {
		/*
		 * We are trying to abort the current TCP connection,
		 * if any.
		 */
521
		INSIST(client->recursionquota == NULL);
522
		INSIST(client->newstate <= NS_CLIENTSTATE_READY);
523 524

		if (client->nreads > 0) {
525
			dns_tcpmsg_cancelread(&client->tcpmsg);
526 527
		}

528 529
		/* Still waiting for read cancel completion. */
		if (client->nreads > 0) {
530
			return (true);
531 532 533 534
		}

		if (client->tcpmsg_valid) {
			dns_tcpmsg_invalidate(&client->tcpmsg);
535
			client->tcpmsg_valid = false;
536
		}
537

538
		/*
539 540 541 542 543
		 * Soon the client will be ready to accept a new TCP
		 * connection or UDP request, but we may have enough
		 * clients doing that already.  Check whether this client
		 * needs to remain active and allow it go inactive if
		 * not.
544
		 *
545 546 547 548
		 * UDP clients always go inactive at this point, but a TCP
		 * client may need to stay active and return to READY
		 * state if no other clients are available to listen
		 * for TCP requests on this interface.
549
		 *
550 551 552
		 * Regardless, if we're going to FREED state, that means
		 * the system is shutting down and we don't need to
		 * retain clients.
553
		 */
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571
		if (client->mortal && TCP_CLIENT(client) &&
		    client->newstate != NS_CLIENTSTATE_FREED &&
		    !ns_g_clienttest &&
		    isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
		{
			/* Nobody else is accepting */
			client->mortal = false;
			client->newstate = NS_CLIENTSTATE_READY;
		}

		/*
		 * Detach from TCP connection and TCP client quota,
		 * if appropriate. If this is the last reference to
		 * the TCP connection in our pipeline group, the
		 * TCP quota slot will be released.
		 */
		if (client->tcpconn) {
			tcpconn_detach(client);
572 573
		}

574 575
		if (client->tcpsocket != NULL) {
			CTRACE("closetcp");
576
			isc_socket_detach(&client->tcpsocket);
577
			mark_tcp_active(client, false);
578
		}
579

580
		if (client->timerset) {
Andreas Gustafsson's avatar
spacing  
Andreas Gustafsson committed
581 582
			(void)isc_timer_reset(client->timer,
					      isc_timertype_inactive,
583 584
					      NULL, NULL, true);
			client->timerset = false;
585
		}
586

587
		client->peeraddr_valid = false;
588

589
		client->state = NS_CLIENTSTATE_READY;
590

591 592 593 594 595
		/*
		 * We don't need the client; send it to the inactive
		 * queue for recycling.
		 */
		if (client->mortal) {
596
			if (client->newstate > NS_CLIENTSTATE_INACTIVE) {
597
				client->newstate = NS_CLIENTSTATE_INACTIVE;
598
			}
599
		}
600

601 602 603
		if (NS_CLIENTSTATE_READY == client->newstate) {
			if (TCP_CLIENT(client)) {
				client_accept(client);
604
			} else {
605
				client_udprecv(client);
606
			}
607
			client->newstate = NS_CLIENTSTATE_MAX;
608
			return (true);
609 610 611 612
		}
	}

	if (client->state == NS_CLIENTSTATE_READY) {
613
		INSIST(client->newstate <= NS_CLIENTSTATE_INACTIVE);
614

615 616 617
		/*
		 * We are trying to enter the inactive state.
		 */
618
		if (client->naccepts > 0) {
619 620
			isc_socket_cancel(client->tcplistener, client->task,
					  ISC_SOCKCANCEL_ACCEPT);
621
		}
622

623
		/* Still waiting for accept cancel completion. */
624
		if (client->naccepts > 0) {
625
			return (true);
626
		}
627

628
		/* Accept cancel is complete. */
629
		if (client->nrecvs > 0) {
630 631
			isc_socket_cancel(client->udpsocket, client->task,
					  ISC_SOCKCANCEL_RECV);
632
		}
633 634

		/* Still waiting for recv cancel completion. */
635
		if (client->nrecvs > 0) {
636
			return (true);
637
		}
638

639
		/* Still waiting for control event to be delivered */
640
		if (client->nctls > 0) {
641
			return (true);
642
		}
643 644 645

		INSIST(client->naccepts == 0);
		INSIST(client->recursionquota == NULL);
646
		if (client->tcplistener != NULL) {
647
			isc_socket_detach(&client->tcplistener);
648
			mark_tcp_active(client, false);
649 650
		}
		if (client->udpsocket != NULL) {
651
			isc_socket_detach(&client->udpsocket);
652
		}
653

654 655 656 657 658 659
		/* Deactivate the client. */
		if (client->interface != NULL) {
			ns_interface_detach(&client->interface);
		}

		if (client->dispatch != NULL) {
660
			dns_dispatch_detach(&client->dispatch);
661
		}
662 663

		client->attributes = 0;
664
		client->mortal = false;
665

666 667 668 669 670 671
		if (client->keytag != NULL) {
			isc_mem_put(client->mctx, client->keytag,
				    client->keytag_len);
			client->keytag_len = 0;
		}

672 673 674 675 676 677 678
		/*
		 * Put the client on the inactive list.  If we are aiming for
		 * the "freed" state, it will be removed from the inactive
		 * list shortly, and we need to keep the manager locked until
		 * that has been done, lest the manager decide to reactivate
		 * the dying client inbetween.
		 */
679
		client->state = NS_CLIENTSTATE_INACTIVE;
680
		INSIST(client->recursionquota == NULL);
681

682 683
		if (client->state == client->newstate) {
			client->newstate = NS_CLIENTSTATE_MAX;
684 685
			if (!ns_g_clienttest && manager != NULL &&
			    !manager->exiting)
686
			{
687 688
				ISC_QUEUE_PUSH(manager->inactive, client,
					       ilink);
689 690
			}
			if (client->needshutdown) {
691
				isc_task_shutdown(client->task);
692
			}
693
			return (true);
694 695 696 697 698 699 700
		}
	}

	if (client->state == NS_CLIENTSTATE_INACTIVE) {
		INSIST(client->newstate == NS_CLIENTSTATE_FREED);
		/*
		 * We are trying to free the client.
701 702 703 704 705
		 *
		 * When "shuttingdown" is true, either the task has received
		 * its shutdown event or no shutdown event has ever been
		 * set up.  Thus, we have no outstanding shutdown
		 * event at this point.
706
		 */
707 708 709
		REQUIRE(client->state == NS_CLIENTSTATE_INACTIVE);

		INSIST(client->recursionquota == NULL);
710
		INSIST(!ISC_QLINK_LINKED(client, ilink));
711

712 713 714 715 716 717
		if (manager != NULL) {
			LOCK(&manager->listlock);
			ISC_LIST_UNLINK(manager->clients, client, link);
			LOCK(&manager->lock);
			if (manager->exiting &&
			    ISC_LIST_EMPTY(manager->clients))
718
				destroy_manager = true;
719 720 721 722
			UNLOCK(&manager->lock);
			UNLOCK(&manager->listlock);
		}

723 724 725 726 727
		ns_query_free(client);
		isc_mem_put(client->mctx, client->recvbuf, RECV_BUFFER_SIZE);
		isc_event_free((isc_event_t **)&client->sendevent);
		isc_event_free((isc_event_t **)&client->recvevent);
		isc_timer_detach(&client->timer);
728 729
		if (client->delaytimer != NULL)
			isc_timer_detach(&client->delaytimer);
730 731

		if (client->tcpbuf != NULL)
732 733
			isc_mem_put(client->mctx, client->tcpbuf,
				    TCP_BUFFER_SIZE);
734 735 736
		if (client->opt != NULL) {
			INSIST(dns_rdataset_isassociated(client->opt));
			dns_rdataset_disassociate(client->opt);
737 738
			dns_message_puttemprdataset(client->message,
						    &client->opt);
739
		}
740 741 742 743 744
		if (client->keytag != NULL) {
			isc_mem_put(client->mctx, client->keytag,
				    client->keytag_len);
			client->keytag_len = 0;
		}
745

746
		dns_message_destroy(&client->message);
747

748 749 750 751 752 753 754 755 756 757
		/*
		 * Detaching the task must be done after unlinking from
		 * the manager's lists because the manager accesses
		 * client->task.
		 */
		if (client->task != NULL)
			isc_task_detach(&client->task);

		CTRACE("free");
		client->magic = 0;
758

759 760 761 762 763 764 765
		/*
		 * Check that there are no other external references to
		 * the memory context.
		 */
		if (ns_g_clienttest && isc_mem_references(client->mctx) != 1) {
			isc_mem_stats(client->mctx, stderr);
			INSIST(0);
766
			ISC_UNREACHABLE();
767
		}
768 769 770 771 772 773 774

		/*
		 * Destroy the fetchlock mutex that was created in
		 * ns_query_init().
		 */
		DESTROYLOCK(&client->query.fetchlock);

775
		isc_mem_putanddetach(&client->mctx, client, sizeof(*client));
776 777
	}

778 779
	if (destroy_manager && manager != NULL)
		clientmgr_destroy(manager);
780

781
	return (true);
782 783
}

784
/*%
 * The client's task has received the client's control event
 * as part of the startup process.
 */
static void
client_start(isc_task_t *task, isc_event_t *event) {
	ns_client_t *client = (ns_client_t *)event->ev_arg;

	INSIST(task == client->task);
	UNUSED(task);

	/* This must be the one and only outstanding control event. */
	INSIST(client->nctls == 1);
	client->nctls--;

	if (exit_check(client)) {
		return;
	}

	if (!TCP_CLIENT(client)) {
		client_udprecv(client);
		return;
	}

	/* TCP: read if a connection is already attached, else accept. */
	if (client->tcpconn != NULL) {
		client_read(client);
	} else {
		client_accept(client);
	}
}

813
/*%
814 815
 * The client's task has received a shutdown event.
 */
Bob Halley's avatar
add  
Bob Halley committed
816 817 818 819