1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include "resolv_cache.h"
30 #include <resolv.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <time.h>
34 #include "pthread.h"
36 #include <errno.h>
37 #include <arpa/nameser.h>
38 #include <sys/system_properties.h>
39 #include <net/if.h>
40 #include <netdb.h>
41 #include <linux/if.h>
43 #include <arpa/inet.h>
44 #include "resolv_private.h"
45 #include "resolv_iface.h"
46 #include "res_private.h"
48 /* This code implements a small and *simple* DNS resolver cache.
49 *
50 * It is only used to cache DNS answers for a time defined by the smallest TTL
51 * among the answer records in order to reduce DNS traffic. It is not supposed
52 * to be a full DNS cache, since we plan to implement that in the future in a
53 * dedicated process running on the system.
54 *
55 * Note that its design is kept simple very intentionally, i.e.:
56 *
57 * - it takes raw DNS query packet data as input, and returns raw DNS
58 * answer packet data as output
59 *
60 * (this means that two similar queries that encode the DNS name
61 * differently will be treated distinctly).
62 *
63 * the smallest TTL value among the answer records are used as the time
64 * to keep an answer in the cache.
65 *
66 * this is bad, but we absolutely want to avoid parsing the answer packets
67 * (and should be solved by the later full DNS cache process).
68 *
69 * - the implementation is just a (query-data) => (answer-data) hash table
70 * with a trivial least-recently-used expiration policy.
71 *
72 * Doing this keeps the code simple and avoids to deal with a lot of things
73 * that a full DNS cache is expected to do.
74 *
75 * The API is also very simple:
76 *
77 * - the client calls _resolv_cache_get() to obtain a handle to the cache.
78 * this will initialize the cache on first usage. the result can be NULL
79 * if the cache is disabled.
80 *
81 * - the client calls _resolv_cache_lookup() before performing a query
82 *
83 * if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
84 * has been copied into the client-provided answer buffer.
85 *
86 * if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
87 * a request normally, *then* call _resolv_cache_add() to add the received
88 * answer to the cache.
89 *
90 * if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
91 * perform a request normally, and *not* call _resolv_cache_add()
92 *
93 * note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
94 * is too short to accomodate the cached result.
95 *
96 * - when network settings change, the cache must be flushed since the list
97 * of DNS servers probably changed. this is done by calling
98 * _resolv_cache_reset()
99 *
100 * the parameter to this function must be an ever-increasing generation
101 * number corresponding to the current network settings state.
102 *
103 * This is done because several threads could detect the same network
104 * settings change (but at different times) and will all end up calling the
105 * same function. Comparing with the last used generation number ensures
106 * that the cache is only flushed once per network change.
107 */
109 /* the name of an environment variable that will be checked the first time
110 * this code is called if its value is "0", then the resolver cache is
111 * disabled.
112 */
113 #define CONFIG_ENV "BIONIC_DNSCACHE"
115 /* entries older than CONFIG_SECONDS seconds are always discarded.
116 */
117 #define CONFIG_SECONDS (60*10) /* 10 minutes */
119 /* default number of entries kept in the cache. This value has been
120 * determined by browsing through various sites and counting the number
121 * of corresponding requests. Keep in mind that our framework is currently
122 * performing two requests per name lookup (one for IPv4, the other for IPv6)
123 *
124 * www.google.com 4
125 * www.ysearch.com 6
126 * www.amazon.com 8
127 * www.nytimes.com 22
128 * www.espn.com 28
129 * www.msn.com 28
130 * www.lemonde.fr 35
131 *
132 * (determined in 2009-2-17 from Paris, France, results may vary depending
133 * on location)
134 *
135 * most high-level websites use lots of media/ad servers with different names
136 * but these are generally reused when browsing through the site.
137 *
138 * As such, a value of 64 should be relatively comfortable at the moment.
139 *
140 * ******************************************
141 * * NOTE - this has changed.
142 * * 1) we've added IPv6 support so each dns query results in 2 responses
143 * * 2) we've made this a system-wide cache, so the cost is less (it's not
144 * * duplicated in each process) and the need is greater (more processes
145 * * making different requests).
146 * * Upping by 2x for IPv6
147 * * Upping by another 5x for the centralized nature
148 * *****************************************
149 */
/* Upper bound on the number of cache entries: base 64, x2 for IPv6
 * (two queries per name lookup), x5 for the system-wide cache.
 * Parenthesized so the macro expands safely inside any expression. */
#define CONFIG_MAX_ENTRIES (64 * 2 * 5)
151 /* name of the system property that can be used to set the cache size */
153 /****************************************************************************/
154 /****************************************************************************/
155 /***** *****/
156 /***** *****/
157 /***** *****/
158 /****************************************************************************/
159 /****************************************************************************/
161 /* set to 1 to debug cache operations */
162 #define DEBUG 0
164 /* set to 1 to debug query data */
165 #define DEBUG_DATA 0
167 #undef XLOG
168 #if DEBUG
169 # include "private/libc_logging.h"
170 # define XLOG(...) __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
172 #include <stdio.h>
173 #include <stdarg.h>
175 /** BOUNDED BUFFER FORMATTING
176 **/
178 /* technical note:
179 *
180 * the following debugging routines are used to append data to a bounded
181 * buffer they take two parameters that are:
182 *
183 * - p : a pointer to the current cursor position in the buffer
184 * this value is initially set to the buffer's address.
185 *
186 * - end : the address of the buffer's limit, i.e. of the first byte
187 * after the buffer. this address should never be touched.
188 *
189 * IMPORTANT: it is assumed that end > buffer_address, i.e.
190 * that the buffer is at least one byte.
191 *
192 * the _bprint_() functions return the new value of 'p' after the data
193 * has been appended, and also ensure the following:
194 *
195 * - the returned value will never be strictly greater than 'end'
196 *
197 * - a return value equal to 'end' means that truncation occured
198 * (in which case, end[-1] will be set to 0)
199 *
200 * - after returning from a _bprint_() function, the content of the buffer
201 * is always 0-terminated, even in the event of truncation.
202 *
203 * these conventions allow you to call _bprint_ functions multiple times and
204 * only check for truncation at the end of the sequence, as in:
205 *
206 * char buff[1000], *p = buff, *end = p + sizeof(buff);
207 *
208 * p = _bprint_c(p, end, '"');
209 * p = _bprint_s(p, end, my_string);
210 * p = _bprint_c(p, end, '"');
211 *
212 * if (p >= end) {
213 * // buffer was too small
214 * }
215 *
216 * printf( "%s", buff );
217 */
219 /* add a char to a bounded buffer */
static char*
_bprint_c( char* p, char* end, int c )
{
    /* nothing to do once the buffer is exhausted */
    if (p >= end)
        return p;

    if (p + 1 == end) {
        /* only room left for the 0-terminator: truncate */
        *p++ = 0;
    } else {
        *p++ = (char) c;
        *p   = 0;
    }
    return p;
}
234 /* add a sequence of bytes to a bounded buffer */
static char*
_bprint_b( char* p, char* end, const char* buf, int len )
{
    int  room = end - p;

    /* nothing to copy, or no space left */
    if (room <= 0 || len <= 0)
        return p;

    /* copy at most as many bytes as fit */
    if (len < room)
        room = len;

    memcpy( p, buf, room );
    p += room;

    /* keep the buffer 0-terminated, even on truncation */
    if (p == end)
        end[-1] = 0;
    else
        *p = 0;

    return p;
}
257 /* add a string to a bounded buffer */
/* add a NUL-terminated string to a bounded buffer.
 * same cursor/truncation conventions as _bprint_b(). */
static char*
_bprint_s( char* p, char* end, const char* str )
{
    return _bprint_b(p, end, str, strlen(str));
}
264 /* add a formatted string to a bounded buffer */
static char*
_bprint( char* p, char* end, const char* format, ... )
{
    va_list  args;
    int      room;
    int      written;

    room = end - p;
    if (room <= 0)
        return p;

    va_start(args, format);
    written = vsnprintf( p, room, format, args );
    va_end(args);

    /* certain C libraries return -1 in case of truncation */
    if (written < 0 || written > room)
        written = room;

    p += written;
    /* certain C libraries do not zero-terminate in case of truncation */
    if (p == end)
        p[-1] = 0;

    return p;
}
292 /* add a hex value to a bounded buffer, up to 8 digits */
static char*
_bprint_hex( char* p, char* end, unsigned value, int numDigits )
{
    char  text[sizeof(unsigned)*2];
    int   nn = 0;

    /* clamp numDigits to the scratch buffer's capacity (8 for 32-bit
     * unsigned): the previous code overflowed 'text' for larger counts */
    if (numDigits > (int)sizeof(text))
        numDigits = (int)sizeof(text);

    while (numDigits-- > 0) {
        text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
    }
    return _bprint_b(p, end, text, nn);
}
305 /* add the hexadecimal dump of some memory area to a bounded buffer */
/* add the hexadecimal dump of some memory area to a bounded buffer,
 * 16 bytes per line; non-printable bytes appear as '.' in the trailing
 * ASCII column. */
static char*
_bprint_hexdump( char* p, char* end, const uint8_t* data, int datalen )
{
    int lineSize = 16;

    while (datalen > 0) {
        int avail = datalen;
        int nn;

        if (avail > lineSize)
            avail = lineSize;

        /* hex column: two digits per byte, space-separated */
        for (nn = 0; nn < avail; nn++) {
            if (nn > 0)
                p = _bprint_c(p, end, ' ');
            p = _bprint_hex(p, end, data[nn], 2);
        }
        /* pad a short last line so the ASCII column stays aligned */
        for ( ; nn < lineSize; nn++ ) {
            p = _bprint_s(p, end, "   ");
        }
        p = _bprint_s(p, end, "  ");

        /* ASCII column */
        for (nn = 0; nn < avail; nn++) {
            int c = data[nn];

            if (c < 32 || c > 127)
                c = '.';

            p = _bprint_c(p, end, c);
        }
        p = _bprint_c(p, end, '\n');

        data    += avail;
        datalen -= avail;
    }
    return p;
}
344 /* dump the content of a query of packet to the log */
static void
XLOG_BYTES( const void* base, int len )
{
    /* output is truncated to this local 1KB scratch buffer */
    char buff[1024];
    char* p = buff, *end = p + sizeof(buff);

    p = _bprint_hexdump(p, end, base, len);
    XLOG("%s",buff);
}
355 #else /* !DEBUG */
356 # define XLOG(...) ((void)0)
357 # define XLOG_BYTES(a,b) ((void)0)
358 #endif
/* return the current wall-clock time in whole seconds */
static time_t
_time_now( void )
{
    struct timeval  now;

    gettimeofday( &now, NULL );
    return now.tv_sec;
}
369 /* reminder: the general format of a DNS packet is the following:
370 *
371 * HEADER (12 bytes)
372 * QUESTION (variable)
373 * ANSWER (variable)
374 * AUTHORITY (variable)
375 * ADDITIONNAL (variable)
376 *
377 * the HEADER is made of:
378 *
379 * ID : 16 : 16-bit unique query identification field
380 *
381 * QR : 1 : set to 0 for queries, and 1 for responses
382 * Opcode : 4 : set to 0 for queries
383 * AA : 1 : set to 0 for queries
384 * TC : 1 : truncation flag, will be set to 0 in queries
385 * RD : 1 : recursion desired
386 *
387 * RA : 1 : recursion available (0 in queries)
388 * Z : 3 : three reserved zero bits
389 * RCODE : 4 : response code (always 0=NOERROR in queries)
390 *
391 * QDCount: 16 : question count
392 * ANCount: 16 : Answer count (0 in queries)
393 * NSCount: 16: Authority Record count (0 in queries)
394 * ARCount: 16: Additionnal Record count (0 in queries)
395 *
396 * the QUESTION is made of QDCount Question Record (QRs)
397 * the ANSWER is made of ANCount RRs
398 * the AUTHORITY is made of NSCount RRs
399 * the ADDITIONNAL is made of ARCount RRs
400 *
401 * Each Question Record (QR) is made of:
402 *
403 * QNAME : variable : Query DNS NAME
404 * TYPE : 16 : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
405 * CLASS : 16 : class of query (IN=1)
406 *
407 * Each Resource Record (RR) is made of:
408 *
409 * NAME : variable : DNS NAME
410 * TYPE : 16 : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
411 * CLASS : 16 : class of query (IN=1)
412 * TTL : 32 : seconds to cache this RR (0=none)
413 * RDLENGTH: 16 : size of RDDATA in bytes
414 * RDDATA : variable : RR data (depends on TYPE)
415 *
416 * Each QNAME contains a domain name encoded as a sequence of 'labels'
417 * terminated by a zero. Each label has the following format:
418 *
419 * LEN : 8 : lenght of label (MUST be < 64)
420 * NAME : 8*LEN : label length (must exclude dots)
421 *
422 * A value of 0 in the encoding is interpreted as the 'root' domain and
423 * terminates the encoding. So 'www.android.com' will be encoded as:
424 *
425 * <3>www<7>android<3>com<0>
426 *
427 * Where <n> represents the byte with value 'n'
428 *
429 * Each NAME reflects the QNAME of the question, but has a slightly more
430 * complex encoding in order to provide message compression. This is achieved
431 * by using a 2-byte pointer, with format:
432 *
433 * TYPE : 2 : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
434 * OFFSET : 14 : offset to another part of the DNS packet
435 *
436 * The offset is relative to the start of the DNS packet and must point
437 * A pointer terminates the encoding.
438 *
439 * The NAME can be encoded in one of the following formats:
440 *
441 * - a sequence of simple labels terminated by 0 (like QNAMEs)
442 * - a single pointer
443 * - a sequence of simple labels terminated by a pointer
444 *
445 * A pointer shall always point to either a pointer of a sequence of
446 * labels (which can themselves be terminated by either a 0 or a pointer)
447 *
448 * The expanded length of a given domain name should not exceed 255 bytes.
449 *
450 * NOTE: we don't parse the answer packets, so don't need to deal with NAME
451 * records, only QNAMEs.
452 */
454 #define DNS_HEADER_SIZE 12
#define DNS_TYPE_A      "\00\01"   /* big-endian decimal 1 */
#define DNS_TYPE_PTR    "\00\014"  /* big-endian decimal 12 */
#define DNS_TYPE_MX     "\00\017"  /* big-endian decimal 15 */
#define DNS_TYPE_AAAA   "\00\034"  /* big-endian decimal 28 */
/* BUG FIX: this was "\00\0377", which C parses as the THREE bytes
 * {0x00, 0x1f, '7'} (an octal escape consumes at most three digits), so
 * a TYPE of ALL (255) never matched and TYPE 31 matched instead. */
#define DNS_TYPE_ALL    "\00\377"  /* big-endian decimal 255 */

#define DNS_CLASS_IN    "\00\01"   /* big-endian decimal 1 */
/* lightweight read cursor over a raw DNS packet: [base, end) is the
 * packet's extent and 'cursor' the current parse position. */
typedef struct {
    const uint8_t*  base;    /* first byte of the packet */
    const uint8_t*  end;     /* one past the last byte */
    const uint8_t*  cursor;  /* current read position, base <= cursor <= end */
} DnsPacket;
470 static void
471 _dnsPacket_init( DnsPacket* packet, const uint8_t* buff, int bufflen )
472 {
473 packet->base = buff;
474 packet->end = buff + bufflen;
475 packet->cursor = buff;
476 }
/* reset the read cursor to the first byte of the packet */
static void
_dnsPacket_rewind( DnsPacket* packet )
{
    packet->cursor = packet->base;
}
484 static void
485 _dnsPacket_skip( DnsPacket* packet, int count )
486 {
487 const uint8_t* p = packet->cursor + count;
489 if (p > packet->end)
490 p = packet->end;
492 packet->cursor = p;
493 }
495 static int
496 _dnsPacket_readInt16( DnsPacket* packet )
497 {
498 const uint8_t* p = packet->cursor;
500 if (p+2 > packet->end)
501 return -1;
503 packet->cursor = p+2;
504 return (p[0]<< 8) | p[1];
505 }
507 /** QUERY CHECKING
508 **/
510 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
511 * the cursor is only advanced in the case of success
512 */
513 static int
514 _dnsPacket_checkBytes( DnsPacket* packet, int numBytes, const void* bytes )
515 {
516 const uint8_t* p = packet->cursor;
518 if (p + numBytes > packet->end)
519 return 0;
521 if (memcmp(p, bytes, numBytes) != 0)
522 return 0;
524 packet->cursor = p + numBytes;
525 return 1;
526 }
528 /* parse and skip a given QNAME stored in a query packet,
529 * from the current cursor position. returns 1 on success,
530 * or 0 for malformed data.
531 */
static int
_dnsPacket_checkQName( DnsPacket* packet )
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;

    for (;;) {
        int c;

        if (p >= end)
            break;

        c = *p++;

        /* a zero length byte is the root label: end of the name */
        if (c == 0) {
            packet->cursor = p;
            return 1;
        }

        /* we don't expect label compression in QNAMEs */
        if (c >= 64)
            break;

        p += c;
        /* we rely on the bound check at the start
         * of the loop here */
    }
    /* malformed data; the cursor is left unchanged */
    XLOG("malformed QNAME");
    return 0;
}
564 /* parse and skip a given QR stored in a packet.
565 * returns 1 on success, and 0 on failure
566 */
static int
_dnsPacket_checkQR( DnsPacket* packet )
{
    /* QNAME must be a well-formed, uncompressed domain name */
    if (!_dnsPacket_checkQName(packet))
        return 0;

    /* TYPE must be one of the things we support */
    if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
        !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
    {
        XLOG("unsupported TYPE");
        return 0;
    }
    /* CLASS must be IN */
    if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
        XLOG("unsupported CLASS");
        return 0;
    }

    return 1;
}
592 /* check the header of a DNS Query packet, return 1 if it is one
593 * type of query we can cache, or 0 otherwise
594 */
static int
_dnsPacket_checkQuery( DnsPacket* packet )
{
    const uint8_t*  p = packet->base;
    int             qdCount, anCount, dnCount, arCount;

    if (p + DNS_HEADER_SIZE > packet->end) {
        XLOG("query packet too small");
        return 0;
    }

    /* QR must be set to 0, opcode must be 0 and AA must be 0 */
    /* RA, Z, and RCODE must be 0 */
    /* (0xFC masks bits 7..2 of the flags byte: QR, Opcode and AA;
     *  TC and RD are deliberately excluded, see below) */
    if ((p[2] & 0xFC) != 0 || p[3] != 0) {
        XLOG("query packet flags unsupported");
        return 0;
    }

    /* Note that we ignore the TC and RD bits here for the
     * following reasons:
     *
     * - there is no point for a query packet sent to a server
     *   to have the TC bit set, but the implementation might
     *   set the bit in the query buffer for its own needs
     *   between a _resolv_cache_lookup and a
     *   _resolv_cache_add. We should not freak out if this
     *   is the case.
     *
     * - we consider that the result from a RD=0 or a RD=1
     *   query might be different, hence that the RD bit
     *   should be used to differentiate cached result.
     *
     *   this implies that RD is checked when hashing or
     *   comparing query packets, but not TC
     */

    /* ANCOUNT, DNCOUNT and ARCOUNT must be 0
     * (dnCount holds the NSCOUNT/authority-record field of the header) */
    qdCount = (p[4] << 8) | p[5];
    anCount = (p[6] << 8) | p[7];
    dnCount = (p[8] << 8) | p[9];
    arCount = (p[10]<< 8) | p[11];

    if (anCount != 0 || dnCount != 0 || arCount != 0) {
        XLOG("query packet contains non-query records");
        return 0;
    }

    if (qdCount == 0) {
        XLOG("query packet doesn't contain query record");
        return 0;
    }

    /* Check QDCOUNT QRs */
    packet->cursor = p + DNS_HEADER_SIZE;

    for (;qdCount > 0; qdCount--)
        if (!_dnsPacket_checkQR(packet))
            return 0;

    return 1;
}
657 /** QUERY DEBUGGING
658 **/
659 #if DEBUG
/* append the dotted-name form of the QNAME at the cursor to a bounded
 * buffer, advancing the cursor past the name on success. on malformed
 * data, appends "<MALFORMED>" and leaves the cursor unchanged. */
static char*
_dnsPacket_bprintQName(DnsPacket* packet, char* bp, char* bend)
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;
    int             first = 1;

    for (;;) {
        int c;

        if (p >= end)
            break;

        c = *p++;

        if (c == 0) {
            packet->cursor = p;
            return bp;
        }

        /* we don't expect label compression in QNAMEs */
        if (c >= 64)
            break;

        /* print a dot between labels, but not before the first one */
        if (first)
            first = 0;
        else
            bp = _bprint_c(bp, bend, '.');

        bp = _bprint_b(bp, bend, (const char*)p, c);

        p += c;
        /* we rely on the bound check at the start
         * of the loop here */
    }
    /* malformed data */
    bp = _bprint_s(bp, bend, "<MALFORMED>");
    return bp;
}
/* append a human-readable "name (TYPE)" description of the question
 * record at the cursor to a bounded buffer, advancing the cursor past
 * the whole record (QNAME + TYPE + CLASS). */
static char*
_dnsPacket_bprintQR(DnsPacket* packet, char* p, char* end)
{
/* maps a 2-byte big-endian TYPE value to its symbolic name */
#define QQ(x) { DNS_TYPE_##x, #x }
    static const struct {
        const char* typeBytes;
        const char* typeString;
    } qTypes[] =
    {
        QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
        { NULL, NULL }
    };
    int nn;
    const char* typeString = NULL;

    /* dump QNAME */
    p = _dnsPacket_bprintQName(packet, p, end);

    /* dump TYPE */
    p = _bprint_s(p, end, " (");

    for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
        if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
            typeString = qTypes[nn].typeString;
            break;
        }
    }

    if (typeString != NULL)
        p = _bprint_s(p, end, typeString);
    else {
        /* unknown type: print its numeric value instead */
        int typeCode = _dnsPacket_readInt16(packet);
        p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    }

    p = _bprint_c(p, end, ')');

    /* skip CLASS */
    _dnsPacket_skip(packet, 2);
    return p;
}
742 /* this function assumes the packet has already been checked */
static char*
_dnsPacket_bprintQuery( DnsPacket* packet, char* p, char* end )
{
    int qdCount;

    /* RD bit (byte 2, bit 0) marks a recursive query */
    if (packet->base[2] & 0x1) {
        p = _bprint_s(p, end, "RECURSIVE ");
    }

    /* skip ID + flags, read QDCOUNT, then skip AN/NS/AR counts */
    _dnsPacket_skip(packet, 4);
    qdCount = _dnsPacket_readInt16(packet);
    _dnsPacket_skip(packet, 6);

    for ( ; qdCount > 0; qdCount-- ) {
        p = _dnsPacket_bprintQR(packet, p, end);
    }
    return p;
}
761 #endif
764 /** QUERY HASHING SUPPORT
765 **
766 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
767 ** BEEN SUCCESFULLY CHECKED.
768 **/
770 /* use 32-bit FNV hash function */
771 #define FNV_MULT 16777619U
772 #define FNV_BASIS 2166136261U
774 static unsigned
775 _dnsPacket_hashBytes( DnsPacket* packet, int numBytes, unsigned hash )
776 {
777 const uint8_t* p = packet->cursor;
778 const uint8_t* end = packet->end;
780 while (numBytes > 0 && p < end) {
781 hash = hash*FNV_MULT ^ *p++;
782 }
783 packet->cursor = p;
784 return hash;
785 }
/* fold the QNAME at the cursor into 'hash' (label bytes only; the
 * length bytes themselves are not hashed), advancing the cursor past
 * the name. assumes a packet that already passed _dnsPacket_checkQuery;
 * the XLOG branches flag internal inconsistencies only. */
static unsigned
_dnsPacket_hashQName( DnsPacket* packet, unsigned hash )
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;

    for (;;) {
        int c;

        if (p >= end) { /* should not happen */
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }

        c = *p++;

        if (c == 0)
            break;

        if (c >= 64) {
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if (p + c >= end) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        while (c > 0) {
            hash = hash*FNV_MULT ^ *p++;
            c -= 1;
        }
    }
    packet->cursor = p;
    return hash;
}
/* fold one question record into 'hash': QNAME, then TYPE and CLASS */
static unsigned
_dnsPacket_hashQR( DnsPacket* packet, unsigned hash )
{
    hash = _dnsPacket_hashQName(packet, hash);
    hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
    return hash;
}
/* hash a whole (pre-checked) query packet: the RD bit, QDCOUNT and
 * each question record. all other header fields are assumed zero. */
static unsigned
_dnsPacket_hashQuery( DnsPacket* packet )
{
    unsigned  hash = FNV_BASIS;
    int       count;
    _dnsPacket_rewind(packet);

    /* we ignore the TC bit for reasons explained in
     * _dnsPacket_checkQuery().
     *
     * however we hash the RD bit to differentiate
     * between answers for recursive and non-recursive
     * queries.
     */
    hash = hash*FNV_MULT ^ (packet->base[2] & 1);

    /* assume: other flags are 0 */
    _dnsPacket_skip(packet, 4);

    /* read QDCOUNT */
    count = _dnsPacket_readInt16(packet);

    /* assume: ANcount, NScount, ARcount are 0 */
    _dnsPacket_skip(packet, 6);

    /* hash QDCOUNT QRs */
    for ( ; count > 0; count-- )
        hash = _dnsPacket_hashQR(packet, hash);

    return hash;
}
866 /** QUERY COMPARISON
867 **
868 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
869 ** BEEN SUCCESFULLY CHECKED.
870 **/
/* compare the domain names at both cursors. on equality both cursors
 * advance past the names and 1 is returned; on mismatch or malformed
 * data the cursors are left unchanged and 0 is returned. */
static int
_dnsPacket_isEqualDomainName( DnsPacket* pack1, DnsPacket* pack2 )
{
    const uint8_t*  p1   = pack1->cursor;
    const uint8_t*  end1 = pack1->end;
    const uint8_t*  p2   = pack2->cursor;
    const uint8_t*  end2 = pack2->end;

    for (;;) {
        int c1, c2;

        if (p1 >= end1 || p2 >= end2) {
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }
        c1 = *p1++;
        c2 = *p2++;
        /* compare the label length bytes first */
        if (c1 != c2)
            break;

        if (c1 == 0) {
            /* both names ended at the same label: equal */
            pack1->cursor = p1;
            pack2->cursor = p2;
            return 1;
        }
        if (c1 >= 64) {
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if ((p1+c1 > end1) || (p2+c1 > end2)) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        if (memcmp(p1, p2, c1) != 0)
            break;
        p1 += c1;
        p2 += c1;
        /* we rely on the bound checks at the start of the loop */
    }
    /* not the same, or one is malformed */
    XLOG("different DN");
    return 0;
}
917 static int
918 _dnsPacket_isEqualBytes( DnsPacket* pack1, DnsPacket* pack2, int numBytes )
919 {
920 const uint8_t* p1 = pack1->cursor;
921 const uint8_t* p2 = pack2->cursor;
923 if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
924 return 0;
926 if ( memcmp(p1, p2, numBytes) != 0 )
927 return 0;
929 pack1->cursor += numBytes;
930 pack2->cursor += numBytes;
931 return 1;
932 }
934 static int
935 _dnsPacket_isEqualQR( DnsPacket* pack1, DnsPacket* pack2 )
936 {
937 /* compare domain name encoding + TYPE + CLASS */
938 if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
939 !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
940 return 0;
942 return 1;
943 }
/* compare two pre-checked query packets; returns 1 when they are
 * equivalent (same RD bit, same QDCOUNT and identical question
 * records), 0 otherwise. */
static int
_dnsPacket_isEqualQuery( DnsPacket* pack1, DnsPacket* pack2 )
{
    int count1, count2;

    /* compare the headers, ignore most fields */
    _dnsPacket_rewind(pack1);
    _dnsPacket_rewind(pack2);

    /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
    if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
        XLOG("different RD");
        return 0;
    }

    /* assume: other flags are all 0 */
    _dnsPacket_skip(pack1, 4);
    _dnsPacket_skip(pack2, 4);

    /* compare QDCOUNT */
    count1 = _dnsPacket_readInt16(pack1);
    count2 = _dnsPacket_readInt16(pack2);
    if (count1 != count2 || count1 < 0) {
        XLOG("different QDCOUNT");
        return 0;
    }

    /* assume: ANcount, NScount and ARcount are all 0 */
    _dnsPacket_skip(pack1, 6);
    _dnsPacket_skip(pack2, 6);

    /* compare the QDCOUNT QRs */
    for ( ; count1 > 0; count1-- ) {
        if (!_dnsPacket_isEqualQR(pack1, pack2)) {
            XLOG("different QR");
            return 0;
        }
    }
    return 1;
}
986 /****************************************************************************/
987 /****************************************************************************/
988 /***** *****/
989 /***** *****/
990 /***** *****/
991 /****************************************************************************/
992 /****************************************************************************/
994 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
995 * structure though they are conceptually part of the hash table.
996 *
997 * similarly, mru_next and mru_prev are part of the global MRU list
998 */
typedef struct Entry {
    unsigned int     hash;      /* hash value */
    struct Entry*    hlink;     /* next in collision chain */
    struct Entry*    mru_prev;  /* neighbours in the global MRU list */
    struct Entry*    mru_next;

    const uint8_t*   query;     /* raw query packet (the cache key) */
    int              querylen;
    const uint8_t*   answer;    /* raw answer packet (the cache value) */
    int              answerlen;
    time_t           expires;   /* time_t when the entry isn't valid any more */
    int              id;        /* for debugging purpose */
} Entry;
1013 /**
1014 * Find the TTL for a negative DNS result. This is defined as the minimum
1015 * of the SOA records TTL and the MINIMUM-TTL field (RFC-2308).
1016 *
1017 * Return 0 if not found.
1018 */
1019 static u_long
1020 answer_getNegativeTTL(ns_msg handle) {
1021 int n, nscount;
1022 u_long result = 0;
1023 ns_rr rr;
1025 nscount = ns_msg_count(handle, ns_s_ns);
1026 for (n = 0; n < nscount; n++) {
1027 if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1028 const u_char *rdata = ns_rr_rdata(rr); // find the data
1029 const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1030 int len;
1031 u_long ttl, rec_result = ns_rr_ttl(rr);
1033 // find the MINIMUM-TTL field from the blob of binary data for this record
1034 // skip the server name
1035 len = dn_skipname(rdata, edata);
1036 if (len == -1) continue; // error skipping
1037 rdata += len;
1039 // skip the admin name
1040 len = dn_skipname(rdata, edata);
1041 if (len == -1) continue; // error skipping
1042 rdata += len;
1044 if (edata - rdata != 5*NS_INT32SZ) continue;
1045 // skip: serial number + refresh interval + retry interval + expiry
1046 rdata += NS_INT32SZ * 4;
1047 // finally read the MINIMUM TTL
1048 ttl = ns_get32(rdata);
1049 if (ttl < rec_result) {
1050 rec_result = ttl;
1051 }
1052 // Now that the record is read successfully, apply the new min TTL
1053 if (n == 0 || rec_result < result) {
1054 result = rec_result;
1055 }
1056 }
1057 }
1058 return result;
1059 }
1061 /**
1062 * Parse the answer records and find the appropriate
1063 * smallest TTL among the records. This might be from
1064 * the answer records if found or from the SOA record
1065 * if it's a negative result.
1066 *
1067 * The returned TTL is the number of seconds to
1068 * keep the answer in the cache.
1069 *
1070 * In case of parse error zero (0) is returned which
1071 * indicates that the answer shall not be cached.
1072 */
1073 static u_long
1074 answer_getTTL(const void* answer, int answerlen)
1075 {
1076 ns_msg handle;
1077 int ancount, n;
1078 u_long result, ttl;
1079 ns_rr rr;
1081 result = 0;
1082 if (ns_initparse(answer, answerlen, &handle) >= 0) {
1083 // get number of answer records
1084 ancount = ns_msg_count(handle, ns_s_an);
1086 if (ancount == 0) {
1087 // a response with no answers? Cache this negative result.
1088 result = answer_getNegativeTTL(handle);
1089 } else {
1090 for (n = 0; n < ancount; n++) {
1091 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1092 ttl = ns_rr_ttl(rr);
1093 if (n == 0 || ttl < result) {
1094 result = ttl;
1095 }
1096 } else {
1097 XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1098 }
1099 }
1100 }
1101 } else {
1102 XLOG("ns_parserr failed. %s\n", strerror(errno));
1103 }
1105 XLOG("TTL = %d\n", result);
1107 return result;
1108 }
1110 static void
1111 entry_free( Entry* e )
1112 {
1113 /* everything is allocated in a single memory block */
1114 if (e) {
1115 free(e);
1116 }
1117 }
/* unlink an entry from the doubly-linked MRU list (the list head is a
 * sentinel node, so no NULL checks are needed) */
static __inline__ void
entry_mru_remove( Entry* e )
{
    e->mru_prev->mru_next = e->mru_next;
    e->mru_next->mru_prev = e->mru_prev;
}
/* insert an entry right after the sentinel 'list', i.e. mark it as the
 * most-recently-used element */
static __inline__ void
entry_mru_add( Entry* e, Entry* list )
{
    Entry*  first = list->mru_next;

    e->mru_next = first;
    e->mru_prev = list;

    list->mru_next = e;
    first->mru_prev = e;
}
1138 /* compute the hash of a given entry, this is a hash of most
1139 * data in the query (key) */
static unsigned
entry_hash( const Entry* e )
{
    DnsPacket  pack[1];

    /* delegate to the DnsPacket query hasher over the raw query bytes */
    _dnsPacket_init(pack, e->query, e->querylen);
    return _dnsPacket_hashQuery(pack);
}
1149 /* initialize an Entry as a search key, this also checks the input query packet
1150 * returns 1 on success, or 0 in case of unsupported/malformed data */
static int
entry_init_key( Entry* e, const void* query, int querylen )
{
    DnsPacket  pack[1];

    memset(e, 0, sizeof(*e));

    /* a key entry only borrows the caller's query buffer: no copy */
    e->query    = query;
    e->querylen = querylen;
    e->hash     = entry_hash(e);

    /* the hash is computed before validation; the hashing helpers stop
     * early on malformed data, and an invalid packet makes this
     * function return 0 so the key is never used */
    _dnsPacket_init(pack, query, querylen);

    return _dnsPacket_checkQuery(pack);
}
1167 /* allocate a new entry as a cache node */
1168 static Entry*
1169 entry_alloc( const Entry* init, const void* answer, int answerlen )
1170 {
1171 Entry* e;
1172 int size;
1174 size = sizeof(*e) + init->querylen + answerlen;
1175 e = calloc(size, 1);
1176 if (e == NULL)
1177 return e;
1179 e->hash = init->hash;
1180 e->query = (const uint8_t*)(e+1);
1181 e->querylen = init->querylen;
1183 memcpy( (char*)e->query, init->query, e->querylen );
1185 e->answer = e->query + e->querylen;
1186 e->answerlen = answerlen;
1188 memcpy( (char*)e->answer, answer, e->answerlen );
1190 return e;
1191 }
1193 static int
1194 entry_equals( const Entry* e1, const Entry* e2 )
1195 {
1196 DnsPacket pack1[1], pack2[1];
1198 if (e1->querylen != e2->querylen) {
1199 return 0;
1200 }
1201 _dnsPacket_init(pack1, e1->query, e1->querylen);
1202 _dnsPacket_init(pack2, e2->query, e2->querylen);
1204 return _dnsPacket_isEqualQuery(pack1, pack2);
1205 }
1207 /****************************************************************************/
1208 /****************************************************************************/
1209 /***** *****/
1210 /***** *****/
1211 /***** *****/
1212 /****************************************************************************/
1213 /****************************************************************************/
1215 /* We use a simple hash table with external collision lists
1216 * for simplicity, the hash-table fields 'hash' and 'hlink' are
1217 * inlined in the Entry structure.
1218 */
/* Maximum time, in seconds, a thread waits for a pending request.
 * BUG FIX: the definition carried a stray trailing ';' which silently
 * injected an empty statement at every expansion site and would break
 * any use of the macro inside a larger expression. */
#define PENDING_REQUEST_TIMEOUT 20
/* One in-flight query. Threads that would send a duplicate of a pending
 * query wait on 'cond' instead; nodes are chained off Cache.pending_requests. */
typedef struct pending_req_info {
    unsigned int             hash;   /* query hash, same value as Entry.hash */
    pthread_cond_t           cond;   /* waiters are signalled on add/failure */
    struct pending_req_info* next;
} PendingReqInfo;

/* A single DNS answer cache (one per interface). */
typedef struct resolv_cache {
    int              max_entries;        /* hash-table capacity (see _res_cache_get_max_entries) */
    int              num_entries;        /* current number of cached answers */
    Entry            mru_list;           /* sentinel of circular MRU list */
    pthread_mutex_t  lock;               /* guards all fields of this struct */
    unsigned         generation;         /* bumped externally to force flushes */
    int              last_id;            /* monotonically increasing debug id */
    Entry*           entries;            /* hash table, collision-chained via Entry.hlink */
    PendingReqInfo   pending_requests;   /* sentinel head of pending-query list */
} Cache;

/* Per-interface resolver configuration plus its cache. */
typedef struct resolv_cache_info {
    char                      ifname[IF_NAMESIZE + 1];   /* interface name, NUL-terminated */
    struct in_addr            ifaddr;                    /* interface address */
    Cache*                    cache;
    struct resolv_cache_info* next;                      /* singly-linked list of infos */
    char*                     nameservers[MAXNS +1];     /* strdup'd server strings, NULL-terminated */
    struct addrinfo*          nsaddrinfo[MAXNS + 1];     /* resolved addresses, parallel to nameservers */
    char                      defdname[256];             /* search-domain string, '\0'-split in place */
    int                       dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname, -1 terminated
} CacheInfo;

/* pid -> interface binding (list headed by _res_pidiface_list). */
typedef struct resolv_pidiface_info {
    int                          pid;
    char                         ifname[IF_NAMESIZE + 1];
    struct resolv_pidiface_info* next;
} PidIfaceInfo;
/* [uid_start, uid_end] -> interface binding (list headed by _res_uidiface_list). */
typedef struct resolv_uidiface_info {
    int                          uid_start;
    int                          uid_end;
    char                         ifname[IF_NAMESIZE + 1];
    struct resolv_uidiface_info* next;
} UidIfaceInfo;

/* True for a live hash slot; HTABLE_DELETED is presumably a tombstone
 * sentinel defined elsewhere in this file — confirm before relying on it. */
#define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
1265 static void
1266 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1267 {
1268 struct pending_req_info *ri, *tmp;
1269 if (cache) {
1270 ri = cache->pending_requests.next;
1272 while (ri) {
1273 tmp = ri;
1274 ri = ri->next;
1275 pthread_cond_broadcast(&tmp->cond);
1277 pthread_cond_destroy(&tmp->cond);
1278 free(tmp);
1279 }
1281 cache->pending_requests.next = NULL;
1282 }
1283 }
/* return 0 if no pending request is found matching the key
 * if a matching request is found the calling thread will wait
 * and return 1 when released */
static int
_cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
{
    struct pending_req_info *ri, *prev;
    int exist = 0;

    if (cache && key) {
        /* scan the pending list for a request with the same query hash */
        ri = cache->pending_requests.next;
        prev = &cache->pending_requests;
        while (ri) {
            if (ri->hash == key->hash) {
                exist = 1;
                break;
            }
            prev = ri;
            ri = ri->next;
        }

        if (!exist) {
            /* no one is asking this yet: register ourselves so later
             * threads with the same query will wait instead of re-sending.
             * A calloc failure is tolerated silently — duplicates may then
             * go out, but correctness is preserved. */
            ri = calloc(1, sizeof(struct pending_req_info));
            if (ri) {
                ri->hash = key->hash;
                pthread_cond_init(&ri->cond, NULL);
                prev->next = ri;
            }
        } else {
            /* someone else already sent this query: block until they add
             * the answer (or report failure), or until the timeout expires.
             * pthread_cond_timedwait takes an ABSOLUTE deadline, hence
             * now + timeout. The wait atomically releases cache->lock and
             * re-acquires it before returning.
             * NOTE(review): 'ri' may be freed by the notifying thread while
             * we wait; it is not dereferenced afterwards, which is why this
             * is safe — keep it that way. */
            struct timespec ts = {0,0};
            XLOG("Waiting for previous request");
            ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
            pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
        }
    }

    return exist;
}
1324 /* notify any waiting thread that waiting on a request
1325 * matching the key has been added to the cache */
1326 static void
1327 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1328 {
1329 struct pending_req_info *ri, *prev;
1331 if (cache && key) {
1332 ri = cache->pending_requests.next;
1333 prev = &cache->pending_requests;
1334 while (ri) {
1335 if (ri->hash == key->hash) {
1336 pthread_cond_broadcast(&ri->cond);
1337 break;
1338 }
1339 prev = ri;
1340 ri = ri->next;
1341 }
1343 // remove item from list and destroy
1344 if (ri) {
1345 prev->next = ri->next;
1346 pthread_cond_destroy(&ri->cond);
1347 free(ri);
1348 }
1349 }
1350 }
1352 /* notify the cache that the query failed */
1353 void
1354 _resolv_cache_query_failed( struct resolv_cache* cache,
1355 const void* query,
1356 int querylen)
1357 {
1358 Entry key[1];
1360 if (cache && entry_init_key(key, query, querylen)) {
1361 pthread_mutex_lock(&cache->lock);
1362 _cache_notify_waiting_tid_locked(cache, key);
1363 pthread_mutex_unlock(&cache->lock);
1364 }
1365 }
1367 static void
1368 _cache_flush_locked( Cache* cache )
1369 {
1370 int nn;
1372 for (nn = 0; nn < cache->max_entries; nn++)
1373 {
1374 Entry** pnode = (Entry**) &cache->entries[nn];
1376 while (*pnode != NULL) {
1377 Entry* node = *pnode;
1378 *pnode = node->hlink;
1379 entry_free(node);
1380 }
1381 }
1383 // flush pending request
1384 _cache_flush_pending_requests_locked(cache);
1386 cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1387 cache->num_entries = 0;
1388 cache->last_id = 0;
1390 XLOG("*************************\n"
1391 "*** DNS CACHE FLUSHED ***\n"
1392 "*************************");
1393 }
1395 static int
1396 _res_cache_get_max_entries( void )
1397 {
1398 int cache_size = CONFIG_MAX_ENTRIES;
1400 const char* cache_mode = getenv("ANDROID_DNS_MODE");
1401 if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1402 // Don't use the cache in local mode. This is used by the proxy itself.
1403 cache_size = 0;
1404 }
1406 XLOG("cache size: %d", cache_size);
1407 return cache_size;
1408 }
1410 static struct resolv_cache*
1411 _resolv_cache_create( void )
1412 {
1413 struct resolv_cache* cache;
1415 cache = calloc(sizeof(*cache), 1);
1416 if (cache) {
1417 cache->max_entries = _res_cache_get_max_entries();
1418 cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1419 if (cache->entries) {
1420 cache->generation = ~0U;
1421 pthread_mutex_init( &cache->lock, NULL );
1422 cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1423 XLOG("%s: cache created\n", __FUNCTION__);
1424 } else {
1425 free(cache);
1426 cache = NULL;
1427 }
1428 }
1429 return cache;
1430 }
1433 #if DEBUG
/* DEBUG helper: pretty-print a raw DNS query into the log.
 * Output longer than the 256-byte scratch buffer is truncated by _bprint. */
static void
_dump_query( const uint8_t* query, int querylen )
{
    char temp[256], *p=temp, *end=p+sizeof(temp);
    DnsPacket pack[1];

    _dnsPacket_init(pack, query, querylen);
    p = _dnsPacket_bprintQuery(pack, p, end);
    XLOG("QUERY: %s", temp);
}
/* DEBUG helper: log the entry ids on the MRU list, most-recent first.
 * Only the first ~512 bytes of ids fit; the rest are truncated. */
static void
_cache_dump_mru( Cache* cache )
{
    char temp[512], *p=temp, *end=p+sizeof(temp);
    Entry* e;

    p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
    /* walk the circular list until we hit the sentinel again */
    for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
        p = _bprint(p, end, " %d", e->id);

    XLOG("%s", temp);
}
1458 static void
1459 _dump_answer(const void* answer, int answerlen)
1460 {
1461 res_state statep;
1462 FILE* fp;
1463 char* buf;
1464 int fileLen;
1466 fp = fopen("/data/reslog.txt", "w+");
1467 if (fp != NULL) {
1468 statep = __res_get_state();
1470 res_pquery(statep, answer, answerlen, fp);
1472 //Get file length
1473 fseek(fp, 0, SEEK_END);
1474 fileLen=ftell(fp);
1475 fseek(fp, 0, SEEK_SET);
1476 buf = (char *)malloc(fileLen+1);
1477 if (buf != NULL) {
1478 //Read file contents into buffer
1479 fread(buf, fileLen, 1, fp);
1480 XLOG("%s\n", buf);
1481 free(buf);
1482 }
1483 fclose(fp);
1484 remove("/data/reslog.txt");
1485 }
1486 else {
1487 errno = 0; // else debug is introducing error signals
1488 XLOG("_dump_answer: can't open file\n");
1489 }
1490 }
1491 #endif
/* Route query/answer dumps through the DEBUG helpers above; in release
 * builds they compile away to nothing. */
#if DEBUG
#  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
#  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
#else
#  define  XLOG_QUERY(q,len)   ((void)0)
#  define  XLOG_ANSWER(a,len)  ((void)0)
#endif
1501 /* This function tries to find a key within the hash table
1502 * In case of success, it will return a *pointer* to the hashed key.
1503 * In case of failure, it will return a *pointer* to NULL
1504 *
1505 * So, the caller must check '*result' to check for success/failure.
1506 *
1507 * The main idea is that the result can later be used directly in
1508 * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1509 * parameter. This makes the code simpler and avoids re-searching
1510 * for the key position in the htable.
1511 *
1512 * The result of a lookup_p is only valid until you alter the hash
1513 * table.
1514 */
1515 static Entry**
1516 _cache_lookup_p( Cache* cache,
1517 Entry* key )
1518 {
1519 int index = key->hash % cache->max_entries;
1520 Entry** pnode = (Entry**) &cache->entries[ index ];
1522 while (*pnode != NULL) {
1523 Entry* node = *pnode;
1525 if (node == NULL)
1526 break;
1528 if (node->hash == key->hash && entry_equals(node, key))
1529 break;
1531 pnode = &node->hlink;
1532 }
1533 return pnode;
1534 }
1536 /* Add a new entry to the hash table. 'lookup' must be the
1537 * result of an immediate previous failed _lookup_p() call
1538 * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1539 * newly created entry
1540 */
1541 static void
1542 _cache_add_p( Cache* cache,
1543 Entry** lookup,
1544 Entry* e )
1545 {
1546 *lookup = e;
1547 e->id = ++cache->last_id;
1548 entry_mru_add(e, &cache->mru_list);
1549 cache->num_entries += 1;
1551 XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1552 e->id, cache->num_entries);
1553 }
1555 /* Remove an existing entry from the hash table,
1556 * 'lookup' must be the result of an immediate previous
1557 * and succesful _lookup_p() call.
1558 */
1559 static void
1560 _cache_remove_p( Cache* cache,
1561 Entry** lookup )
1562 {
1563 Entry* e = *lookup;
1565 XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1566 e->id, cache->num_entries-1);
1568 entry_mru_remove(e);
1569 *lookup = e->hlink;
1570 entry_free(e);
1571 cache->num_entries -= 1;
1572 }
1574 /* Remove the oldest entry from the hash table.
1575 */
1576 static void
1577 _cache_remove_oldest( Cache* cache )
1578 {
1579 Entry* oldest = cache->mru_list.mru_prev;
1580 Entry** lookup = _cache_lookup_p(cache, oldest);
1582 if (*lookup == NULL) { /* should not happen */
1583 XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1584 return;
1585 }
1586 if (DEBUG) {
1587 XLOG("Cache full - removing oldest");
1588 XLOG_QUERY(oldest->query, oldest->querylen);
1589 }
1590 _cache_remove_p(cache, lookup);
1591 }
/* Remove all expired entries from the hash table.
 */
static void _cache_remove_expired(Cache* cache) {
    Entry* e;
    time_t now = _time_now();

    for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
        // Entry is old, remove
        if (now >= e->expires) {
            Entry** lookup = _cache_lookup_p(cache, e);
            if (*lookup == NULL) { /* should not happen */
                XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
                return;
            }
            /* advance BEFORE removal: _cache_remove_p frees 'e', so the
             * next pointer must be captured first */
            e = e->mru_next;
            _cache_remove_p(cache, lookup);
        } else {
            e = e->mru_next;
        }
    }
}
/* Look up 'query' in the cache and, on a hit, copy the cached answer
 * into 'answer' (at most 'answersize' bytes) and set '*answerlen'.
 *
 * Returns:
 *   RESOLV_CACHE_FOUND        answer copied out
 *   RESOLV_CACHE_NOTFOUND     caller should send the query upstream
 *   RESOLV_CACHE_UNSUPPORTED  malformed/unsupported query, or the
 *                             caller's buffer is too small (in the latter
 *                             case *answerlen holds the required size)
 *
 * On a miss this may BLOCK (up to PENDING_REQUEST_TIMEOUT seconds) if
 * another thread already sent the same query, then re-check the cache.
 */
ResolvCacheStatus
_resolv_cache_lookup( struct resolv_cache*  cache,
                      const void*           query,
                      int                   querylen,
                      void*                 answer,
                      int                   answersize,
                      int                  *answerlen )
{
    Entry      key[1];
    Entry**    lookup;
    Entry*     e;
    time_t     now;

    ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;

    XLOG("%s: lookup", __FUNCTION__);
    XLOG_QUERY(query, querylen);

    /* we don't cache malformed queries */
    if (!entry_init_key(key, query, querylen)) {
        XLOG("%s: unsupported query", __FUNCTION__);
        return RESOLV_CACHE_UNSUPPORTED;
    }
    /* lookup cache */
    pthread_mutex_lock( &cache->lock );

    /* see the description of _lookup_p to understand this.
     * the function always return a non-NULL pointer.
     */
    lookup = _cache_lookup_p(cache, key);
    e      = *lookup;

    if (e == NULL) {
        XLOG( "NOT IN CACHE");
        // calling thread will wait if an outstanding request is found
        // that matching this query
        if (!_cache_check_pending_request_locked(cache, key)) {
            goto Exit;
        } else {
            /* we waited on another thread's identical request; it may
             * have populated the cache meanwhile, so look again */
            lookup = _cache_lookup_p(cache, key);
            e = *lookup;
            if (e == NULL) {
                goto Exit;
            }
        }
    }

    now = _time_now();

    /* remove stale entries here */
    if (now >= e->expires) {
        XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
        XLOG_QUERY(e->query, e->querylen);
        _cache_remove_p(cache, lookup);
        goto Exit;
    }

    /* report the full answer size even when it won't fit */
    *answerlen = e->answerlen;
    if (e->answerlen > answersize) {
        /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
        result = RESOLV_CACHE_UNSUPPORTED;
        XLOG(" ANSWER TOO LONG");
        goto Exit;
    }

    memcpy( answer, e->answer, e->answerlen );

    /* bump up this entry to the top of the MRU list */
    if (e != cache->mru_list.mru_next) {
        entry_mru_remove( e );
        entry_mru_add( e, &cache->mru_list );
    }

    XLOG( "FOUND IN CACHE entry=%p", e );
    result = RESOLV_CACHE_FOUND;

Exit:
    pthread_mutex_unlock( &cache->lock );
    return result;
}
/* Insert 'answer' for 'query' into the cache, evicting expired or LRU
 * entries if the cache is full. Answers whose computed TTL is zero are
 * not stored. Always notifies any threads waiting on this query, even
 * when nothing is added. */
void
_resolv_cache_add( struct resolv_cache*  cache,
                   const void*           query,
                   int                   querylen,
                   const void*           answer,
                   int                   answerlen )
{
    Entry    key[1];
    Entry*   e;
    Entry**  lookup;
    u_long   ttl;

    /* don't assume that the query has already been cached
     */
    if (!entry_init_key( key, query, querylen )) {
        XLOG( "%s: passed invalid query ?", __FUNCTION__);
        return;
    }

    pthread_mutex_lock( &cache->lock );

    XLOG( "%s: query:", __FUNCTION__ );
    XLOG_QUERY(query,querylen);
    XLOG_ANSWER(answer, answerlen);
#if DEBUG_DATA
    XLOG( "answer:");
    XLOG_BYTES(answer,answerlen);
#endif

    lookup = _cache_lookup_p(cache, key);
    e      = *lookup;

    if (e != NULL) { /* should not happen */
        XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
             __FUNCTION__, e);
        goto Exit;
    }

    if (cache->num_entries >= cache->max_entries) {
        /* prefer dropping dead entries; fall back to LRU eviction */
        _cache_remove_expired(cache);
        if (cache->num_entries >= cache->max_entries) {
            _cache_remove_oldest(cache);
        }
        /* need to lookup again: evictions invalidated 'lookup' */
        lookup = _cache_lookup_p(cache, key);
        e = *lookup;
        if (e != NULL) {
            XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
                 __FUNCTION__, e);
            goto Exit;
        }
    }

    /* ttl == 0 means "don't cache" (e.g. parse failure upstream) */
    ttl = answer_getTTL(answer, answerlen);
    if (ttl > 0) {
        e = entry_alloc(key, answer, answerlen);
        if (e != NULL) {
            e->expires = ttl + _time_now();
            _cache_add_p(cache, lookup, e);
        }
    }
#if DEBUG
    _cache_dump_mru(cache);
#endif
Exit:
    /* release any threads blocked waiting for this answer */
    _cache_notify_waiting_tid_locked(cache, key);
    pthread_mutex_unlock( &cache->lock );
}
1766 /****************************************************************************/
1767 /****************************************************************************/
1768 /***** *****/
1769 /***** *****/
1770 /***** *****/
1771 /****************************************************************************/
1772 /****************************************************************************/
1774 static pthread_once_t _res_cache_once = PTHREAD_ONCE_INIT;
1776 // Head of the list of caches. Protected by _res_cache_list_lock.
1777 static struct resolv_cache_info _res_cache_list;
1779 // List of pid iface pairs
1780 static struct resolv_pidiface_info _res_pidiface_list;
1782 // List of uid iface pairs
1783 static struct resolv_uidiface_info _res_uidiface_list;
1785 // name of the current default inteface
1786 static char _res_default_ifname[IF_NAMESIZE + 1];
1788 // lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
1789 static pthread_mutex_t _res_cache_list_lock;
1791 // lock protecting the _res_pid_iface_list
1792 static pthread_mutex_t _res_pidiface_list_lock;
1794 // lock protecting the _res_uidiface_list
1795 static pthread_mutex_t _res_uidiface_list_lock;
1797 /* lookup the default interface name */
1798 static char *_get_default_iface_locked();
1799 /* find the first cache that has an associated interface and return the name of the interface */
1800 static char* _find_any_iface_name_locked( void );
1802 /* insert resolv_cache_info into the list of resolv_cache_infos */
1803 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
1804 /* creates a resolv_cache_info */
1805 static struct resolv_cache_info* _create_cache_info( void );
1806 /* gets cache associated with an interface name, or NULL if none exists */
1807 static struct resolv_cache* _find_named_cache_locked(const char* ifname);
1808 /* gets a resolv_cache_info associated with an interface name, or NULL if not found */
1809 static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
1810 /* look up the named cache, and creates one if needed */
1811 static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
1812 /* empty the named cache */
1813 static void _flush_cache_for_iface_locked(const char* ifname);
1814 /* empty the nameservers set for the named cache */
1815 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
1816 /* lookup the namserver for the name interface */
1817 static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
1818 /* lookup the addr of the nameserver for the named interface */
1819 static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
1820 /* lookup the inteface's address */
1821 static struct in_addr* _get_addr_locked(const char * ifname);
1822 /* return 1 if the provided list of name servers differs from the list of name servers
1823 * currently attached to the provided cache_info */
1824 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1825 const char** servers, int numservers);
1826 /* remove a resolv_pidiface_info structure from _res_pidiface_list */
1827 static void _remove_pidiface_info_locked(int pid);
1828 /* get a resolv_pidiface_info structure from _res_pidiface_list with a certain pid */
1829 static struct resolv_pidiface_info* _get_pid_iface_info_locked(int pid);
1831 /* remove a resolv_pidiface_info structure from _res_uidiface_list */
1832 static int _remove_uidiface_info_locked(int uid_start, int uid_end);
1833 /* check if a range [low,high] overlaps with any already existing ranges in the uid=>iface map*/
1834 static int _resolv_check_uid_range_overlap_locked(int uid_start, int uid_end);
1835 /* get a resolv_uidiface_info structure from _res_uidiface_list with a certain uid */
1836 static struct resolv_uidiface_info* _get_uid_iface_info_locked(int uid);
1838 static void
1839 _res_cache_init(void)
1840 {
1841 const char* env = getenv(CONFIG_ENV);
1843 if (env && atoi(env) == 0) {
1844 /* the cache is disabled */
1845 return;
1846 }
1848 memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
1849 memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1850 memset(&_res_pidiface_list, 0, sizeof(_res_pidiface_list));
1851 memset(&_res_uidiface_list, 0, sizeof(_res_uidiface_list));
1852 pthread_mutex_init(&_res_cache_list_lock, NULL);
1853 pthread_mutex_init(&_res_pidiface_list_lock, NULL);
1854 pthread_mutex_init(&_res_uidiface_list_lock, NULL);
1855 }
1857 struct resolv_cache*
1858 __get_res_cache(const char* ifname)
1859 {
1860 struct resolv_cache *cache;
1862 pthread_once(&_res_cache_once, _res_cache_init);
1863 pthread_mutex_lock(&_res_cache_list_lock);
1865 char* iface;
1866 if (ifname == NULL || ifname[0] == '\0') {
1867 iface = _get_default_iface_locked();
1868 if (iface[0] == '\0') {
1869 char* tmp = _find_any_iface_name_locked();
1870 if (tmp) {
1871 iface = tmp;
1872 }
1873 }
1874 } else {
1875 iface = (char *) ifname;
1876 }
1878 cache = _get_res_cache_for_iface_locked(iface);
1880 pthread_mutex_unlock(&_res_cache_list_lock);
1881 XLOG("_get_res_cache: iface = %s, cache=%p\n", iface, cache);
1882 return cache;
1883 }
1885 static struct resolv_cache*
1886 _get_res_cache_for_iface_locked(const char* ifname)
1887 {
1888 if (ifname == NULL)
1889 return NULL;
1891 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1892 if (!cache) {
1893 struct resolv_cache_info* cache_info = _create_cache_info();
1894 if (cache_info) {
1895 cache = _resolv_cache_create();
1896 if (cache) {
1897 int len = sizeof(cache_info->ifname);
1898 cache_info->cache = cache;
1899 strncpy(cache_info->ifname, ifname, len - 1);
1900 cache_info->ifname[len - 1] = '\0';
1902 _insert_cache_info_locked(cache_info);
1903 } else {
1904 free(cache_info);
1905 }
1906 }
1907 }
1908 return cache;
1909 }
/* Flush the default interface's cache when the configuration generation
 * changes. If no default interface is set, the first interface that has
 * a cache is used instead. */
void
_resolv_cache_reset(unsigned  generation)
{
    XLOG("%s: generation=%d", __FUNCTION__, generation);

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    char* ifname = _get_default_iface_locked();
    // if default interface not set then use the first cache
    // associated with an interface as the default one.
    // Note: Copied the code from __get_res_cache since this
    // method will be deleted/obsolete when cache per interface
    // implemented all over
    if (ifname[0] == '\0') {
        struct resolv_cache_info* cache_info = _res_cache_list.next;
        while (cache_info) {
            if (cache_info->ifname[0] != '\0') {
                ifname = cache_info->ifname;
                break;
            }

            cache_info = cache_info->next;
        }
    }
    struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);

    if (cache != NULL) {
        /* only flush when the generation actually changed; the cache
         * lock is taken while _res_cache_list_lock is still held */
        pthread_mutex_lock( &cache->lock );
        if (cache->generation != generation) {
            _cache_flush_locked(cache);
            cache->generation = generation;
        }
        pthread_mutex_unlock( &cache->lock );
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
}
1950 void
1951 _resolv_flush_cache_for_default_iface(void)
1952 {
1953 char* ifname;
1955 pthread_once(&_res_cache_once, _res_cache_init);
1956 pthread_mutex_lock(&_res_cache_list_lock);
1958 ifname = _get_default_iface_locked();
1959 _flush_cache_for_iface_locked(ifname);
1961 pthread_mutex_unlock(&_res_cache_list_lock);
1962 }
1964 void
1965 _resolv_flush_cache_for_iface(const char* ifname)
1966 {
1967 pthread_once(&_res_cache_once, _res_cache_init);
1968 pthread_mutex_lock(&_res_cache_list_lock);
1970 _flush_cache_for_iface_locked(ifname);
1972 pthread_mutex_unlock(&_res_cache_list_lock);
1973 }
1975 static void
1976 _flush_cache_for_iface_locked(const char* ifname)
1977 {
1978 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1979 if (cache) {
1980 pthread_mutex_lock(&cache->lock);
1981 _cache_flush_locked(cache);
1982 pthread_mutex_unlock(&cache->lock);
1983 }
1984 }
1986 static struct resolv_cache_info*
1987 _create_cache_info(void)
1988 {
1989 struct resolv_cache_info* cache_info;
1991 cache_info = calloc(sizeof(*cache_info), 1);
1992 return cache_info;
1993 }
1995 static void
1996 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
1997 {
1998 struct resolv_cache_info* last;
2000 for (last = &_res_cache_list; last->next; last = last->next);
2002 last->next = cache_info;
2004 }
2006 static struct resolv_cache*
2007 _find_named_cache_locked(const char* ifname) {
2009 struct resolv_cache_info* info = _find_cache_info_locked(ifname);
2011 if (info != NULL) return info->cache;
2013 return NULL;
2014 }
2016 static struct resolv_cache_info*
2017 _find_cache_info_locked(const char* ifname)
2018 {
2019 if (ifname == NULL)
2020 return NULL;
2022 struct resolv_cache_info* cache_info = _res_cache_list.next;
2024 while (cache_info) {
2025 if (strcmp(cache_info->ifname, ifname) == 0) {
2026 break;
2027 }
2029 cache_info = cache_info->next;
2030 }
2031 return cache_info;
2032 }
2034 static char*
2035 _get_default_iface_locked(void)
2036 {
2038 char* iface = _res_default_ifname;
2040 return iface;
2041 }
2043 static char*
2044 _find_any_iface_name_locked( void ) {
2045 char* ifname = NULL;
2047 struct resolv_cache_info* cache_info = _res_cache_list.next;
2048 while (cache_info) {
2049 if (cache_info->ifname[0] != '\0') {
2050 ifname = cache_info->ifname;
2051 break;
2052 }
2054 cache_info = cache_info->next;
2055 }
2057 return ifname;
2058 }
2060 void
2061 _resolv_set_default_iface(const char* ifname)
2062 {
2063 XLOG("_resolv_set_default_if ifname %s\n",ifname);
2065 pthread_once(&_res_cache_once, _res_cache_init);
2066 pthread_mutex_lock(&_res_cache_list_lock);
2068 int size = sizeof(_res_default_ifname);
2069 memset(_res_default_ifname, 0, size);
2070 strncpy(_res_default_ifname, ifname, size - 1);
2071 _res_default_ifname[size - 1] = '\0';
2073 pthread_mutex_unlock(&_res_cache_list_lock);
2074 }
/* Install the name servers and search domains for 'ifname', creating
 * the interface's cache if needed. If the server list is unchanged this
 * is a no-op; otherwise the old servers are freed, the new ones resolved
 * and stored, the search-domain list re-parsed, and the cache flushed.
 * NOTE(review): 'domains' is passed straight to strlcpy — callers
 * presumably never pass NULL; confirm before relying on it. */
void
_resolv_set_nameservers_for_iface(const char* ifname, const char** servers, int numservers,
        const char *domains)
{
    int i, rt, index;
    struct addrinfo hints;
    char sbuf[NI_MAXSERV];
    register char *cp;
    int *offset;

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    // creates the cache if not created
    _get_res_cache_for_iface_locked(ifname);

    struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);

    if (cache_info != NULL &&
            !_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
        // free current before adding new
        _free_nameservers_locked(cache_info);

        /* numeric-host lookup only: servers[] must be literal addresses */
        memset(&hints, 0, sizeof(hints));
        hints.ai_family = PF_UNSPEC;
        hints.ai_socktype = SOCK_DGRAM; /*dummy*/
        hints.ai_flags = AI_NUMERICHOST;
        sprintf(sbuf, "%u", NAMESERVER_PORT);

        /* 'index' only advances on success, so the stored arrays stay
         * densely packed even when some servers fail to parse */
        index = 0;
        for (i = 0; i < numservers && i < MAXNS; i++) {
            rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
            if (rt == 0) {
                cache_info->nameservers[index] = strdup(servers[i]);
                index++;
                XLOG("_resolv_set_nameservers_for_iface: iface = %s, addr = %s\n",
                        ifname, servers[i]);
            } else {
                cache_info->nsaddrinfo[index] = NULL;
            }
        }

        // code moved from res_init.c, load_domain_search_list
        strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
        if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
            *cp = '\0';
        /* split the whitespace-separated domain list in place, recording
         * each domain as an offset into defdname */
        cp = cache_info->defdname;
        offset = cache_info->dnsrch_offset;
        while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
            while (*cp == ' ' || *cp == '\t') /* skip leading white space */
                cp++;
            if (*cp == '\0') /* stop if nothing more to do */
                break;
            *offset++ = cp - cache_info->defdname; /* record this search domain */
            while (*cp) { /* zero-terminate it */
                if (*cp == ' '|| *cp == '\t') {
                    *cp++ = '\0';
                    break;
                }
                cp++;
            }
        }
        *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */

        // flush cache since new settings
        _flush_cache_for_iface_locked(ifname);

    }

    pthread_mutex_unlock(&_res_cache_list_lock);
}
2148 static int
2149 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
2150 const char** servers, int numservers)
2151 {
2152 int i;
2153 char** ns;
2154 int equal = 1;
2156 // compare each name server against current name servers
2157 if (numservers > MAXNS) numservers = MAXNS;
2158 for (i = 0; i < numservers && equal; i++) {
2159 ns = cache_info->nameservers;
2160 equal = 0;
2161 while(*ns) {
2162 if (strcmp(*ns, servers[i]) == 0) {
2163 equal = 1;
2164 break;
2165 }
2166 ns++;
2167 }
2168 }
2170 return equal;
2171 }
2173 static void
2174 _free_nameservers_locked(struct resolv_cache_info* cache_info)
2175 {
2176 int i;
2177 for (i = 0; i <= MAXNS; i++) {
2178 free(cache_info->nameservers[i]);
2179 cache_info->nameservers[i] = NULL;
2180 if (cache_info->nsaddrinfo[i] != NULL) {
2181 freeaddrinfo(cache_info->nsaddrinfo[i]);
2182 cache_info->nsaddrinfo[i] = NULL;
2183 }
2184 }
2185 }
2187 int
2188 _resolv_cache_get_nameserver(int n, char* addr, int addrLen)
2189 {
2190 char *ifname;
2191 int result = 0;
2193 pthread_once(&_res_cache_once, _res_cache_init);
2194 pthread_mutex_lock(&_res_cache_list_lock);
2196 ifname = _get_default_iface_locked();
2197 result = _get_nameserver_locked(ifname, n, addr, addrLen);
2199 pthread_mutex_unlock(&_res_cache_list_lock);
2200 return result;
2201 }
2203 static int
2204 _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
2205 {
2206 int len = 0;
2207 char* ns;
2208 struct resolv_cache_info* cache_info;
2210 if (n < 1 || n > MAXNS || !addr)
2211 return 0;
2213 cache_info = _find_cache_info_locked(ifname);
2214 if (cache_info) {
2215 ns = cache_info->nameservers[n - 1];
2216 if (ns) {
2217 len = strlen(ns);
2218 if (len < addrLen) {
2219 strncpy(addr, ns, len);
2220 addr[len] = '\0';
2221 } else {
2222 len = 0;
2223 }
2224 }
2225 }
2227 return len;
2228 }
2230 struct addrinfo*
2231 _cache_get_nameserver_addr(int n)
2232 {
2233 struct addrinfo *result;
2234 char* ifname;
2236 pthread_once(&_res_cache_once, _res_cache_init);
2237 pthread_mutex_lock(&_res_cache_list_lock);
2239 ifname = _get_default_iface_locked();
2241 result = _get_nameserver_addr_locked(ifname, n);
2242 pthread_mutex_unlock(&_res_cache_list_lock);
2243 return result;
2244 }
2246 static struct addrinfo*
2247 _get_nameserver_addr_locked(const char* ifname, int n)
2248 {
2249 struct addrinfo* ai = NULL;
2250 struct resolv_cache_info* cache_info;
2252 if (n < 1 || n > MAXNS)
2253 return NULL;
2255 cache_info = _find_cache_info_locked(ifname);
2256 if (cache_info) {
2257 ai = cache_info->nsaddrinfo[n - 1];
2258 }
2259 return ai;
2260 }
2262 void
2263 _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
2264 {
2265 pthread_once(&_res_cache_once, _res_cache_init);
2266 pthread_mutex_lock(&_res_cache_list_lock);
2267 struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2268 if (cache_info) {
2269 memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
2271 if (DEBUG) {
2272 XLOG("address of interface %s is %s\n",
2273 ifname, inet_ntoa(cache_info->ifaddr));
2274 }
2275 }
2276 pthread_mutex_unlock(&_res_cache_list_lock);
2277 }
2279 struct in_addr*
2280 _resolv_get_addr_of_default_iface(void)
2281 {
2282 struct in_addr* ai = NULL;
2283 char* ifname;
2285 pthread_once(&_res_cache_once, _res_cache_init);
2286 pthread_mutex_lock(&_res_cache_list_lock);
2287 ifname = _get_default_iface_locked();
2288 ai = _get_addr_locked(ifname);
2289 pthread_mutex_unlock(&_res_cache_list_lock);
2291 return ai;
2292 }
2294 struct in_addr*
2295 _resolv_get_addr_of_iface(const char* ifname)
2296 {
2297 struct in_addr* ai = NULL;
2299 pthread_once(&_res_cache_once, _res_cache_init);
2300 pthread_mutex_lock(&_res_cache_list_lock);
2301 ai =_get_addr_locked(ifname);
2302 pthread_mutex_unlock(&_res_cache_list_lock);
2303 return ai;
2304 }
2306 static struct in_addr*
2307 _get_addr_locked(const char * ifname)
2308 {
2309 struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2310 if (cache_info) {
2311 return &cache_info->ifaddr;
2312 }
2313 return NULL;
2314 }
2316 static void
2317 _remove_pidiface_info_locked(int pid) {
2318 struct resolv_pidiface_info* result = &_res_pidiface_list;
2319 struct resolv_pidiface_info* prev = NULL;
2321 while (result != NULL && result->pid != pid) {
2322 prev = result;
2323 result = result->next;
2324 }
2325 if (prev != NULL && result != NULL) {
2326 prev->next = result->next;
2327 free(result);
2328 }
2329 }
2331 static struct resolv_pidiface_info*
2332 _get_pid_iface_info_locked(int pid)
2333 {
2334 struct resolv_pidiface_info* result = &_res_pidiface_list;
2335 while (result != NULL && result->pid != pid) {
2336 result = result->next;
2337 }
2339 return result;
2340 }
2342 void
2343 _resolv_set_iface_for_pid(const char* ifname, int pid)
2344 {
2345 // make sure the pid iface list is created
2346 pthread_once(&_res_cache_once, _res_cache_init);
2347 pthread_mutex_lock(&_res_pidiface_list_lock);
2349 struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2350 if (!pidiface_info) {
2351 pidiface_info = calloc(sizeof(*pidiface_info), 1);
2352 if (pidiface_info) {
2353 pidiface_info->pid = pid;
2354 int len = sizeof(pidiface_info->ifname);
2355 strncpy(pidiface_info->ifname, ifname, len - 1);
2356 pidiface_info->ifname[len - 1] = '\0';
2358 pidiface_info->next = _res_pidiface_list.next;
2359 _res_pidiface_list.next = pidiface_info;
2361 XLOG("_resolv_set_iface_for_pid: pid %d , iface %s\n", pid, ifname);
2362 } else {
2363 XLOG("_resolv_set_iface_for_pid failing calloc");
2364 }
2365 }
2367 pthread_mutex_unlock(&_res_pidiface_list_lock);
2368 }
2370 void
2371 _resolv_clear_iface_for_pid(int pid)
2372 {
2373 pthread_once(&_res_cache_once, _res_cache_init);
2374 pthread_mutex_lock(&_res_pidiface_list_lock);
2376 _remove_pidiface_info_locked(pid);
2378 XLOG("_resolv_clear_iface_for_pid: pid %d\n", pid);
2380 pthread_mutex_unlock(&_res_pidiface_list_lock);
2381 }
2383 int
2384 _resolv_get_pids_associated_interface(int pid, char* buff, int buffLen)
2385 {
2386 int len = 0;
2388 if (!buff) {
2389 return -1;
2390 }
2392 pthread_once(&_res_cache_once, _res_cache_init);
2393 pthread_mutex_lock(&_res_pidiface_list_lock);
2395 struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2396 buff[0] = '\0';
2397 if (pidiface_info) {
2398 len = strlen(pidiface_info->ifname);
2399 if (len < buffLen) {
2400 strncpy(buff, pidiface_info->ifname, len);
2401 buff[len] = '\0';
2402 }
2403 }
2405 XLOG("_resolv_get_pids_associated_interface buff: %s\n", buff);
2407 pthread_mutex_unlock(&_res_pidiface_list_lock);
2409 return len;
2410 }
2412 static int
2413 _remove_uidiface_info_locked(int uid_start, int uid_end) {
2414 struct resolv_uidiface_info* result = _res_uidiface_list.next;
2415 struct resolv_uidiface_info* prev = &_res_uidiface_list;
2417 while (result != NULL && result->uid_start != uid_start && result->uid_end != uid_end) {
2418 prev = result;
2419 result = result->next;
2420 }
2421 if (prev != NULL && result != NULL) {
2422 prev->next = result->next;
2423 free(result);
2424 return 0;
2425 }
2426 errno = EINVAL;
2427 return -1;
2428 }
2430 static struct resolv_uidiface_info*
2431 _get_uid_iface_info_locked(int uid)
2432 {
2433 struct resolv_uidiface_info* result = _res_uidiface_list.next;
2434 while (result != NULL && !(result->uid_start <= uid && result->uid_end >= uid)) {
2435 result = result->next;
2436 }
2438 return result;
2439 }
2441 static int
2442 _resolv_check_uid_range_overlap_locked(int uid_start, int uid_end)
2443 {
2444 struct resolv_uidiface_info* cur = _res_uidiface_list.next;
2445 while (cur != NULL) {
2446 if (cur->uid_start <= uid_end && cur->uid_end >= uid_start) {
2447 return -1;
2448 }
2449 cur = cur->next;
2450 }
2451 return 0;
2452 }
2454 void
2455 _resolv_clear_iface_uid_range_mapping()
2456 {
2457 pthread_once(&_res_cache_once, _res_cache_init);
2458 pthread_mutex_lock(&_res_uidiface_list_lock);
2459 struct resolv_uidiface_info *current = _res_uidiface_list.next;
2460 struct resolv_uidiface_info *next;
2461 while (current != NULL) {
2462 next = current->next;
2463 free(current);
2464 current = next;
2465 }
2466 _res_uidiface_list.next = NULL;
2467 pthread_mutex_unlock(&_res_uidiface_list_lock);
2468 }
2470 void
2471 _resolv_clear_iface_pid_mapping()
2472 {
2473 pthread_once(&_res_cache_once, _res_cache_init);
2474 pthread_mutex_lock(&_res_pidiface_list_lock);
2475 struct resolv_pidiface_info *current = _res_pidiface_list.next;
2476 struct resolv_pidiface_info *next;
2477 while (current != NULL) {
2478 next = current->next;
2479 free(current);
2480 current = next;
2481 }
2482 _res_pidiface_list.next = NULL;
2483 pthread_mutex_unlock(&_res_pidiface_list_lock);
2484 }
2486 int
2487 _resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end)
2488 {
2489 int rv = 0;
2490 struct resolv_uidiface_info* uidiface_info;
2491 // make sure the uid iface list is created
2492 pthread_once(&_res_cache_once, _res_cache_init);
2493 if (uid_start > uid_end) {
2494 errno = EINVAL;
2495 return -1;
2496 }
2497 pthread_mutex_lock(&_res_uidiface_list_lock);
2498 //check that we aren't adding an overlapping range
2499 if (!_resolv_check_uid_range_overlap_locked(uid_start, uid_end)) {
2500 uidiface_info = calloc(sizeof(*uidiface_info), 1);
2501 if (uidiface_info) {
2502 uidiface_info->uid_start = uid_start;
2503 uidiface_info->uid_end = uid_end;
2504 int len = sizeof(uidiface_info->ifname);
2505 strncpy(uidiface_info->ifname, ifname, len - 1);
2506 uidiface_info->ifname[len - 1] = '\0';
2508 uidiface_info->next = _res_uidiface_list.next;
2509 _res_uidiface_list.next = uidiface_info;
2511 XLOG("_resolv_set_iface_for_uid_range: [%d,%d], iface %s\n", uid_start, uid_end,
2512 ifname);
2513 } else {
2514 XLOG("_resolv_set_iface_for_uid_range failing calloc\n");
2515 rv = -1;
2516 errno = EINVAL;
2517 }
2518 } else {
2519 XLOG("_resolv_set_iface_for_uid_range range [%d,%d] overlaps\n", uid_start, uid_end);
2520 rv = -1;
2521 errno = EINVAL;
2522 }
2524 pthread_mutex_unlock(&_res_uidiface_list_lock);
2525 return rv;
2526 }
2528 int
2529 _resolv_clear_iface_for_uid_range(int uid_start, int uid_end)
2530 {
2531 pthread_once(&_res_cache_once, _res_cache_init);
2532 pthread_mutex_lock(&_res_uidiface_list_lock);
2534 int rv = _remove_uidiface_info_locked(uid_start, uid_end);
2536 XLOG("_resolv_clear_iface_for_uid_range: [%d,%d]\n", uid_start, uid_end);
2538 pthread_mutex_unlock(&_res_uidiface_list_lock);
2540 return rv;
2541 }
2543 int
2544 _resolv_get_uids_associated_interface(int uid, char* buff, int buffLen)
2545 {
2546 int len = 0;
2548 if (!buff) {
2549 return -1;
2550 }
2552 pthread_once(&_res_cache_once, _res_cache_init);
2553 pthread_mutex_lock(&_res_uidiface_list_lock);
2555 struct resolv_uidiface_info* uidiface_info = _get_uid_iface_info_locked(uid);
2556 buff[0] = '\0';
2557 if (uidiface_info) {
2558 len = strlen(uidiface_info->ifname);
2559 if (len < buffLen) {
2560 strncpy(buff, uidiface_info->ifname, len);
2561 buff[len] = '\0';
2562 }
2563 }
2565 XLOG("_resolv_get_uids_associated_interface buff: %s\n", buff);
2567 pthread_mutex_unlock(&_res_uidiface_list_lock);
2569 return len;
2570 }
2572 size_t
2573 _resolv_get_default_iface(char* buff, size_t buffLen)
2574 {
2575 if (!buff || buffLen == 0) {
2576 return 0;
2577 }
2579 pthread_once(&_res_cache_once, _res_cache_init);
2580 pthread_mutex_lock(&_res_cache_list_lock);
2582 char* ifname = _get_default_iface_locked(); // never null, but may be empty
2584 // if default interface not set give up.
2585 if (ifname[0] == '\0') {
2586 pthread_mutex_unlock(&_res_cache_list_lock);
2587 return 0;
2588 }
2590 size_t len = strlen(ifname);
2591 if (len < buffLen) {
2592 strncpy(buff, ifname, len);
2593 buff[len] = '\0';
2594 } else {
2595 buff[0] = '\0';
2596 }
2598 pthread_mutex_unlock(&_res_cache_list_lock);
2600 return len;
2601 }
2603 void
2604 _resolv_populate_res_for_iface(res_state statp)
2605 {
2606 if (statp == NULL) {
2607 return;
2608 }
2610 if (statp->iface[0] == '\0') { // no interface set assign default
2611 size_t if_len = _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
2612 if (if_len + 1 > sizeof(statp->iface)) {
2613 XLOG("%s: INTERNAL_ERROR: can't fit interface name into statp->iface.\n", __FUNCTION__);
2614 return;
2615 }
2616 if (if_len == 0) {
2617 XLOG("%s: INTERNAL_ERROR: can't find any suitable interfaces.\n", __FUNCTION__);
2618 return;
2619 }
2620 }
2622 pthread_once(&_res_cache_once, _res_cache_init);
2623 pthread_mutex_lock(&_res_cache_list_lock);
2625 struct resolv_cache_info* info = _find_cache_info_locked(statp->iface);
2626 if (info != NULL) {
2627 int nserv;
2628 struct addrinfo* ai;
2629 XLOG("_resolv_populate_res_for_iface: %s\n", statp->iface);
2630 for (nserv = 0; nserv < MAXNS; nserv++) {
2631 ai = info->nsaddrinfo[nserv];
2632 if (ai == NULL) {
2633 break;
2634 }
2636 if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
2637 if (statp->_u._ext.ext != NULL) {
2638 memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
2639 statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2640 } else {
2641 if ((size_t) ai->ai_addrlen
2642 <= sizeof(statp->nsaddr_list[0])) {
2643 memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
2644 ai->ai_addrlen);
2645 } else {
2646 statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2647 }
2648 }
2649 } else {
2650 XLOG("_resolv_populate_res_for_iface found too long addrlen");
2651 }
2652 }
2653 statp->nscount = nserv;
2654 // now do search domains. Note that we cache the offsets as this code runs alot
2655 // but the setting/offset-computer only runs when set/changed
2656 strlcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
2657 register char **pp = statp->dnsrch;
2658 register int *p = info->dnsrch_offset;
2659 while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
2660 *pp++ = &statp->defdname + *p++;
2661 }
2662 }
2663 pthread_mutex_unlock(&_res_cache_list_lock);
2664 }