summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVladimir Chtchetkine2011-11-30 12:20:27 -0600
committerXavier Ducrohet2011-12-05 15:14:23 -0600
commitce48083fc90cee807dd66608e2c3e496145af9ff (patch)
tree505652033adaca75e558edcb86c455da6eaf9ce4
parentf9c1fcc505d59fd87392e6af7341b1737f186081 (diff)
downloadplatform-system-core-ce48083fc90cee807dd66608e2c3e496145af9ff.tar.gz
platform-system-core-ce48083fc90cee807dd66608e2c3e496145af9ff.tar.xz
platform-system-core-ce48083fc90cee807dd66608e2c3e496145af9ff.zip
Fixes ADB crash on Windows due to large number of connections. do not merge.
The reason for the crash is that ADB on Windows uses WaitForMultipleObjects to wait on connection events. When number of connections exceeds 64, ADB crashes, because WaitForMultipleObjects API doesn't support more than 64 handles. This CL contains a fixer routine that allows waiting on an arbitrary number of handles. (cherry picked from commit ac52833e48f46dfd9f7c8f86236ee8b437850c0a) Change-Id: I8ad264765e5b38d01a31e42b445f97ea86e49948
-rw-r--r--adb/sysdeps_win32.c205
1 files changed, 190 insertions, 15 deletions
diff --git a/adb/sysdeps_win32.c b/adb/sysdeps_win32.c
index ced91e838..c42671811 100644
--- a/adb/sysdeps_win32.c
+++ b/adb/sysdeps_win32.c
@@ -352,7 +352,7 @@ int adb_open(const char* path, int options)
352 return -1; 352 return -1;
353 } 353 }
354 } 354 }
355 355
356 snprintf( f->name, sizeof(f->name), "%d(%s)", _fh_to_int(f), path ); 356 snprintf( f->name, sizeof(f->name), "%d(%s)", _fh_to_int(f), path );
357 D( "adb_open: '%s' => fd %d\n", path, _fh_to_int(f) ); 357 D( "adb_open: '%s' => fd %d\n", path, _fh_to_int(f) );
358 return _fh_to_int(f); 358 return _fh_to_int(f);
@@ -837,7 +837,7 @@ static void bip_dump_hex( const unsigned char* ptr, size_t len )
837 837
838 if (len2 > 8) len2 = 8; 838 if (len2 > 8) len2 = 8;
839 839
840 for (nn = 0; nn < len2; nn++) 840 for (nn = 0; nn < len2; nn++)
841 printf("%02x", ptr[nn]); 841 printf("%02x", ptr[nn]);
842 printf(" "); 842 printf(" ");
843 843
@@ -994,7 +994,7 @@ Exit:
994 SetEvent( bip->evt_read ); 994 SetEvent( bip->evt_read );
995 } 995 }
996 996
997 BIPD(( "bip_buffer_write: exit %d->%d count %d (as=%d ae=%d be=%d cw=%d cr=%d\n", 997 BIPD(( "bip_buffer_write: exit %d->%d count %d (as=%d ae=%d be=%d cw=%d cr=%d\n",
998 bip->fdin, bip->fdout, count, bip->a_start, bip->a_end, bip->b_end, bip->can_write, bip->can_read )); 998 bip->fdin, bip->fdout, count, bip->a_start, bip->a_end, bip->b_end, bip->can_write, bip->can_read ));
999 LeaveCriticalSection( &bip->lock ); 999 LeaveCriticalSection( &bip->lock );
1000 1000
@@ -1018,7 +1018,7 @@ bip_buffer_read( BipBuffer bip, void* dst, int len )
1018 LeaveCriticalSection( &bip->lock ); 1018 LeaveCriticalSection( &bip->lock );
1019 errno = EAGAIN; 1019 errno = EAGAIN;
1020 return -1; 1020 return -1;
1021#else 1021#else
1022 int ret; 1022 int ret;
1023 LeaveCriticalSection( &bip->lock ); 1023 LeaveCriticalSection( &bip->lock );
1024 1024
@@ -1087,14 +1087,14 @@ Exit:
1087 } 1087 }
1088 1088
1089 BIPDUMP( (const unsigned char*)dst - count, count ); 1089 BIPDUMP( (const unsigned char*)dst - count, count );
1090 BIPD(( "bip_buffer_read: exit %d->%d count %d (as=%d ae=%d be=%d cw=%d cr=%d\n", 1090 BIPD(( "bip_buffer_read: exit %d->%d count %d (as=%d ae=%d be=%d cw=%d cr=%d\n",
1091 bip->fdin, bip->fdout, count, bip->a_start, bip->a_end, bip->b_end, bip->can_write, bip->can_read )); 1091 bip->fdin, bip->fdout, count, bip->a_start, bip->a_end, bip->b_end, bip->can_write, bip->can_read ));
1092 LeaveCriticalSection( &bip->lock ); 1092 LeaveCriticalSection( &bip->lock );
1093 1093
1094 return count; 1094 return count;
1095} 1095}
1096 1096
1097typedef struct SocketPairRec_ 1097typedef struct SocketPairRec_
1098{ 1098{
1099 BipBufferRec a2b_bip; 1099 BipBufferRec a2b_bip;
1100 BipBufferRec b2a_bip; 1100 BipBufferRec b2a_bip;
@@ -1400,7 +1400,7 @@ event_looper_hook( EventLooper looper, int fd, int events )
1400 f->clazz->_fh_hook( f, events & ~node->wanted, node ); 1400 f->clazz->_fh_hook( f, events & ~node->wanted, node );
1401 node->wanted |= events; 1401 node->wanted |= events;
1402 } else { 1402 } else {
1403 D("event_looper_hook: ignoring events %x for %d wanted=%x)\n", 1403 D("event_looper_hook: ignoring events %x for %d wanted=%x)\n",
1404 events, fd, node->wanted); 1404 events, fd, node->wanted);
1405 } 1405 }
1406} 1406}
@@ -1426,6 +1426,180 @@ event_looper_unhook( EventLooper looper, int fd, int events )
1426 } 1426 }
1427} 1427}
1428 1428
1429/*
1430 * A fixer for WaitForMultipleObjects on condition that there are more than 64
1431 * handles to wait on.
1432 *
1433 * In certain cases DDMS may establish more than 64 connections with ADB. For
1434 * instance, this may happen if there are more than 64 processes running on a
1435 * device, or there are multiple devices connected (including the emulator) with
1436 * the combined number of running processes greater than 64. In this case using
1437 * WaitForMultipleObjects to wait on connection events simply wouldn't cut it,
1438 * because of the API limitations (64 handles max). So, we need to provide a way
1439 * to scale WaitForMultipleObjects to accept an arbitrary number of handles. The
1440 * easiest (and "Microsoft recommended") way to do that would be dividing the
1441 * handle array into chunks with the chunk size less than 64, and fire up as many
1442 * waiting threads as there are chunks. Then each thread would wait on a chunk of
1443 * handles, and will report back to the caller which handle has been set.
1444 * Here is the implementation of that algorithm.
1445 */
1446
1447/* Number of handles to wait on in each wating thread. */
1448#define WAIT_ALL_CHUNK_SIZE 63
1449
1450/* Descriptor for a wating thread */
1451typedef struct WaitForAllParam {
1452 /* A handle to an event to signal when waiting is over. This handle is shared
1453 * accross all the waiting threads, so each waiting thread knows when any
1454 * other thread has exited, so it can exit too. */
1455 HANDLE main_event;
1456 /* Upon exit from a waiting thread contains the index of the handle that has
1457 * been signaled. The index is an absolute index of the signaled handle in
1458 * the original array. This pointer is shared accross all the waiting threads
1459 * and it's not guaranteed (due to a race condition) that when all the
1460 * waiting threads exit, the value contained here would indicate the first
1461 * handle that was signaled. This is fine, because the caller cares only
1462 * about any handle being signaled. It doesn't care about the order, nor
1463 * about the whole list of handles that were signaled. */
1464 LONG volatile *signaled_index;
1465 /* Array of handles to wait on in a waiting thread. */
1466 HANDLE* handles;
1467 /* Number of handles in 'handles' array to wait on. */
1468 int handles_count;
1469 /* Index inside the main array of the first handle in the 'handles' array. */
1470 int first_handle_index;
1471 /* Waiting thread handle. */
1472 HANDLE thread;
1473} WaitForAllParam;
1474
1475/* Waiting thread routine. */
1476static unsigned __stdcall
1477_in_waiter_thread(void* arg)
1478{
1479 HANDLE wait_on[WAIT_ALL_CHUNK_SIZE + 1];
1480 int res;
1481 WaitForAllParam* const param = (WaitForAllParam*)arg;
1482
1483 /* We have to wait on the main_event in order to be notified when any of the
1484 * sibling threads is exiting. */
1485 wait_on[0] = param->main_event;
1486 /* The rest of the handles go behind the main event handle. */
1487 memcpy(wait_on + 1, param->handles, param->handles_count * sizeof(HANDLE));
1488
1489 res = WaitForMultipleObjects(param->handles_count + 1, wait_on, FALSE, INFINITE);
1490 if (res > 0 && res < (param->handles_count + 1)) {
1491 /* One of the original handles got signaled. Save its absolute index into
1492 * the output variable. */
1493 InterlockedCompareExchange(param->signaled_index,
1494 res - 1L + param->first_handle_index, -1L);
1495 }
1496
1497 /* Notify the caller (and the siblings) that the wait is over. */
1498 SetEvent(param->main_event);
1499
1500 _endthreadex(0);
1501 return 0;
1502}
1503
1504/* WaitForMultipeObjects fixer routine.
1505 * Param:
1506 * handles Array of handles to wait on.
1507 * handles_count Number of handles in the array.
1508 * Return:
1509 * (>= 0 && < handles_count) - Index of the signaled handle in the array, or
1510 * WAIT_FAILED on an error.
1511 */
1512static int
1513_wait_for_all(HANDLE* handles, int handles_count)
1514{
1515 WaitForAllParam* threads;
1516 HANDLE main_event;
1517 int chunks, chunk, remains;
1518
1519 /* This variable is going to be accessed by several threads at the same time,
1520 * this is bound to fail randomly when the core is run on multi-core machines.
1521 * To solve this, we need to do the following (1 _and_ 2):
1522 * 1. Use the "volatile" qualifier to ensure the compiler doesn't optimize
1523 * out the reads/writes in this function unexpectedly.
1524 * 2. Ensure correct memory ordering. The "simple" way to do that is to wrap
1525 * all accesses inside a critical section. But we can also use
1526 * InterlockedCompareExchange() which always provide a full memory barrier
1527 * on Win32.
1528 */
1529 volatile LONG sig_index = -1;
1530
1531 /* Calculate number of chunks, and allocate thread param array. */
1532 chunks = handles_count / WAIT_ALL_CHUNK_SIZE;
1533 remains = handles_count % WAIT_ALL_CHUNK_SIZE;
1534 threads = (WaitForAllParam*)malloc((chunks + (remains ? 1 : 0)) *
1535 sizeof(WaitForAllParam));
1536 if (threads == NULL) {
1537 D("Unable to allocate thread array for %d handles.", handles_count);
1538 return (int)WAIT_FAILED;
1539 }
1540
1541 /* Create main event to wait on for all waiting threads. This is a "manualy
1542 * reset" event that will remain set once it was set. */
1543 main_event = CreateEvent(NULL, TRUE, FALSE, NULL);
1544 if (main_event == NULL) {
1545 D("Unable to create main event. Error: %d", GetLastError());
1546 free(threads);
1547 return (int)WAIT_FAILED;
1548 }
1549
1550 /*
1551 * Initialize waiting thread parameters.
1552 */
1553
1554 for (chunk = 0; chunk < chunks; chunk++) {
1555 threads[chunk].main_event = main_event;
1556 threads[chunk].signaled_index = &sig_index;
1557 threads[chunk].first_handle_index = WAIT_ALL_CHUNK_SIZE * chunk;
1558 threads[chunk].handles = handles + threads[chunk].first_handle_index;
1559 threads[chunk].handles_count = WAIT_ALL_CHUNK_SIZE;
1560 }
1561 if (remains) {
1562 threads[chunk].main_event = main_event;
1563 threads[chunk].signaled_index = &sig_index;
1564 threads[chunk].first_handle_index = WAIT_ALL_CHUNK_SIZE * chunk;
1565 threads[chunk].handles = handles + threads[chunk].first_handle_index;
1566 threads[chunk].handles_count = remains;
1567 chunks++;
1568 }
1569
1570 /* Start the waiting threads. */
1571 for (chunk = 0; chunk < chunks; chunk++) {
1572 /* Note that using adb_thread_create is not appropriate here, since we
1573 * need a handle to wait on for thread termination. */
1574 threads[chunk].thread = (HANDLE)_beginthreadex(NULL, 0, _in_waiter_thread,
1575 &threads[chunk], 0, NULL);
1576 if (threads[chunk].thread == NULL) {
1577 /* Unable to create a waiter thread. Collapse. */
1578 D("Unable to create a waiting thread %d of %d. errno=%d",
1579 chunk, chunks, errno);
1580 chunks = chunk;
1581 SetEvent(main_event);
1582 break;
1583 }
1584 }
1585
1586 /* Wait on any of the threads to get signaled. */
1587 WaitForSingleObject(main_event, INFINITE);
1588
1589 /* Wait on all the waiting threads to exit. */
1590 for (chunk = 0; chunk < chunks; chunk++) {
1591 WaitForSingleObject(threads[chunk].thread, INFINITE);
1592 CloseHandle(threads[chunk].thread);
1593 }
1594
1595 CloseHandle(main_event);
1596 free(threads);
1597
1598
1599 const int ret = (int)InterlockedCompareExchange(&sig_index, -1, -1);
1600 return (ret >= 0) ? ret : (int)WAIT_FAILED;
1601}
1602
1429static EventLooperRec win32_looper; 1603static EventLooperRec win32_looper;
1430 1604
1431static void fdevent_init(void) 1605static void fdevent_init(void)
@@ -1494,7 +1668,7 @@ static void fdevent_process()
1494 { 1668 {
1495 looper->htab_count = 0; 1669 looper->htab_count = 0;
1496 1670
1497 for (hook = looper->hooks; hook; hook = hook->next) 1671 for (hook = looper->hooks; hook; hook = hook->next)
1498 { 1672 {
1499 if (hook->start && !hook->start(hook)) { 1673 if (hook->start && !hook->start(hook)) {
1500 D( "fdevent_process: error when starting a hook\n" ); 1674 D( "fdevent_process: error when starting a hook\n" );
@@ -1525,10 +1699,11 @@ static void fdevent_process()
1525 1699
1526 D( "adb_win32: waiting for %d events\n", looper->htab_count ); 1700 D( "adb_win32: waiting for %d events\n", looper->htab_count );
1527 if (looper->htab_count > MAXIMUM_WAIT_OBJECTS) { 1701 if (looper->htab_count > MAXIMUM_WAIT_OBJECTS) {
1528 D("handle count %d exceeds MAXIMUM_WAIT_OBJECTS, aborting!\n", looper->htab_count); 1702 D("handle count %d exceeds MAXIMUM_WAIT_OBJECTS.\n", looper->htab_count);
1529 abort(); 1703 wait_ret = _wait_for_all(looper->htab, looper->htab_count);
1704 } else {
1705 wait_ret = WaitForMultipleObjects( looper->htab_count, looper->htab, FALSE, INFINITE );
1530 } 1706 }
1531 wait_ret = WaitForMultipleObjects( looper->htab_count, looper->htab, FALSE, INFINITE );
1532 if (wait_ret == (int)WAIT_FAILED) { 1707 if (wait_ret == (int)WAIT_FAILED) {
1533 D( "adb_win32: wait failed, error %ld\n", GetLastError() ); 1708 D( "adb_win32: wait failed, error %ld\n", GetLastError() );
1534 } else { 1709 } else {
@@ -1669,7 +1844,7 @@ void fdevent_destroy(fdevent *fde)
1669 fdevent_remove(fde); 1844 fdevent_remove(fde);
1670} 1845}
1671 1846
1672void fdevent_install(fdevent *fde, int fd, fd_func func, void *arg) 1847void fdevent_install(fdevent *fde, int fd, fd_func func, void *arg)
1673{ 1848{
1674 memset(fde, 0, sizeof(fdevent)); 1849 memset(fde, 0, sizeof(fdevent));
1675 fde->state = FDE_ACTIVE; 1850 fde->state = FDE_ACTIVE;
@@ -1691,7 +1866,7 @@ void fdevent_remove(fdevent *fde)
1691 1866
1692 if(fde->state & FDE_ACTIVE) { 1867 if(fde->state & FDE_ACTIVE) {
1693 fdevent_disconnect(fde); 1868 fdevent_disconnect(fde);
1694 dump_fde(fde, "disconnect"); 1869 dump_fde(fde, "disconnect");
1695 fdevent_unregister(fde); 1870 fdevent_unregister(fde);
1696 } 1871 }
1697 1872
@@ -1917,7 +2092,7 @@ static void _event_socketpair_prepare( EventHook hook )
1917 if (hook->wanted & FDE_READ && rbip->can_read) 2092 if (hook->wanted & FDE_READ && rbip->can_read)
1918 hook->ready |= FDE_READ; 2093 hook->ready |= FDE_READ;
1919 2094
1920 if (hook->wanted & FDE_WRITE && wbip->can_write) 2095 if (hook->wanted & FDE_WRITE && wbip->can_write)
1921 hook->ready |= FDE_WRITE; 2096 hook->ready |= FDE_WRITE;
1922 } 2097 }
1923 2098
@@ -1938,7 +2113,7 @@ static void _event_socketpair_prepare( EventHook hook )
1938 D("_event_socketpair_start: can't handle FDE_READ+FDE_WRITE\n" ); 2113 D("_event_socketpair_start: can't handle FDE_READ+FDE_WRITE\n" );
1939 return 0; 2114 return 0;
1940 } 2115 }
1941 D( "_event_socketpair_start: hook %s for %x wanted=%x\n", 2116 D( "_event_socketpair_start: hook %s for %x wanted=%x\n",
1942 hook->fh->name, _fh_to_int(fh), hook->wanted); 2117 hook->fh->name, _fh_to_int(fh), hook->wanted);
1943 return 1; 2118 return 1;
1944} 2119}