1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
35 /* Handle HCI Event packets */
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 * On failure, report the failed stop-discovery attempt to mgmt; on
 * success, clear the inquiry flag and move discovery to STOPPED before
 * completing the request.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	/* Check for connection work held back while inquiry was active */
	hci_conn_check_pending(hdev);
}
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
67 if (status)
68 return;
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
79 if (status)
80 return;
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84 hci_conn_check_pending(hdev);
85 }
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * No state needs updating; only logged for debugging. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
100 if (rp->status)
101 return;
103 hci_dev_lock(hdev);
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
113 hci_dev_unlock(hdev);
114 }
/* Command Complete handler for HCI_OP_READ_LINK_POLICY: cache the
 * per-connection link policy reported by the controller. */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * record the policy we requested on the matching connection (the
 * event only carries handle + status, not the policy itself). */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle at the
		 * start of the command parameters to reach the policy */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
166 if (rp->status)
167 return;
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY: mirror
 * the policy we asked for into hdev->link_policy on success and
 * complete the pending request. */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
/* Command Complete handler for HCI_OP_RESET: clear transient state so
 * a freshly reset controller starts from a known baseline. */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			     BIT(HCI_PERIODIC_INQ));

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising data does not survive a reset */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 * The name we requested is recovered from the sent command, since the
 * event itself only carries a status. */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* With mgmt in control, let it track the result (success or
	 * failure); otherwise just cache the name on success. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	/* Refresh advertising data outside of init — presumably because
	 * it can embed the local name; confirm against hci_update_ad. */
	if (!status && !test_bit(HCI_INIT, &hdev->flags))
		hci_update_ad(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
238 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239 {
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
242 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
244 if (rp->status)
245 return;
247 if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249 }
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: sync the
 * HCI_AUTH flag with the mode we requested and notify mgmt. */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* The single command parameter is the requested mode */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
277 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
278 {
279 __u8 status = *((__u8 *) skb->data);
280 void *sent;
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
288 if (!status) {
289 __u8 param = *((__u8 *) sent);
291 if (param)
292 set_bit(HCI_ENCRYPT, &hdev->flags);
293 else
294 clear_bit(HCI_ENCRYPT, &hdev->flags);
295 }
297 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
298 }
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 * Updates the ISCAN/PSCAN flags from the scan mode we requested and
 * emits mgmt discoverable/connectable events only on real state
 * transitions. */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* Scan mode we asked for (the event only carries a status) */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Capture and clear the previous state so only transitions
	 * are reported below */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Schedule the discov_off work after the discoverable
		 * timeout (seconds converted to jiffies) */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte Class of Device value (stored little-endian). */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: on success
 * mirror the class we sent; always report the result to mgmt when it
 * is managing the device. */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
386 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387 {
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting;
391 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
393 if (rp->status)
394 return;
396 setting = __le16_to_cpu(rp->voice_setting);
398 if (hdev->voice_setting == setting)
399 return;
401 hdev->voice_setting = setting;
403 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
405 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407 }
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING: cache the
 * setting we requested and notify the driver if it actually changed. */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The new setting is taken from the sent command parameters */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
/* Command Complete handler for HCI_OP_HOST_BUFFER_SIZE: nothing to
 * store, just complete the pending request. */
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE: keep the host
 * feature bit and HCI_SSP_ENABLED flag in sync with the mode we
 * requested, and report the outcome to mgmt when it is in control. */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
475 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
476 {
477 if (lmp_ext_inq_capable(hdev))
478 return 2;
480 if (lmp_inq_rssi_capable(hdev))
481 return 1;
483 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
484 hdev->lmp_subver == 0x0757)
485 return 1;
487 if (hdev->manufacturer == 15) {
488 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
489 return 1;
490 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
491 return 1;
492 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
493 return 1;
494 }
496 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
497 hdev->lmp_subver == 0x1805)
498 return 1;
500 return 0;
501 }
503 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
504 {
505 u8 mode;
507 mode = hci_get_inquiry_mode(hdev);
509 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
510 }
/* Build and send Set Event Mask (and, for LE controllers, LE Set
 * Event Mask), enabling only events the controller's feature set can
 * actually generate. */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* Reuse the buffer for the LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
			     sizeof(events), events);
	}
}
576 static void bredr_setup(struct hci_dev *hdev)
577 {
578 struct hci_cp_delete_stored_link_key cp;
579 __le16 param;
580 __u8 flt_type;
582 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
583 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
585 /* Read Class of Device */
586 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
588 /* Read Local Name */
589 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
591 /* Read Voice Setting */
592 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
594 /* Clear Event Filters */
595 flt_type = HCI_FLT_CLEAR_ALL;
596 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
598 /* Connection accept timeout ~20 secs */
599 param = __constant_cpu_to_le16(0x7d00);
600 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
602 bacpy(&cp.bdaddr, BDADDR_ANY);
603 cp.delete_all = 1;
604 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
605 }
/* Send the initial setup commands for an LE capable controller. */
static void le_setup(struct hci_dev *hdev)
{
	/* Read LE Buffer Size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}
/* Core controller init sequence, triggered from the Read Local
 * Version handler during HCI_INIT. Queues the feature-dependent setup
 * commands for BR/EDR, LE, SSP/EIR, inquiry mode and authentication. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	if (lmp_bredr_capable(hdev))
		bredr_setup(hdev);

	if (lmp_le_capable(hdev))
		le_setup(hdev);

	hci_setup_event_mask(hdev);

	/* Read Local Commands is only sent to controllers newer
	 * than HCI version 1.1 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(hdev);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Request extended feature page 1 */
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
			     &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			     &enable);
	}
}
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION: cache the
 * version/manufacturer information and, during init, kick off the
 * rest of the controller setup. */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}
696 static void hci_setup_link_policy(struct hci_dev *hdev)
697 {
698 struct hci_cp_write_def_link_policy cp;
699 u16 link_policy = 0;
701 if (lmp_rswitch_capable(hdev))
702 link_policy |= HCI_LP_RSWITCH;
703 if (lmp_hold_capable(hdev))
704 link_policy |= HCI_LP_HOLD;
705 if (lmp_sniff_capable(hdev))
706 link_policy |= HCI_LP_SNIFF;
707 if (lmp_park_capable(hdev))
708 link_policy |= HCI_LP_PARK;
710 cp.policy = cpu_to_le16(link_policy);
711 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
712 }
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS: cache the
 * supported-commands bitmap; during init, configure the default link
 * policy if octet 5 bit 4 (Write Default Link Policy) is supported. */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache LMP
 * feature page 0 and derive the supported ACL/SCO/eSCO packet types
 * from the advertised feature bits. */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
/* Write the LE host-support setting, but only when the desired value
 * differs from what the controller currently reports. */
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Avoid a redundant command when the host bit already matches */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			     &cp);
}
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES: store
 * the requested feature page (0 = LMP features, 1 = host features)
 * and, during init on LE controllers, sync the LE host support bit. */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
		hci_set_le_support(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
831 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
832 struct sk_buff *skb)
833 {
834 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
836 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
838 if (rp->status)
839 return;
841 hdev->flow_ctl_mode = rp->mode;
843 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
844 }
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE: record the
 * controller's ACL/SCO MTUs and packet counts used for flow control. */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirk: override the reported SCO values with safe defaults */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with the full packet quota available */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
872 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
873 {
874 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
876 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
878 if (!rp->status)
879 bacpy(&hdev->bdaddr, &rp->bdaddr);
881 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
882 }
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: record
 * the parameters for block-based flow control. */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* Start with the full block quota available */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
/* Command Complete handler for HCI_OP_WRITE_CA_TIMEOUT: nothing to
 * store, just complete the pending request. */
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache the
 * AMP controller capabilities; the pending A2MP Get Info request is
 * answered in both the success and failure paths. */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC:
 * accumulate AMP assoc fragments into hdev->loc_assoc, requesting
 * further reads while the controller reports more data remaining.
 *
 * NOTE(review): rem_len/frag_len come from the controller and are
 * copied into assoc->data without an explicit bound check against the
 * assoc buffer size here — verify the buffer/size guarantees (e.g.
 * amp_assoc_size limits) make an overflow impossible.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	/* Payload carried by this event, and bytes still outstanding */
	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Final fragment: finish the assoc and reset the offset */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
/* Command Complete handler for HCI_OP_DELETE_STORED_LINK_KEY: status
 * only, just completes the pending request. */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}
/* Command Complete handler for HCI_OP_SET_EVENT_MASK: status only,
 * just completes the pending request. */
static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}
/* Command Complete handler for HCI_OP_WRITE_INQUIRY_MODE: status
 * only, just completes the pending request. */
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}
1008 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1010 {
1011 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 if (!rp->status)
1016 hdev->inq_tx_power = rp->tx_power;
1018 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1019 }
/* Command Complete handler for HCI_OP_SET_EVENT_FLT: status only,
 * just completes the pending request. */
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: report the
 * result to mgmt and, on success, remember the PIN length on the
 * matching ACL connection. */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length is only available in the sent command */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * result to mgmt when it is managing the device. */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE: record the
 * LE ACL MTU and packet count used for LE flow control. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* Start with the full packet quota available */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
1093 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1094 struct sk_buff *skb)
1095 {
1096 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1098 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1100 if (!rp->status) {
1101 hdev->adv_tx_power = rp->tx_power;
1102 if (!test_bit(HCI_INIT, &hdev->flags))
1103 hci_update_ad(hdev);
1104 }
1106 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1107 }
/* Command Complete handler for HCI_OP_LE_SET_EVENT_MASK: status only,
 * just completes the pending request. */
static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
}
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * result to mgmt when it is managing the device. */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the result to mgmt when it is managing the device. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * result to mgmt when it is managing the device. */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the result to mgmt when it is managing the device. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: pass the
 * hash and randomizer (and status) to mgmt unconditionally. */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE: track the
 * LE peripheral (advertising) flag from the mode we requested and
 * refresh advertising data outside of the init sequence. */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Non-zero enable parameter means advertising is on */
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags))
		hci_update_ad(hdev);

	hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
}
1220 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1221 {
1222 __u8 status = *((__u8 *) skb->data);
1224 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1226 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1228 if (status) {
1229 hci_dev_lock(hdev);
1230 mgmt_start_discovery_failed(hdev, status);
1231 hci_dev_unlock(hdev);
1232 return;
1233 }
1234 }
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Drives the discovery state machine based on the enable value of the
 * command we originally sent: starting a scan moves discovery to
 * FINDING, stopping it either hands over to the BR/EDR phase of an
 * interleaved discovery or stops discovery altogether.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* An interleaved discovery still in its finding phase
		 * continues with the BR/EDR inquiry part; otherwise
		 * discovery is finished. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1293 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1294 {
1295 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1297 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1299 if (rp->status)
1300 return;
1302 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1303 }
1305 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1306 {
1307 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1309 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1311 if (rp->status)
1312 return;
1314 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1315 }
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirrors the LE and LE/BR-EDR-simultaneous host feature
 * bits we requested into hdev->host_features. Outside of controller
 * init the outcome is also reported to mgmt.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the values from the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	/* mgmt is only told once init has finished */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
1348 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1349 struct sk_buff *skb)
1350 {
1351 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1353 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1354 hdev->name, rp->status, rp->phy_handle);
1356 if (rp->status)
1357 return;
1359 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1360 }
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure the pending request is completed, any pending connection
 * attempts are re-checked and the failed discovery is reported to
 * mgmt. On success the inquiry flag is set and discovery moves to
 * the FINDING state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tears down the matching connection object unless the
 * error is 0x0c (Command Disallowed) on an early attempt, in which
 * case the connection is parked in BT_CONNECT2 for a retry. On
 * success, makes sure a connection object exists for the outgoing
 * ACL link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Retry up to twice on "Command Disallowed";
			 * any other error kills the connection. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: the SCO link hanging off the ACL
 * connection named in the command is closed and deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carries the ACL handle the SCO was added to */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1458 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1459 {
1460 struct hci_cp_auth_requested *cp;
1461 struct hci_conn *conn;
1463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1465 if (!status)
1466 return;
1468 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1469 if (!cp)
1470 return;
1472 hci_dev_lock(hdev);
1474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1475 if (conn) {
1476 if (conn->state == BT_CONFIG) {
1477 hci_proto_connect_cfm(conn, status);
1478 hci_conn_put(conn);
1479 }
1480 }
1482 hci_dev_unlock(hdev);
1483 }
1485 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1486 {
1487 struct hci_cp_set_conn_encrypt *cp;
1488 struct hci_conn *conn;
1490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1492 if (!status)
1493 return;
1495 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1496 if (!cp)
1497 return;
1499 hci_dev_lock(hdev);
1501 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1502 if (conn) {
1503 if (conn->state == BT_CONFIG) {
1504 hci_proto_connect_cfm(conn, status);
1505 hci_conn_put(conn);
1506 }
1507 }
1509 hci_dev_unlock(hdev);
1510 }
1512 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1513 struct hci_conn *conn)
1514 {
1515 if (conn->state != BT_CONFIG || !conn->out)
1516 return 0;
1518 if (conn->pending_sec_level == BT_SECURITY_SDP)
1519 return 0;
1521 /* Only request authentication for SSP connections or non-SSP
1522 * devices with sec_level HIGH or if MITM protection is requested */
1523 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1524 conn->pending_sec_level != BT_SECURITY_HIGH)
1525 return 0;
1527 return 1;
1528 }
1530 static int hci_resolve_name(struct hci_dev *hdev,
1531 struct inquiry_entry *e)
1532 {
1533 struct hci_cp_remote_name_req cp;
1535 memset(&cp, 0, sizeof(cp));
1537 bacpy(&cp.bdaddr, &e->data.bdaddr);
1538 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1539 cp.pscan_mode = e->data.pscan_mode;
1540 cp.clock_offset = e->data.clock_offset;
1542 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1543 }
1545 static bool hci_resolve_next_name(struct hci_dev *hdev)
1546 {
1547 struct discovery_state *discov = &hdev->discovery;
1548 struct inquiry_entry *e;
1550 if (list_empty(&discov->resolve))
1551 return false;
1553 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1554 if (!e)
1555 return false;
1557 if (hci_resolve_name(hdev, e) == 0) {
1558 e->name_state = NAME_PENDING;
1559 return true;
1560 }
1562 return false;
1563 }
/* Process the outcome of a pending remote name lookup.
 *
 * If the connection was not yet announced to mgmt, announce it now
 * (with whatever name is available). Then, depending on the current
 * discovery state, record the resolved name in the inquiry cache and
 * either continue resolving the next entry or finish discovery.
 * @name may be NULL when the lookup failed; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More entries pending: keep discovery in the resolving phase */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1608 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1609 {
1610 struct hci_cp_remote_name_req *cp;
1611 struct hci_conn *conn;
1613 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1615 /* If successful wait for the name req complete event before
1616 * checking for the need to do authentication */
1617 if (!status)
1618 return;
1620 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1621 if (!cp)
1622 return;
1624 hci_dev_lock(hdev);
1626 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1628 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1629 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1631 if (!conn)
1632 goto unlock;
1634 if (!hci_outgoing_auth_needed(hdev, conn))
1635 goto unlock;
1637 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1638 struct hci_cp_auth_requested cp;
1639 cp.handle = __cpu_to_le16(conn->handle);
1640 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1641 }
1643 unlock:
1644 hci_dev_unlock(hdev);
1645 }
1647 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1648 {
1649 struct hci_cp_read_remote_features *cp;
1650 struct hci_conn *conn;
1652 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1654 if (!status)
1655 return;
1657 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1658 if (!cp)
1659 return;
1661 hci_dev_lock(hdev);
1663 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1664 if (conn) {
1665 if (conn->state == BT_CONFIG) {
1666 hci_proto_connect_cfm(conn, status);
1667 hci_conn_put(conn);
1668 }
1669 }
1671 hci_dev_unlock(hdev);
1672 }
1674 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1675 {
1676 struct hci_cp_read_remote_ext_features *cp;
1677 struct hci_conn *conn;
1679 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1681 if (!status)
1682 return;
1684 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1685 if (!cp)
1686 return;
1688 hci_dev_lock(hdev);
1690 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1691 if (conn) {
1692 if (conn->state == BT_CONFIG) {
1693 hci_proto_connect_cfm(conn, status);
1694 hci_conn_put(conn);
1695 }
1696 }
1698 hci_dev_unlock(hdev);
1699 }
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need handling: the (e)SCO link hanging off the ACL
 * connection named in the command is closed and deleted. Mirrors
 * hci_cs_add_sco() for the synchronous connection setup command.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The command carries the ACL handle the sync link rides on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Only failures need handling: the pending mode-change flag is
 * cleared and any SCO setup that was waiting on the mode change is
 * resumed with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Only failures need handling: the pending mode-change flag is
 * cleared and any SCO setup that was waiting on the mode change is
 * resumed with the error status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1790 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1791 {
1792 struct hci_cp_disconnect *cp;
1793 struct hci_conn *conn;
1795 if (!status)
1796 return;
1798 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1799 if (!cp)
1800 return;
1802 hci_dev_lock(hdev);
1804 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1805 if (conn)
1806 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1807 conn->dst_type, status);
1809 hci_dev_unlock(hdev);
1810 }
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * Only failures need handling: the LE connection object still in the
 * BT_CONNECT state is closed, the failure is reported to mgmt and
 * the protocol layer, and the object is deleted.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		/* Only one LE connection attempt can be outstanding,
		 * so look it up by state rather than by address. */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
/* Command Status handler for HCI_OP_LE_START_ENC; trace only, the
 * Encryption Change event carries the actual outcome. */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
/* Command Status handler for HCI_OP_CREATE_PHY_LINK.
 *
 * On failure, removes the connection object created for the AMP
 * physical link; on success, continues with writing the remote AMP
 * association for that handle.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK.
 *
 * On success, continues with writing the remote AMP association for
 * the accepted physical link handle; failures are ignored here.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}
/* Command Status handler for HCI_OP_CREATE_LOGICAL_LINK; trace only,
 * the Logical Link Complete event carries the actual outcome. */
static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
/* Handle the Inquiry Complete event.
 *
 * Completes the pending inquiry request, re-checks pending
 * connection attempts, and — when mgmt is driving discovery — moves
 * the discovery state machine on to name resolution or stops it if
 * there is nothing left to resolve.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	/* Nothing more to do if no inquiry was actually in progress */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first name that still needs it; if that
	 * fails, discovery is over. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1930 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1931 {
1932 struct inquiry_data data;
1933 struct inquiry_info *info = (void *) (skb->data + 1);
1934 int num_rsp = *((__u8 *) skb->data);
1936 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1938 if (!num_rsp)
1939 return;
1941 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1942 return;
1944 hci_dev_lock(hdev);
1946 for (; num_rsp; num_rsp--, info++) {
1947 bool name_known, ssp;
1949 bacpy(&data.bdaddr, &info->bdaddr);
1950 data.pscan_rep_mode = info->pscan_rep_mode;
1951 data.pscan_period_mode = info->pscan_period_mode;
1952 data.pscan_mode = info->pscan_mode;
1953 memcpy(data.dev_class, info->dev_class, 3);
1954 data.clock_offset = info->clock_offset;
1955 data.rssi = 0x00;
1956 data.ssp_mode = 0x00;
1958 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1959 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1960 info->dev_class, 0, !name_known, ssp, NULL,
1961 0);
1962 }
1964 hci_dev_unlock(hdev);
1965 }
/* Handle the Connection Complete event.
 *
 * Locates (or, for SCO, retypes) the matching connection object. On
 * success the handle is recorded, ACL links move to BT_CONFIG and
 * remote features are requested, other links go straight to
 * BT_CONNECTED. On failure the connection is closed, reported to
 * mgmt (for ACL) and deleted. Finally any pending connection
 * attempts are re-checked.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer a request we tracked as
		 * eSCO; retype the object in that case. */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a
			 * stored key get the longer pairing timeout. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
/* Accept a deferred incoming connection.
 *
 * Sends either a plain Accept Connection Request (non-eSCO capable
 * controllers, choosing the master role when allowed by @mask) or an
 * Accept Synchronous Connection Request with default bandwidth and
 * voice settings.
 */
void hci_conn_accept(struct hci_conn *conn, int mask)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p", conn);

	conn->state = BT_CONFIG;

	if (!lmp_esco_capable(hdev)) {
		struct hci_cp_accept_conn_req cp;

		bacpy(&cp.bdaddr, &conn->dst);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else /* lmp_esco_capable(hdev)) */ {
		struct hci_cp_accept_sync_conn_req cp;

		bacpy(&cp.bdaddr, &conn->dst);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default 64 kbit/s bandwidth, any latency, CVSD voice
		 * setting and "don't care" retransmission effort. */
		cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
		cp.max_latency = __constant_cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
			     sizeof(cp), &cp);
	}
}
2086 static inline bool is_sco_active(struct hci_dev *hdev)
2087 {
2088 if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
2089 (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
2090 BT_CONNECTED)))
2091 return true;
2092 return false;
2093 }
/* Handle the Connection Request event.
 *
 * Consults the protocol layer and the blacklist to decide whether to
 * accept. Accepted requests get a connection object (created if
 * needed), the inquiry cache is updated with the device class, and
 * then either an Accept Connection Request, an Accept Synchronous
 * Connection Request, or a deferred accept (BT_CONNECT2) is issued.
 * Rejected requests are answered with a Reject Connection Request.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			/* pkt_type not yet used for incoming connections */
			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			/* Prefer the master role when allowed or when a
			 * SCO link is already active. */
			if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
						|| is_sco_active(hdev)))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Default 64 kbit/s bandwidth, any latency and
			 * "don't care" retransmission effort. */
			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Accept deferred to the protocol layer */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
			hci_conn_put(conn);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2180 static u8 hci_to_mgmt_reason(u8 err)
2181 {
2182 switch (err) {
2183 case HCI_ERROR_CONNECTION_TIMEOUT:
2184 return MGMT_DEV_DISCONN_TIMEOUT;
2185 case HCI_ERROR_REMOTE_USER_TERM:
2186 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2187 case HCI_ERROR_REMOTE_POWER_OFF:
2188 return MGMT_DEV_DISCONN_REMOTE;
2189 case HCI_ERROR_LOCAL_HOST_TERM:
2190 return MGMT_DEV_DISCONN_LOCAL_HOST;
2191 default:
2192 return MGMT_DEV_DISCONN_UNKNOWN;
2193 }
2194 }
/* Handle the Disconnection Complete event.
 *
 * Closes the matching connection on success, reports the outcome to
 * mgmt for ACL/LE links it had announced, optionally removes the
 * stored link key, and finally notifies the protocol layer and
 * deletes the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only tell mgmt about links it was told about in the first
	 * place (ACL and LE). */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Authentication Complete event.
 *
 * On success the link is marked authenticated and its security level
 * raised (except for attempted re-auth on legacy devices, which the
 * spec does not support); failures are reported to mgmt. Depending
 * on the connection state this then either continues with encryption
 * setup, completes the connect, or delivers an auth confirmation.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links are encrypted before the connect completes */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* A pending encryption request rides on the auth outcome */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the resolved name (or a failure) into the pending-name
 * bookkeeping when mgmt is active, then requests authentication on
 * the matching connection if it still needs it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Encryption Change event.
 *
 * Updates the link mode bits on the matching connection (encryption
 * implies authentication), disconnects on an encryption failure of
 * an established link, and completes the connect or notifies the
 * encryption-change listeners as appropriate.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* An encryption failure on a live link is fatal */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2384 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2385 struct sk_buff *skb)
2386 {
2387 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2388 struct hci_conn *conn;
2390 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2392 hci_dev_lock(hdev);
2394 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2395 if (conn) {
2396 if (!ev->status)
2397 conn->link_mode |= HCI_LM_SECURE;
2399 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2401 hci_key_change_cfm(conn, ev->status);
2402 }
2404 hci_dev_unlock(hdev);
2405 }
/* Handle the Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page, then continues configuration:
 * SSP-capable peers get an extended-features read, otherwise the
 * remote name is requested (or the device announced to mgmt), and
 * the connect is completed if no authentication is still needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: fetch extended features (page 1)
	 * before carrying on with configuration. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Read Remote Version Information Complete event: the data is not
 * used here, the event is only logged.
 */
static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
/* QoS Setup Complete event: no action taken, only logged. */
static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
/* Command Complete event: dispatch to the per-opcode completion
 * handler, then cancel the command timeout watchdog and, when the
 * controller reports free command credits, resume sending any queued
 * HCI commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header so handlers see only the return data */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_LE_SET_EVENT_MASK:
		hci_cc_le_set_event_mask(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed: cancel the command timeout timer */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Controller accepts more commands: restart the command queue
	 * unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
/* Command Status event: dispatch the status to the per-opcode handler
 * (issued for commands that complete asynchronously with a later
 * event), then cancel the command timer and resume the command queue
 * when credits are available.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_CREATE_LOGICAL_LINK:
		hci_cs_create_logical_link(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged: cancel the timeout timer */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Restart the command queue when the controller has credits and
	 * no reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2802 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2803 {
2804 struct hci_ev_role_change *ev = (void *) skb->data;
2805 struct hci_conn *conn;
2807 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2809 hci_dev_lock(hdev);
2811 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2812 if (conn) {
2813 if (!ev->status) {
2814 if (ev->role)
2815 conn->link_mode &= ~HCI_LM_MASTER;
2816 else
2817 conn->link_mode |= HCI_LM_MASTER;
2818 }
2820 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2822 hci_role_switch_cfm(conn, ev->status, ev->role);
2823 }
2825 hci_dev_unlock(hdev);
2826 }
/* Number of Completed Packets event: the controller returns transmit
 * credits per connection handle (packet-based flow control).  Credit
 * the per-link-type pools and kick the TX worker.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the advertised handle array fits in the payload */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the matching pool, clamped to
		 * the pool size reported by the controller.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL buffer pool when the
			 * controller has no dedicated LE buffers.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Credits became available: resume transmission */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2894 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2895 __u16 handle)
2896 {
2897 struct hci_chan *chan;
2899 switch (hdev->dev_type) {
2900 case HCI_BREDR:
2901 return hci_conn_hash_lookup_handle(hdev, handle);
2902 case HCI_AMP:
2903 chan = hci_chan_lookup_handle(hdev, handle);
2904 if (chan)
2905 return chan->conn;
2906 break;
2907 default:
2908 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2909 break;
2910 }
2912 return NULL;
2913 }
/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of Number of Completed Packets.  Returns transmit
 * blocks to the shared block pool and kicks the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the advertised handle array fits in the payload */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* ACL and AMP links share one block pool, clamped to
		 * the controller-reported total.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Blocks became available: resume transmission */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2965 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2966 {
2967 struct hci_ev_mode_change *ev = (void *) skb->data;
2968 struct hci_conn *conn;
2970 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2972 hci_dev_lock(hdev);
2974 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2975 if (conn) {
2976 conn->mode = ev->mode;
2977 conn->interval = __le16_to_cpu(ev->interval);
2979 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2980 &conn->flags)) {
2981 if (conn->mode == HCI_CM_ACTIVE)
2982 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2983 else
2984 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2985 }
2987 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2988 hci_sco_setup(conn, ev->status);
2989 }
2991 hci_dev_unlock(hdev);
2992 }
/* PIN Code Request event: reject with a negative reply when the
 * device is not pairable, otherwise hand the request to user space
 * through mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* The hold/put pair only refreshes the disconnect timer with
	 * the longer pairing timeout while pairing is in progress.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Hint user space that a secure PIN is wanted when the
		 * pending security level is high.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3031 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3032 {
3033 struct hci_ev_link_key_req *ev = (void *) skb->data;
3034 struct hci_cp_link_key_reply cp;
3035 struct hci_conn *conn;
3036 struct link_key *key;
3038 BT_DBG("%s", hdev->name);
3040 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
3041 return;
3043 hci_dev_lock(hdev);
3045 key = hci_find_link_key(hdev, &ev->bdaddr);
3046 if (!key) {
3047 BT_DBG("%s link key not found for %pMR", hdev->name,
3048 &ev->bdaddr);
3049 goto not_found;
3050 }
3052 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3053 &ev->bdaddr);
3055 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3056 key->type == HCI_LK_DEBUG_COMBINATION) {
3057 BT_DBG("%s ignoring debug key", hdev->name);
3058 goto not_found;
3059 }
3061 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3062 if (conn) {
3063 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
3064 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3065 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3066 goto not_found;
3067 }
3069 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3070 conn->pending_sec_level == BT_SECURITY_HIGH) {
3071 BT_DBG("%s ignoring key unauthenticated for high security",
3072 hdev->name);
3073 goto not_found;
3074 }
3076 conn->key_type = key->type;
3077 conn->pin_length = key->pin_len;
3078 }
3080 bacpy(&cp.bdaddr, &ev->bdaddr);
3081 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3083 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3085 hci_dev_unlock(hdev);
3087 return;
3089 not_found:
3090 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3091 hci_dev_unlock(hdev);
3092 }
/* Link Key Notification event: a new link key was created during
 * pairing.  Record key metadata on the connection and store the key
 * when host-based link key storage is enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* hold/put pair refreshes the disconnect timer */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* NOTE(review): conn may be NULL here — assumes
	 * hci_add_link_key copes with that; verify in the callee.
	 */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
3123 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3124 {
3125 struct hci_ev_clock_offset *ev = (void *) skb->data;
3126 struct hci_conn *conn;
3128 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3130 hci_dev_lock(hdev);
3132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3133 if (conn && !ev->status) {
3134 struct inquiry_entry *ie;
3136 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3137 if (ie) {
3138 ie->data.clock_offset = ev->clock_offset;
3139 ie->timestamp = jiffies;
3140 }
3141 }
3143 hci_dev_unlock(hdev);
3144 }
3146 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3147 {
3148 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3149 struct hci_conn *conn;
3151 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3153 hci_dev_lock(hdev);
3155 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3156 if (conn && !ev->status)
3157 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3159 hci_dev_unlock(hdev);
3160 }
3162 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3163 {
3164 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3165 struct inquiry_entry *ie;
3167 BT_DBG("%s", hdev->name);
3169 hci_dev_lock(hdev);
3171 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3172 if (ie) {
3173 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3174 ie->timestamp = jiffies;
3175 }
3177 hci_dev_unlock(hdev);
3178 }
3180 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3181 struct sk_buff *skb)
3182 {
3183 struct inquiry_data data;
3184 int num_rsp = *((__u8 *) skb->data);
3185 bool name_known, ssp;
3187 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3189 if (!num_rsp)
3190 return;
3192 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3193 return;
3195 hci_dev_lock(hdev);
3197 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3198 struct inquiry_info_with_rssi_and_pscan_mode *info;
3199 info = (void *) (skb->data + 1);
3201 for (; num_rsp; num_rsp--, info++) {
3202 bacpy(&data.bdaddr, &info->bdaddr);
3203 data.pscan_rep_mode = info->pscan_rep_mode;
3204 data.pscan_period_mode = info->pscan_period_mode;
3205 data.pscan_mode = info->pscan_mode;
3206 memcpy(data.dev_class, info->dev_class, 3);
3207 data.clock_offset = info->clock_offset;
3208 data.rssi = info->rssi;
3209 data.ssp_mode = 0x00;
3211 name_known = hci_inquiry_cache_update(hdev, &data,
3212 false, &ssp);
3213 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3214 info->dev_class, info->rssi,
3215 !name_known, ssp, NULL, 0);
3216 }
3217 } else {
3218 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3220 for (; num_rsp; num_rsp--, info++) {
3221 bacpy(&data.bdaddr, &info->bdaddr);
3222 data.pscan_rep_mode = info->pscan_rep_mode;
3223 data.pscan_period_mode = info->pscan_period_mode;
3224 data.pscan_mode = 0x00;
3225 memcpy(data.dev_class, info->dev_class, 3);
3226 data.clock_offset = info->clock_offset;
3227 data.rssi = info->rssi;
3228 data.ssp_mode = 0x00;
3229 name_known = hci_inquiry_cache_update(hdev, &data,
3230 false, &ssp);
3231 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3232 info->dev_class, info->rssi,
3233 !name_known, ssp, NULL, 0);
3234 }
3235 }
3237 hci_dev_unlock(hdev);
3238 }
/* Read Remote Extended Features Complete event.
 *
 * For feature page 1, records the remote host's SSP support in the
 * inquiry cache and on the connection, then continues BR/EDR
 * connection setup (remote name request or mgmt connected report,
 * then finish the connect when no outgoing authentication is needed).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	/* Setup continuation only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication required: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection goes to BT_CONNECTED.  For a set of
 * negotiation-related failures an outgoing attempt is retried once
 * with a downgraded packet type; all other failures close and delete
 * the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by
		 * the controller: fall back to the eSCO entry and
		 * retag it.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry an outgoing setup once with relaxed eSCO/EDR
		 * packet types.
		 */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Synchronous Connection Changed event: no action taken, only logged. */
static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
/* Sniff Subrating event: only the status is logged. */
static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}
3358 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3359 struct sk_buff *skb)
3360 {
3361 struct inquiry_data data;
3362 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3363 int num_rsp = *((__u8 *) skb->data);
3364 size_t eir_len;
3366 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3368 if (!num_rsp)
3369 return;
3371 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3372 return;
3374 hci_dev_lock(hdev);
3376 for (; num_rsp; num_rsp--, info++) {
3377 bool name_known, ssp;
3379 bacpy(&data.bdaddr, &info->bdaddr);
3380 data.pscan_rep_mode = info->pscan_rep_mode;
3381 data.pscan_period_mode = info->pscan_period_mode;
3382 data.pscan_mode = 0x00;
3383 memcpy(data.dev_class, info->dev_class, 3);
3384 data.clock_offset = info->clock_offset;
3385 data.rssi = info->rssi;
3386 data.ssp_mode = 0x01;
3388 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3389 name_known = eir_has_data_type(info->data,
3390 sizeof(info->data),
3391 EIR_NAME_COMPLETE);
3392 else
3393 name_known = true;
3395 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3396 &ssp);
3397 eir_len = eir_get_length(info->data, sizeof(info->data));
3398 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3399 info->dev_class, info->rssi, !name_known,
3400 ssp, info->data, eir_len);
3401 }
3403 hci_dev_unlock(hdev);
3404 }
/* Encryption Key Refresh Complete event.
 *
 * On success, promote the connection to its pending security level.
 * A failure on an established connection triggers a disconnect with
 * an authentication-failure reason; during setup the result is
 * propagated through the connect/auth confirmation callbacks.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Refresh failed on a live link: tear it down */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/put pair refreshes the disconnect timer */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3450 static u8 hci_get_auth_req(struct hci_conn *conn)
3451 {
3452 /* If remote requests dedicated bonding follow that lead */
3453 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3454 /* If both remote and local IO capabilities allow MITM
3455 * protection then require it, otherwise don't */
3456 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3457 return 0x02;
3458 else
3459 return 0x03;
3460 }
3462 /* If remote requests no-bonding follow that lead */
3463 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3464 return conn->remote_auth | (conn->auth_type & 0x01);
3466 return conn->auth_type;
3467 }
/* IO Capability Request event: reply with our IO capability, derived
 * authentication requirements and OOB availability, or send a
 * negative reply when pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive while pairing is in progress */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Pairing is allowed when the device is pairable, or when the
	 * remote side only asked for no-bonding.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data when we have it and either we are
		 * the initiator or the remote announced OOB data.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3521 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3522 {
3523 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3524 struct hci_conn *conn;
3526 BT_DBG("%s", hdev->name);
3528 hci_dev_lock(hdev);
3530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3531 if (!conn)
3532 goto unlock;
3534 conn->remote_cap = ev->capability;
3535 conn->remote_auth = ev->authentication;
3536 if (ev->oob_data)
3537 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3539 unlock:
3540 hci_dev_unlock(hdev);
3541 }
/* User Confirmation Request event: decide between rejecting,
 * auto-accepting (immediately or after a configurable delay), or
 * forwarding the numeric comparison to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Deferred auto-accept: let the per-connection timer
		 * send the reply after the configured delay.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3611 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3612 struct sk_buff *skb)
3613 {
3614 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3616 BT_DBG("%s", hdev->name);
3618 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3619 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3620 }
3622 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3623 struct sk_buff *skb)
3624 {
3625 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3626 struct hci_conn *conn;
3628 BT_DBG("%s", hdev->name);
3630 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3631 if (!conn)
3632 return;
3634 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3635 conn->passkey_entered = 0;
3637 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3638 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3639 conn->dst_type, conn->passkey_notify,
3640 conn->passkey_entered);
3641 }
3643 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3644 {
3645 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3646 struct hci_conn *conn;
3648 BT_DBG("%s", hdev->name);
3650 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3651 if (!conn)
3652 return;
3654 switch (ev->type) {
3655 case HCI_KEYPRESS_STARTED:
3656 conn->passkey_entered = 0;
3657 return;
3659 case HCI_KEYPRESS_ENTERED:
3660 conn->passkey_entered++;
3661 break;
3663 case HCI_KEYPRESS_ERASED:
3664 conn->passkey_entered--;
3665 break;
3667 case HCI_KEYPRESS_CLEARED:
3668 conn->passkey_entered = 0;
3669 break;
3671 case HCI_KEYPRESS_COMPLETED:
3672 return;
3673 }
3675 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3676 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3677 conn->dst_type, conn->passkey_notify,
3678 conn->passkey_entered);
3679 }
/* Simple Pairing Complete event: report a pairing failure to mgmt
 * (unless we initiated authentication, in which case the auth
 * complete path already reports it) and drop the pairing reference.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Drops the reference taken when pairing started */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3710 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3711 struct sk_buff *skb)
3712 {
3713 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3714 struct inquiry_entry *ie;
3716 BT_DBG("%s", hdev->name);
3718 hci_dev_lock(hdev);
3720 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3721 if (ie)
3722 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3724 hci_dev_unlock(hdev);
3725 }
3727 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3728 struct sk_buff *skb)
3729 {
3730 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3731 struct oob_data *data;
3733 BT_DBG("%s", hdev->name);
3735 hci_dev_lock(hdev);
3737 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3738 goto unlock;
3740 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3741 if (data) {
3742 struct hci_cp_remote_oob_data_reply cp;
3744 bacpy(&cp.bdaddr, &ev->bdaddr);
3745 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3746 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3748 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3749 &cp);
3750 } else {
3751 struct hci_cp_remote_oob_data_neg_reply cp;
3753 bacpy(&cp.bdaddr, &ev->bdaddr);
3754 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3755 &cp);
3756 }
3758 unlock:
3759 hci_dev_unlock(hdev);
3760 }
/* AMP Physical Link Complete event: a secondary (AMP) physical link has
 * finished establishment. On success, copy the peer address from the
 * BR/EDR connection that owns the AMP manager and confirm the physical
 * link to the AMP layer; on failure, tear the connection down.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		/* Link setup failed: discard the half-created connection */
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR connection whose L2CAP traffic is being moved to AMP */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* hold + put around setting disc_timeout: arms the disconnect
	 * timer with the new timeout without leaving an extra reference
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_put(hcon);

	hci_conn_hold_device(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
/* AMP Logical Link Complete event: a logical link on top of an existing
 * AMP physical link is ready. Create the corresponding HCI channel and,
 * if an L2CAP channel is waiting for the move, confirm the logical link
 * to L2CAP with the AMP controller's block MTU.
 *
 * NOTE(review): unlike most handlers in this file, no hci_dev_lock() is
 * taken around the connection/channel manipulation here — presumably
 * intentional, but worth confirming against the locking rules for
 * hci_conn_hash_lookup_handle/hci_chan_create.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP data travels in blocks; use the block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3840 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3841 struct sk_buff *skb)
3842 {
3843 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3844 struct hci_chan *hchan;
3846 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3847 le16_to_cpu(ev->handle), ev->status);
3849 if (ev->status)
3850 return;
3852 hci_dev_lock(hdev);
3854 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3855 if (!hchan)
3856 goto unlock;
3858 amp_destroy_logical_link(hchan, ev->reason);
3860 unlock:
3861 hci_dev_unlock(hdev);
3862 }
3864 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3865 struct sk_buff *skb)
3866 {
3867 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3868 struct hci_conn *hcon;
3870 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3872 if (ev->status)
3873 return;
3875 hci_dev_lock(hdev);
3877 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3878 if (hcon) {
3879 hcon->state = BT_CLOSED;
3880 hci_conn_del(hcon);
3881 }
3883 hci_dev_unlock(hdev);
3884 }
/* LE Connection Complete event: an LE link finished establishing (or
 * failed). Matches the event against a pending outgoing connection, or
 * creates a fresh hci_conn for an incoming one, then moves the
 * connection to BT_CONNECTED / cleans it up and informs mgmt and the
 * upper protocols.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* Only one LE connect attempt can be pending at a time, so any
	 * LE_LINK connection in BT_CONNECT state is the one this event
	 * answers. Otherwise this is remote-initiated: create the conn.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		/* Report the failure before deleting so mgmt and the
		 * protocol layers see the address/type still intact.
		 */
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	/* Notify mgmt only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3937 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3938 {
3939 u8 num_reports = skb->data[0];
3940 void *ptr = &skb->data[1];
3941 s8 rssi;
3943 hci_dev_lock(hdev);
3945 while (num_reports--) {
3946 struct hci_ev_le_advertising_info *ev = ptr;
3948 rssi = ev->data[ev->length];
3949 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3950 NULL, rssi, 0, 1, ev->data, ev->length);
3952 ptr += sizeof(*ev) + ev->length + 1;
3953 }
3955 hci_dev_unlock(hdev);
3956 }
/* LE Long Term Key Request event: the controller needs the LTK to start
 * encryption on an LE link. Look the key up by EDiv/Rand; reply with the
 * key if found, otherwise send a negative reply so the remote side knows
 * encryption cannot proceed.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated (MITM-protected) key upgrades the security level */
	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* STKs are single-use short term keys from SMP pairing: drop them
	 * once consumed so they cannot be replayed on a later connection.
	 */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4001 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4002 {
4003 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4005 skb_pull(skb, sizeof(*le_ev));
4007 switch (le_ev->subevent) {
4008 case HCI_EV_LE_CONN_COMPLETE:
4009 hci_le_conn_complete_evt(hdev, skb);
4010 break;
4012 case HCI_EV_LE_ADVERTISING_REPORT:
4013 hci_le_adv_report_evt(hdev, skb);
4014 break;
4016 case HCI_EV_LE_LTK_REQ:
4017 hci_le_ltk_request_evt(hdev, skb);
4018 break;
4020 default:
4021 break;
4022 }
4023 }
4025 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4026 {
4027 struct hci_ev_channel_selected *ev = (void *) skb->data;
4028 struct hci_conn *hcon;
4030 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4032 skb_pull(skb, sizeof(*ev));
4034 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4035 if (!hcon)
4036 return;
4038 amp_read_loc_assoc_final_data(hdev, hcon);
4039 }
/* Main HCI event demultiplexer: strips the event header and dispatches
 * the packet to the matching handler above. Consumes the skb in all
 * cases and bumps the per-device received-event counter.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and otherwise ignored */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}