| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 09:04:05 +0100 | 
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 09:04:05 +0100 | 
| commit | 92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch) | |
| tree | 15626ff9287e37c3cb81c7286d6db5a7fd77c854 /net/bluetooth/hci_sock.c | |
| parent | 15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff) | |
| parent | 1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff) | |
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date:   Thu Nov 5 23:04:11 2015 +0200
    drm/i915/skl: disable display side power well support for now
so that we can properly re-enable skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'net/bluetooth/hci_sock.c')
| -rw-r--r-- | net/bluetooth/hci_sock.c | 109 | 
1 file changed, 94 insertions(+), 15 deletions(-)
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f2d30d1156c9..b1eb8c09a660 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -120,10 +120,7 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 	/* Apply filter */
 	flt = &hci_pi(sk)->filter;
 
-	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
-		flt_type = 0;
-	else
-		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
 
 	if (!test_bit(flt_type, &flt->type_mask))
 		return true;
@@ -173,6 +170,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 			continue;
 
 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+				continue;
 			if (is_filtered_packet(sk, skb))
 				continue;
 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@@ -279,6 +281,9 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 		else
 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 		break;
+	case HCI_DIAG_PKT:
+		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
+		break;
 	default:
 		return;
 	}
@@ -303,6 +308,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 {
 	struct hci_mon_hdr *hdr;
 	struct hci_mon_new_index *ni;
+	struct hci_mon_index_info *ii;
 	struct sk_buff *skb;
 	__le16 opcode;
 
@@ -312,7 +318,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 		if (!skb)
 			return NULL;
 
-		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 		ni->type = hdev->dev_type;
 		ni->bus = hdev->bus;
 		bacpy(&ni->bdaddr, &hdev->bdaddr);
@@ -329,6 +335,40 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 		break;
 
+	case HCI_DEV_SETUP:
+		if (hdev->manufacturer == 0xffff)
+			return NULL;
+
+		/* fall through */
+
+	case HCI_DEV_UP:
+		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
+		bacpy(&ii->bdaddr, &hdev->bdaddr);
+		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
+
+		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
+		break;
+
+	case HCI_DEV_OPEN:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
+		break;
+
+	case HCI_DEV_CLOSE:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
+		break;
+
 	default:
 		return NULL;
 	}
@@ -358,6 +398,28 @@ static void send_monitor_replay(struct sock *sk)
 
 		if (sock_queue_rcv_skb(sk, skb))
 			kfree_skb(skb);
+
+		if (!test_bit(HCI_RUNNING, &hdev->flags))
+			continue;
+
+		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
+		if (!skb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, skb))
+			kfree_skb(skb);
+
+		if (test_bit(HCI_UP, &hdev->flags))
+			skb = create_monitor_event(hdev, HCI_DEV_UP);
+		else if (hci_dev_test_flag(hdev, HCI_SETUP))
+			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
+		else
+			skb = NULL;
+
+		if (skb) {
+			if (sock_queue_rcv_skb(sk, skb))
+				kfree_skb(skb);
+		}
 	}
 
 	read_unlock(&hci_dev_list_lock);
@@ -392,14 +454,12 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 
 void hci_sock_dev_event(struct hci_dev *hdev, int event)
 {
-	struct hci_ev_si_device ev;
-
 	BT_DBG("hdev %s event %d", hdev->name, event);
 
-	/* Send event to monitor */
 	if (atomic_read(&monitor_promisc)) {
 		struct sk_buff *skb;
 
+		/* Send event to monitor */
 		skb = create_monitor_event(hdev, event);
 		if (skb) {
 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -408,10 +468,14 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 		}
 	}
 
-	/* Send event to sockets */
-	ev.event  = event;
-	ev.dev_id = hdev->id;
-	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	if (event <= HCI_DEV_DOWN) {
+		struct hci_ev_si_device ev;
+
+		/* Send event to sockets */
+		ev.event  = event;
+		ev.dev_id = hdev->id;
+		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	}
 
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
@@ -503,7 +567,16 @@ static int hci_sock_release(struct socket *sock)
 
 	if (hdev) {
 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
-			hci_dev_close(hdev->id);
+			/* When releasing an user channel exclusive access,
+			 * call hci_dev_do_close directly instead of calling
+			 * hci_dev_close to ensure the exclusive access will
+			 * be released and the controller brought back down.
+			 *
+			 * The checking of HCI_AUTO_OFF is not needed in this
+			 * case since it will have been cleared already when
+			 * opening the user channel.
+			 */
+			hci_dev_do_close(hdev);
 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 			mgmt_index_added(hdev);
 		}
@@ -928,7 +1001,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
-	if (flags & (MSG_OOB))
+	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
 	if (sk->sk_state == BT_CLOSED)
@@ -1176,7 +1249,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* Stand-alone HCI commands must be flagged as
 			 * single-command requests.
 			 */
-			bt_cb(skb)->req.start = true;
+			bt_cb(skb)->hci.req_start = true;
 
 			skb_queue_tail(&hdev->cmd_q, skb);
 			queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1187,6 +1260,12 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			goto drop;
 		}
 
+		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+			err = -EINVAL;
+			goto drop;
+		}
+
 		skb_queue_tail(&hdev->raw_q, skb);
 		queue_work(hdev->workqueue, &hdev->tx_work);
 	}
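The hci_sock.c side of this merge tightens what an HCI_CHANNEL_RAW socket can see and send: only the core packet types (command, event, ACL, SCO) are delivered or accepted, and the per-socket filter now indexes type_mask directly from pkt_type without the old HCI_VENDOR_PKT special case. The sketch below is a minimal, self-contained user-space illustration of that gating logic, not the kernel code itself; the constant values mirror the Bluetooth HCI packet indicators and HCI_FLT_TYPE_BITS from the kernel headers, while the helper names (raw_channel_accepts, filter_matches) are invented for this example.

```c
/* Standalone sketch of the raw-channel packet gating added by this patch.
 * Assumptions: constants copied from the HCI specification / kernel headers;
 * helper names are hypothetical and only illustrate the check order. */
#include <stdbool.h>
#include <stdio.h>

#define HCI_COMMAND_PKT   0x01
#define HCI_ACLDATA_PKT   0x02
#define HCI_SCODATA_PKT   0x03
#define HCI_EVENT_PKT     0x04
#define HCI_VENDOR_PKT    0xff

#define HCI_FLT_TYPE_BITS 31

/* After the patch, anything that is not a core packet type is skipped on a
 * raw channel before the socket filter is even consulted. */
static bool raw_channel_accepts(unsigned char pkt_type)
{
	return pkt_type == HCI_COMMAND_PKT ||
	       pkt_type == HCI_EVENT_PKT ||
	       pkt_type == HCI_ACLDATA_PKT ||
	       pkt_type == HCI_SCODATA_PKT;
}

/* The filter lookup no longer special-cases HCI_VENDOR_PKT: the type is
 * masked to 0..31 and tested against the socket's type_mask bitmap. */
static bool filter_matches(unsigned long type_mask, unsigned char pkt_type)
{
	unsigned int flt_type = pkt_type & HCI_FLT_TYPE_BITS;

	return (type_mask >> flt_type) & 1UL;
}

int main(void)
{
	/* Example filter: socket only wants events and ACL data. */
	unsigned long type_mask = (1UL << HCI_EVENT_PKT) |
				  (1UL << HCI_ACLDATA_PKT);
	unsigned char types[] = { HCI_COMMAND_PKT, HCI_EVENT_PKT,
				  HCI_ACLDATA_PKT, HCI_VENDOR_PKT };

	for (unsigned int i = 0; i < sizeof(types); i++) {
		unsigned char t = types[i];

		printf("pkt_type 0x%02x: raw channel %s, filter %s\n", t,
		       raw_channel_accepts(t) ? "accepts" : "drops",
		       filter_matches(type_mask, t) ? "matches" : "rejects");
	}

	return 0;
}
```

In this sketch a vendor packet (0xff) is dropped by the channel check before the filter runs, which is why the kernel could remove the old flt_type = 0 fallback for HCI_VENDOR_PKT without changing observable behaviour for raw sockets.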
