author    Dominik Sliwa <dominik.sliwa@toradex.com>  2017-12-22 12:07:34 +0000
committer Dominik Sliwa <dominik.sliwa@toradex.com>  2018-09-18 17:15:11 +0200
commit    69ff0b5709d0d06545fcb2d08c7645b56d3687dd (patch)
tree      71f1569cc019e16c1b1be22c1e8d382766daebb3
parent    d6a47bfb72f495a72637fbe635220f6af0f4cb6f (diff)
backports: bluetooth: Support 4.9 kernels
Signed-off-by: Dominik Sliwa <dominik.sliwa@toradex.com>
-rw-r--r--  Kconfig.local | 3
-rw-r--r--  Kconfig.sources | 1
-rw-r--r--  Makefile.kernel | 1
-rw-r--r--  backport-include/crypto/skcipher.h | 99
-rw-r--r--  backport-include/linux/kernel.h | 4
-rw-r--r--  backport-include/linux/kref.h | 13
-rw-r--r--  backport-include/linux/of_device.h | 5
-rw-r--r--  backport-include/linux/skbuff.h | 17
-rw-r--r--  backport-include/linux/string.h | 4
-rw-r--r--  backport-include/linux/uio.h | 11
-rw-r--r--  backport-include/net/sock.h | 10
-rw-r--r--  backport-include/uapi/linux/cryptouser.h | 12
-rw-r--r--  compat/Kconfig | 12
-rw-r--r--  compat/Makefile | 3
-rw-r--r--  compat/backport-4.0.c | 33
-rw-r--r--  compat/backport-4.10.c | 37
-rw-r--r--  compat/backport-4.12.c | 22
-rw-r--r--  compat/backport-4.4.c | 11
-rw-r--r--  compat/crypto-kpp.c | 128
-rw-r--r--  compat/crypto-skcipher.c | 35
-rw-r--r--  compat/lib-iov_iter.c | 1423
-rw-r--r--  drivers/bluetooth/Makefile | 2
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 4
-rw-r--r--  drivers/tty/Makefile | 2
-rw-r--r--  drivers/tty/serdev/Kconfig | 25
-rw-r--r--  drivers/tty/serdev/Makefile | 5
-rw-r--r--  drivers/tty/serdev/core.c | 646
-rw-r--r--  drivers/tty/serdev/serdev-ttyport.c | 320
-rw-r--r--  include/crypto/backport-kpp.h | 350
-rw-r--r--  include/crypto/internal/backport-kpp.h | 64
-rw-r--r--  include/linux/serdev.h | 336
-rw-r--r--  include/net/bluetooth/.hci_core.h.kate-swp | bin 0 -> 116 bytes
-rw-r--r--  include/net/bluetooth/.rfcomm.h.kate-swp | bin 0 -> 81 bytes
-rw-r--r--  include/net/bluetooth/bluetooth.h | 416
-rw-r--r--  include/net/bluetooth/hci.h | 2032
-rw-r--r--  include/net/bluetooth/hci_core.h | 1545
-rw-r--r--  include/net/bluetooth/hci_mon.h | 67
-rw-r--r--  include/net/bluetooth/hci_sock.h | 176
-rw-r--r--  include/net/bluetooth/l2cap.h | 951
-rw-r--r--  include/net/bluetooth/mgmt.h | 826
-rw-r--r--  include/net/bluetooth/rfcomm.h | 376
-rw-r--r--  include/net/bluetooth/sco.h | 49
-rw-r--r--  local-symbols | 1
-rw-r--r--  net/bluetooth/Kconfig | 3
-rw-r--r--  net/bluetooth/a2mp.c | 4
-rw-r--r--  net/bluetooth/af_bluetooth.c | 8
-rw-r--r--  net/bluetooth/ecdh_helper.c | 3
-rw-r--r--  net/bluetooth/hci_sock.c | 6
-rw-r--r--  net/bluetooth/l2cap_core.c | 38
-rw-r--r--  net/bluetooth/l2cap_sock.c | 19
-rw-r--r--  net/bluetooth/rfcomm/core.c | 4
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 11
-rw-r--r--  net/bluetooth/sco.c | 12
53 files changed, 10169 insertions, 16 deletions
diff --git a/Kconfig.local b/Kconfig.local
index 3bc0ad2..6ac6651 100644
--- a/Kconfig.local
+++ b/Kconfig.local
@@ -1606,3 +1606,6 @@ config BACKPORTED_R8822BE
config BACKPORTED_RTLWIFI_DEBUG_ST
tristate
default RTLWIFI_DEBUG_ST
+config BACKPORTED_SERIAL_DEV_BUS
+ tristate
+ default SERIAL_DEV_BUS
diff --git a/Kconfig.sources b/Kconfig.sources
index 9ad83db..373876e 100644
--- a/Kconfig.sources
+++ b/Kconfig.sources
@@ -8,6 +8,7 @@ source "$BACKPORT_DIR/net/bluetooth/Kconfig"
source "$BACKPORT_DIR/drivers/net/wireless/Kconfig"
#source "$BACKPORT_DIR/drivers/net/ethernet/Kconfig"
source "$BACKPORT_DIR/drivers/net/usb/Kconfig"
+source "$BACKPORT_DIR/drivers/tty/serdev/Kconfig"
source "$BACKPORT_DIR/drivers/ssb/Kconfig"
source "$BACKPORT_DIR/drivers/bcma/Kconfig"
diff --git a/Makefile.kernel b/Makefile.kernel
index 1163019..7b4164d 100644
--- a/Makefile.kernel
+++ b/Makefile.kernel
@@ -48,6 +48,7 @@ obj-$(CPTCFG_BCMA) += drivers/bcma/
obj-$(CPTCFG_USB_NET_RNDIS_WLAN) += drivers/net/usb/
obj-$(CPTCFG_NFC) += net/nfc/
obj-$(CPTCFG_NFC) += drivers/nfc/
+obj-$(CPTCFG_SERIAL_DEV_BUS) += drivers/tty/serdev/
#obj-$(CPTCFG_MEDIA_SUPPORT) += drivers/media/
obj-$(CPTCFG_USB_WDM) += drivers/usb/class/
diff --git a/backport-include/crypto/skcipher.h b/backport-include/crypto/skcipher.h
index 73ba783..900c359 100644
--- a/backport-include/crypto/skcipher.h
+++ b/backport-include/crypto/skcipher.h
@@ -441,6 +441,105 @@ static inline void skcipher_request_set_crypt(
}
#endif /* < 4.3 */
+#if LINUX_VERSION_IS_LESS(4,8,0)
+#define skcipher_walk LINUX_BACKPORT(skcipher_walk)
+struct skcipher_walk {
+ union {
+ struct {
+ struct page *page;
+ unsigned long offset;
+ } phys;
+
+ struct {
+ u8 *page;
+ void *addr;
+ } virt;
+ } src, dst;
+
+ struct scatter_walk in;
+ unsigned int nbytes;
+
+ struct scatter_walk out;
+ unsigned int total;
+
+ struct list_head buffers;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
+struct skcipher_alg {
+ int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct skcipher_request *req);
+ int (*decrypt)(struct skcipher_request *req);
+ int (*init)(struct crypto_skcipher *tfm);
+ void (*exit)(struct crypto_skcipher *tfm);
+
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+ unsigned int chunksize;
+ unsigned int walksize;
+
+ struct crypto_alg base;
+};
+
+struct skcipher_instance {
+ void (*free)(struct skcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct skcipher_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct skcipher_alg alg;
+ };
+};
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+ struct skcipher_alg *alg)
+{
+ if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER)
+ return alg->base.cra_blocksize;
+
+ if (alg->base.cra_ablkcipher.encrypt)
+ return alg->base.cra_blocksize;
+
+ return alg->walksize;
+}
+
+static inline struct skcipher_alg *crypto_skcipher_alg(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg, base);
+}
+
+static inline unsigned int crypto_skcipher_walksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+static inline struct crypto_instance *skcipher_crypto_instance(
+ struct skcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
+#define CRYPTO_ALG_TYPE_SKCIPHER CRYPTO_ALG_TYPE_ABLKCIPHER
+#endif /* < 4.8 */
+
#if LINUX_VERSION_IS_LESS(4,6,0)
#define skcipher_request_zero LINUX_BACKPORT(skcipher_request_zero)
static inline void skcipher_request_zero(struct skcipher_request *req)
diff --git a/backport-include/linux/kernel.h b/backport-include/linux/kernel.h
index 332f153..c5f9216 100644
--- a/backport-include/linux/kernel.h
+++ b/backport-include/linux/kernel.h
@@ -209,6 +209,10 @@ int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *
#endif
+#if LINUX_VERSION_IS_LESS(4,5,0)
+const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
+#endif /* < 4.5 */
+
#if LINUX_VERSION_IS_LESS(3,14,0)
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
diff --git a/backport-include/linux/kref.h b/backport-include/linux/kref.h
new file mode 100644
index 0000000..631488f
--- /dev/null
+++ b/backport-include/linux/kref.h
@@ -0,0 +1,13 @@
+#ifndef __BACKPORT_LINUX_KREF_H
+#define __BACKPORT_LINUX_KREF_H
+#include_next <linux/kref.h>
+
+#if LINUX_VERSION_IS_LESS(4,11,0)
+#include <linux/refcount.h>
+static inline unsigned int kref_read(const struct kref *kref)
+{
+ return refcount_read((const refcount_t *)&kref->refcount);
+}
+#endif /* < 4.11 */
+
+#endif /* __BACKPORT_LINUX_KREF_H */
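The kref.h shim above only supplies kref_read() for kernels that predate 4.11. A minimal caller sketch (illustrative only; the structure and function names below are invented, not taken from this patch):

/* Illustrative sketch, not part of this patch: kref_read() here
 * resolves to the backported helper on pre-4.11 kernels. */
#include <linux/kref.h>
#include <linux/printk.h>

struct my_obj {
	struct kref ref;
};

static void my_obj_log_refcount(struct my_obj *obj)
{
	pr_info("my_obj refcount = %u\n", kref_read(&obj->ref));
}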
diff --git a/backport-include/linux/of_device.h b/backport-include/linux/of_device.h
index 951b253..9f621c6 100644
--- a/backport-include/linux/of_device.h
+++ b/backport-include/linux/of_device.h
@@ -3,6 +3,11 @@
#include_next <linux/of_device.h>
#include <linux/version.h>
+
+#if LINUX_VERSION_IS_LESS(4,12,0)
+ssize_t bp_of_device_modalias(struct device *dev, char *str, ssize_t len);
+#define of_device_modalias bp_of_device_modalias
+#endif
#if LINUX_VERSION_IS_LESS(4,1,0)
static inline void of_dma_configure(struct device *dev, struct device_node *np)
{}
diff --git a/backport-include/linux/skbuff.h b/backport-include/linux/skbuff.h
index 034206b..af40b8d 100644
--- a/backport-include/linux/skbuff.h
+++ b/backport-include/linux/skbuff.h
@@ -371,6 +371,23 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
*(u8 *)skb_put(skb, 1) = val;
}
+
+static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
+{
+ void *tmp = __skb_put(skb, len);
+
+ memset(tmp, 0, len);
+ return tmp;
+}
+
+static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
+ unsigned int len)
+{
+ void *tmp = __skb_put(skb, len);
+
+ memcpy(tmp, data, len);
+ return tmp;
+}
#endif
#endif /* __BACKPORT_SKBUFF_H */
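The two inline helpers added above mirror the upstream __skb_put_data()/__skb_put_zero() API so that newer Bluetooth code builds on older kernels. A usage sketch (illustrative only; the function name, header pointer and pad size are invented, and the skb is assumed to have enough tailroom):

/* Illustrative sketch, not part of this patch. */
#include <linux/skbuff.h>

static void append_header(struct sk_buff *skb, const void *hdr, unsigned int hlen)
{
	__skb_put_data(skb, hdr, hlen);	/* copy hlen bytes at the current tail */
	__skb_put_zero(skb, 4);		/* then append a zeroed 4-byte pad */
}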
diff --git a/backport-include/linux/string.h b/backport-include/linux/string.h
index b85d9c7..b1d26d1 100644
--- a/backport-include/linux/string.h
+++ b/backport-include/linux/string.h
@@ -29,4 +29,8 @@ void memzero_explicit(void *s, size_t count);
ssize_t strscpy(char *dest, const char *src, size_t count);
#endif
+#if LINUX_VERSION_IS_LESS(4,0,0)
+extern void kfree_const(const void *x);
+extern const char *kstrdup_const(const char *s, gfp_t gfp);
+#endif
#endif /* __BACKPORT_LINUX_STRING_H */
diff --git a/backport-include/linux/uio.h b/backport-include/linux/uio.h
new file mode 100644
index 0000000..11194c9
--- /dev/null
+++ b/backport-include/linux/uio.h
@@ -0,0 +1,11 @@
+#ifndef _BP_LINUX_UIO_H
+#define _BP_LINUX_UIO_H
+#include_next <linux/uio.h>
+
+#if LINUX_VERSION_IS_LESS(3,16,0)
+#include <linux/fs.h>
+#endif
+
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
+
+#endif /* _BP_LINUX_UIO_H */
diff --git a/backport-include/net/sock.h b/backport-include/net/sock.h
index 39bf008..cc037cf 100644
--- a/backport-include/net/sock.h
+++ b/backport-include/net/sock.h
@@ -75,4 +75,14 @@ static inline void sk_pacing_shift_update(struct sock *sk, int val)
}
#endif /* < 4.16 */
+#if LINUX_VERSION_IS_LESS(4,7,0)
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
+#endif /* < 4.7 */
+
#endif /* __BACKPORT_NET_SOCK_H */
diff --git a/backport-include/uapi/linux/cryptouser.h b/backport-include/uapi/linux/cryptouser.h
new file mode 100644
index 0000000..ea4a9a5
--- /dev/null
+++ b/backport-include/uapi/linux/cryptouser.h
@@ -0,0 +1,12 @@
+#ifndef __BACKPORT_LINUX_CRYPTOUSER_H
+#define __BACKPORT_LINUX_CRYPTOUSER_H
+#include_next <linux/cryptouser.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_IS_LESS(4,8,0)
+struct crypto_report_kpp {
+ char type[CRYPTO_MAX_NAME];
+};
+#endif /* < 4.8 */
+
+#endif
diff --git a/compat/Kconfig b/compat/Kconfig
index f28b24c..1660136 100644
--- a/compat/Kconfig
+++ b/compat/Kconfig
@@ -113,6 +113,12 @@ config BPAUTO_CRYPTO_SKCIPHER
#c-file crypto/skcipher.c
#module-name skcipher
+config BPAUTO_CRYPTO_KPP2
+ tristate
+ depends on m
+ depends on KERNEL_4_7
+ default y if BACKPORTED_BT
+
config BPAUTO_WANT_DEV_COREDUMP
bool
@@ -175,6 +181,12 @@ config BPAUTO_REFCOUNT
#h-file linux/refcount.h
#c-file lib/refcount.c
+config BPAUTO_IOV_ITER
+ bool
+ default y
+ depends on !KERNEL_4_6
+ #c-file lib/iov_iter.c
+
config BPAUTO_SYSTEM_DATA_VERIFICATION
bool
diff --git a/compat/Makefile b/compat/Makefile
index cabfcdc..d198c2c 100644
--- a/compat/Makefile
+++ b/compat/Makefile
@@ -60,6 +60,8 @@ compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/oid.o
compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/asn1parse.o
compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/rsapubkey.asn1.o
+kpp-objs += crypto-kpp.o
+obj-$(CPTCFG_BPAUTO_CRYPTO_KPP2) += kpp.o
$(obj)/lib-oid_registry.o: $(obj)/oid_registry_data.c
$(obj)/oid_registry_data.c: $(src)/../include/linux/backport-oid_registry.h \
@@ -76,6 +78,7 @@ compat-$(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP) += drivers-base-devcoredump.o
compat-$(CPTCFG_BPAUTO_RHASHTABLE) += lib-rhashtable.o
cordic-objs += lib-cordic.o
obj-$(CPTCFG_BPAUTO_BUILD_CORDIC) += cordic.o
+compat-$(CPTCFG_BPAUTO_IOV_ITER) += lib-iov_iter.o
compat-$(CPTCFG_BPAUTO_PUBLIC_KEY) +=
compat-$(CPTCFG_BPAUTO_BUCKET_LOCKS) += lib-bucket_locks.o
compat-$(CPTCFG_BPAUTO_PKCS7) +=
diff --git a/compat/backport-4.0.c b/compat/backport-4.0.c
index eb95082..7f86c69 100644
--- a/compat/backport-4.0.c
+++ b/compat/backport-4.0.c
@@ -17,6 +17,7 @@
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <asm/unaligned.h>
+#include <linux/slab.h>
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
@@ -375,3 +376,35 @@ ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
return ret;
}
EXPORT_SYMBOL(ftrace_print_array_seq);
+
+/**
+ * kfree_const - conditionally free memory
+ * @x: pointer to the memory
+ *
+ * Calls kfree() only if @x is not in the .rodata section.
+ */
+void kfree_const(const void *x)
+{
+ /* if (!is_kernel_rodata((unsigned long)x)) */
+ kfree(x);
+}
+EXPORT_SYMBOL(kfree_const);
+
+/**
+ * kstrdup_const - conditionally duplicate an existing const string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns the source string if it is in the .rodata section, otherwise it
+ * falls back to kstrdup().
+ * Strings allocated by kstrdup_const() should be freed with kfree_const().
+ */
+
+const char *kstrdup_const(const char *s, gfp_t gfp)
+{
+/* if (is_kernel_rodata((unsigned long)s))
+ * return s;
+ */
+ return kstrdup(s, gfp);
+}
+EXPORT_SYMBOL(kstrdup_const);
diff --git a/compat/backport-4.10.c b/compat/backport-4.10.c
index 44e02dd..97d152b 100644
--- a/compat/backport-4.10.c
+++ b/compat/backport-4.10.c
@@ -251,4 +251,41 @@ int mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
return 0;
}
EXPORT_SYMBOL(mii_ethtool_get_link_ksettings);
+
+void *kvmalloc_node(size_t size, gfp_t flags, int node)
+{
+ gfp_t kmalloc_flags = flags;
+ void *ret;
+
+ /*
+ * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
+ * so the given set of flags has to be compatible.
+ */
+ WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+
+ /*
+ * We want to attempt a large physically contiguous block first because
+ * it is less likely to fragment multiple larger blocks and therefore
+ * contribute to a long term fragmentation less than vmalloc fallback.
+ * However make sure that larger requests are not too disruptive - no
+ * OOM killer and no allocation failure warnings as we have a fallback.
+ */
+ if (size > PAGE_SIZE) {
+ kmalloc_flags |= __GFP_NOWARN;
+ kmalloc_flags |= __GFP_NORETRY;
+ }
+
+ ret = kmalloc_node(size, kmalloc_flags, node);
+
+ /*
+ * It doesn't really make sense to fallback to vmalloc for sub page
+ * requests
+ */
+ if (ret || size <= PAGE_SIZE)
+ return ret;
+
+ return vmalloc(size);
+}
+EXPORT_SYMBOL(kvmalloc_node);
+
#endif /* LINUX_VERSION_IS_GEQ(4,6,0) */
diff --git a/compat/backport-4.12.c b/compat/backport-4.12.c
index 2c0053f..8f3e5ab 100644
--- a/compat/backport-4.12.c
+++ b/compat/backport-4.12.c
@@ -3,6 +3,9 @@
*/
#include <net/genetlink.h>
#include <net/sock.h>
+#ifdef CONFIG_OF
+#include <linux/of_device.h>
+#endif
enum nlmsgerr_attrs {
NLMSGERR_ATTR_UNUSED,
@@ -275,3 +278,22 @@ int bp_extack_genl_unregister_family(struct genl_family *family)
return 0;
}
EXPORT_SYMBOL_GPL(bp_extack_genl_unregister_family);
+
+#ifdef CONFIG_OF
+/**
+ * of_device_modalias - Fill buffer with newline terminated modalias string
+ */
+ssize_t bp_of_device_modalias(struct device *dev, char *str, ssize_t len)
+{
+ ssize_t sl = of_device_get_modalias(dev, str, len - 2);
+ if (sl < 0)
+ return sl;
+ if (sl > len - 2)
+ return -ENOMEM;
+
+ str[sl++] = '\n';
+ str[sl] = 0;
+ return sl;
+}
+EXPORT_SYMBOL_GPL(bp_of_device_modalias);
+#endif
diff --git a/compat/backport-4.4.c b/compat/backport-4.4.c
index 1c11a20..660d50b 100644
--- a/compat/backport-4.4.c
+++ b/compat/backport-4.4.c
@@ -152,3 +152,14 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
}
}
EXPORT_SYMBOL(tso_start);
+
+const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap)
+{
+ if (!strchr(fmt, '%'))
+ return kstrdup_const(fmt, gfp);
+ if (!strcmp(fmt, "%s"))
+ return kstrdup_const(va_arg(ap, const char*), gfp);
+ return kvasprintf(gfp, fmt, ap);
+}
+EXPORT_SYMBOL(kvasprintf_const);
+
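The kvasprintf_const() backport above special-cases format strings that need no expansion (formats without '%', and a bare "%s") and routes them through kstrdup_const(). A caller sketch (illustrative only; the wrapper name is invented):

/* Illustrative sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/string.h>

static const char *example_set_name(gfp_t gfp, const char *fmt, ...)
{
	va_list args;
	const char *name;

	va_start(args, fmt);
	name = kvasprintf_const(gfp, fmt, args);
	va_end(args);

	return name;	/* must be released with kfree_const(), not kfree() */
}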
diff --git a/compat/crypto-kpp.c b/compat/crypto-kpp.c
new file mode 100644
index 0000000..ae11fa8
--- /dev/null
+++ b/compat/crypto-kpp.c
@@ -0,0 +1,128 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <linux/compiler.h>
+#include <net/netlink.h>
+#include <crypto/backport-kpp.h>
+#include <crypto/internal/backport-kpp.h>
+
+extern unsigned int crypto_alg_extsize(struct crypto_alg *alg);
+void *crypto_alloc_tfm(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask);
+
+#define CRYPTOCFGA_REPORT_KPP __CRYPTOCFGA_MAX
+#ifdef CONFIG_NET
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_kpp rkpp;
+
+ strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
+ sizeof(struct crypto_report_kpp), &rkpp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+ __maybe_unused;
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : kpp\n");
+}
+
+static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+ struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+ alg->exit(kpp);
+}
+
+static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+ struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+ if (alg->exit)
+ kpp->base.exit = crypto_kpp_exit_tfm;
+
+ if (alg->init)
+ return alg->init(kpp);
+
+ return 0;
+}
+
+static const struct crypto_type crypto_kpp_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_kpp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_kpp_show,
+#endif
+ .report = crypto_kpp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_KPP,
+ .tfmsize = offsetof(struct crypto_kpp, base),
+};
+
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
+
+static void kpp_prepare_alg(struct kpp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_kpp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+}
+
+int crypto_register_kpp(struct kpp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ kpp_prepare_alg(alg);
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_kpp);
+
+void crypto_unregister_kpp(struct kpp_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
diff --git a/compat/crypto-skcipher.c b/compat/crypto-skcipher.c
index 9c5fe5c..def0294 100644
--- a/compat/crypto-skcipher.c
+++ b/compat/crypto-skcipher.c
@@ -48,7 +48,7 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
return req->base.flags;
}
-
+#if LINUX_VERSION_IS_GEQ(4,8,0)
enum {
SKCIPHER_WALK_PHYS = 1 << 0,
SKCIPHER_WALK_SLOW = 1 << 1,
@@ -594,6 +594,35 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
+#endif /* LINUX_VERSION_IS_GEQ(4,8,0) */
+
+#if LINUX_VERSION_IS_LESS(4,2,0)
+
+unsigned int crypto_alg_extsize(struct crypto_alg *alg)
+{
+ return alg->cra_ctxsize +
+ (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+EXPORT_SYMBOL_GPL(crypto_alg_extsize);
+
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+ u32 type, u32 mask)
+{
+#if 0
+ int ret = 0;
+ struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
+
+ if (!IS_ERR(alg)) {
+ crypto_mod_put(alg);
+ ret = 1;
+ }
+#endif
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(crypto_type_has_alg);
+
+#endif
+
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
if (alg->cra_type == &crypto_blkcipher_type)
@@ -945,7 +974,9 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
static const struct crypto_type crypto_skcipher_type2 = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
+#if LINUX_VERSION_IS_GEQ(4,3,0)
.free = crypto_skcipher_free_instance,
+#endif
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
@@ -959,8 +990,10 @@ static const struct crypto_type crypto_skcipher_type2 = {
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
const char *name, u32 type, u32 mask)
{
+#if 0
spawn->base.frontend = &crypto_skcipher_type2;
return crypto_grab_spawn(&spawn->base, name, type, mask);
+#endif
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
diff --git a/compat/lib-iov_iter.c b/compat/lib-iov_iter.c
new file mode 100644
index 0000000..929963d
--- /dev/null
+++ b/compat/lib-iov_iter.c
@@ -0,0 +1,1423 @@
+#include <linux/export.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/splice.h>
+#include <net/checksum.h>
+
+#define PIPE_PARANOIA /* for now */
+
+#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
+ size_t left; \
+ size_t wanted = n; \
+ __p = i->iov; \
+ __v.iov_len = min(n, __p->iov_len - skip); \
+ if (likely(__v.iov_len)) { \
+ __v.iov_base = __p->iov_base + skip; \
+ left = (STEP); \
+ __v.iov_len -= left; \
+ skip += __v.iov_len; \
+ n -= __v.iov_len; \
+ } else { \
+ left = 0; \
+ } \
+ while (unlikely(!left && n)) { \
+ __p++; \
+ __v.iov_len = min(n, __p->iov_len); \
+ if (unlikely(!__v.iov_len)) \
+ continue; \
+ __v.iov_base = __p->iov_base; \
+ left = (STEP); \
+ __v.iov_len -= left; \
+ skip = __v.iov_len; \
+ n -= __v.iov_len; \
+ } \
+ n = wanted - n; \
+}
+
+#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
+ size_t wanted = n; \
+ __p = i->kvec; \
+ __v.iov_len = min(n, __p->iov_len - skip); \
+ if (likely(__v.iov_len)) { \
+ __v.iov_base = __p->iov_base + skip; \
+ (void)(STEP); \
+ skip += __v.iov_len; \
+ n -= __v.iov_len; \
+ } \
+ while (unlikely(n)) { \
+ __p++; \
+ __v.iov_len = min(n, __p->iov_len); \
+ if (unlikely(!__v.iov_len)) \
+ continue; \
+ __v.iov_base = __p->iov_base; \
+ (void)(STEP); \
+ skip = __v.iov_len; \
+ n -= __v.iov_len; \
+ } \
+ n = wanted; \
+}
+
+#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
+ struct bvec_iter __start; \
+ __start.bi_size = n; \
+ __start.bi_bvec_done = skip; \
+ __start.bi_idx = 0; \
+ for_each_bvec(__v, i->bvec, __bi, __start) { \
+ if (!__v.bv_len) \
+ continue; \
+ (void)(STEP); \
+ } \
+}
+
+#define iterate_all_kinds(i, n, v, I, B, K) { \
+ if (likely(n)) { \
+ size_t skip = i->iov_offset; \
+ if (unlikely(i->type & ITER_BVEC)) { \
+ struct bio_vec v; \
+ struct bvec_iter __bi; \
+ iterate_bvec(i, n, v, __bi, skip, (B)) \
+ } else if (unlikely(i->type & ITER_KVEC)) { \
+ const struct kvec *kvec; \
+ struct kvec v; \
+ iterate_kvec(i, n, v, kvec, skip, (K)) \
+ } else { \
+ const struct iovec *iov; \
+ struct iovec v; \
+ iterate_iovec(i, n, v, iov, skip, (I)) \
+ } \
+ } \
+}
+
+#define iterate_and_advance(i, n, v, I, B, K) { \
+ if (unlikely(i->count < n)) \
+ n = i->count; \
+ if (i->count) { \
+ size_t skip = i->iov_offset; \
+ if (unlikely(i->type & ITER_BVEC)) { \
+ const struct bio_vec *bvec = i->bvec; \
+ struct bio_vec v; \
+ struct bvec_iter __bi; \
+ iterate_bvec(i, n, v, __bi, skip, (B)) \
+ i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
+ i->nr_segs -= i->bvec - bvec; \
+ skip = __bi.bi_bvec_done; \
+ } else if (unlikely(i->type & ITER_KVEC)) { \
+ const struct kvec *kvec; \
+ struct kvec v; \
+ iterate_kvec(i, n, v, kvec, skip, (K)) \
+ if (skip == kvec->iov_len) { \
+ kvec++; \
+ skip = 0; \
+ } \
+ i->nr_segs -= kvec - i->kvec; \
+ i->kvec = kvec; \
+ } else { \
+ const struct iovec *iov; \
+ struct iovec v; \
+ iterate_iovec(i, n, v, iov, skip, (I)) \
+ if (skip == iov->iov_len) { \
+ iov++; \
+ skip = 0; \
+ } \
+ i->nr_segs -= iov - i->iov; \
+ i->iov = iov; \
+ } \
+ i->count -= n; \
+ i->iov_offset = skip; \
+ } \
+}
+
+static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+ void *kaddr, *from;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ might_fault();
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
+ kaddr = kmap_atomic(page);
+ from = kaddr + offset;
+
+ /* first chunk, usually the only one */
+ left = __copy_to_user_inatomic(buf, from, copy);
+ copy -= left;
+ skip += copy;
+ from += copy;
+ bytes -= copy;
+
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_to_user_inatomic(buf, from, copy);
+ copy -= left;
+ skip = copy;
+ from += copy;
+ bytes -= copy;
+ }
+ if (likely(!bytes)) {
+ kunmap_atomic(kaddr);
+ goto done;
+ }
+ offset = from - kaddr;
+ buf += copy;
+ kunmap_atomic(kaddr);
+ copy = min(bytes, iov->iov_len - skip);
+ }
+ /* Too bad - revert to non-atomic kmap */
+
+ kaddr = kmap(page);
+ from = kaddr + offset;
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip += copy;
+ from += copy;
+ bytes -= copy;
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip = copy;
+ from += copy;
+ bytes -= copy;
+ }
+ kunmap(page);
+
+done:
+ if (skip == iov->iov_len) {
+ iov++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
+static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+ void *kaddr, *to;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ might_fault();
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+ kaddr = kmap_atomic(page);
+ to = kaddr + offset;
+
+ /* first chunk, usually the only one */
+ left = __copy_from_user_inatomic(to, buf, copy);
+ copy -= left;
+ skip += copy;
+ to += copy;
+ bytes -= copy;
+
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_from_user_inatomic(to, buf, copy);
+ copy -= left;
+ skip = copy;
+ to += copy;
+ bytes -= copy;
+ }
+ if (likely(!bytes)) {
+ kunmap_atomic(kaddr);
+ goto done;
+ }
+ offset = to - kaddr;
+ buf += copy;
+ kunmap_atomic(kaddr);
+ copy = min(bytes, iov->iov_len - skip);
+ }
+ /* Too bad - revert to non-atomic kmap */
+
+ kaddr = kmap(page);
+ to = kaddr + offset;
+ left = __copy_from_user(to, buf, copy);
+ copy -= left;
+ skip += copy;
+ to += copy;
+ bytes -= copy;
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_from_user(to, buf, copy);
+ copy -= left;
+ skip = copy;
+ to += copy;
+ bytes -= copy;
+ }
+ kunmap(page);
+
+done:
+ if (skip == iov->iov_len) {
+ iov++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
+#ifdef PIPE_PARANOIA
+static bool sanity(const struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ int next = pipe->curbuf + pipe->nrbufs;
+ if (i->iov_offset) {
+ struct pipe_buffer *p;
+ if (unlikely(!pipe->nrbufs))
+ goto Bad; // pipe must be non-empty
+ if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
+ goto Bad; // must be at the last buffer...
+
+ p = &pipe->bufs[idx];
+ if (unlikely(p->offset + p->len != i->iov_offset))
+ goto Bad; // ... at the end of segment
+ } else {
+ if (idx != (next & (pipe->buffers - 1)))
+ goto Bad; // must be right after the last buffer
+ }
+ return true;
+Bad:
+ printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
+ printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
+ pipe->curbuf, pipe->nrbufs, pipe->buffers);
+ for (idx = 0; idx < pipe->buffers; idx++)
+ printk(KERN_ERR "[%p %p %d %d]\n",
+ pipe->bufs[idx].ops,
+ pipe->bufs[idx].page,
+ pipe->bufs[idx].offset,
+ pipe->bufs[idx].len);
+ WARN_ON(1);
+ return false;
+}
+#else
+#define sanity(i) true
+#endif
+
+static inline int next_idx(int idx, struct pipe_inode_info *pipe)
+{
+ return (idx + 1) & (pipe->buffers - 1);
+}
+
+static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ struct pipe_buffer *buf;
+ size_t off;
+ int idx;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ if (!sanity(i))
+ return 0;
+
+ off = i->iov_offset;
+ idx = i->idx;
+ buf = &pipe->bufs[idx];
+ if (off) {
+ if (offset == off && buf->page == page) {
+ /* merge with the last one */
+ buf->len += bytes;
+ i->iov_offset += bytes;
+ goto out;
+ }
+ idx = next_idx(idx, pipe);
+ buf = &pipe->bufs[idx];
+ }
+ if (idx == pipe->curbuf && pipe->nrbufs)
+ return 0;
+ pipe->nrbufs++;
+ buf->ops = &page_cache_pipe_buf_ops;
+ get_page(buf->page = page);
+ buf->offset = offset;
+ buf->len = bytes;
+ i->iov_offset = offset + bytes;
+ i->idx = idx;
+out:
+ i->count -= bytes;
+ return bytes;
+}
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+/*
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * bytes. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+ * because it is an invalid address).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ size_t skip = i->iov_offset;
+ const struct iovec *iov;
+ int err;
+ struct iovec v;
+
+ if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+ iterate_iovec(i, bytes, v, iov, skip, ({
+ err = fault_in_pages_readable(v.iov_base, v.iov_len);
+ if (unlikely(err))
+ return err;
+ 0;}))
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
+void iov_iter_init(struct iov_iter *i, int direction,
+ const struct iovec *iov, unsigned long nr_segs,
+ size_t count)
+{
+ /* It will get better. Eventually... */
+ if (uaccess_kernel()) {
+ direction |= ITER_KVEC;
+ i->type = direction;
+ i->kvec = (struct kvec *)iov;
+ } else {
+ i->type = direction;
+ i->iov = iov;
+ }
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_init);
+#endif
+
+static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+{
+ char *from = kmap_atomic(page);
+ memcpy(to, from + offset, len);
+ kunmap_atomic(from);
+}
+
+static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
+{
+ char *to = kmap_atomic(page);
+ memcpy(to + offset, from, len);
+ kunmap_atomic(to);
+}
+
+static void memzero_page(struct page *page, size_t offset, size_t len)
+{
+ char *addr = kmap_atomic(page);
+ memset(addr + offset, 0, len);
+ kunmap_atomic(addr);
+}
+
+#if 0
+static inline bool allocated(struct pipe_buffer *buf)
+{
+ return buf->ops == &default_pipe_buf_ops;
+}
+
+static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
+{
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
+ idx = next_idx(idx, i->pipe);
+ off = 0;
+ }
+ *idxp = idx;
+ *offp = off;
+}
+
+static size_t push_pipe(struct iov_iter *i, size_t size,
+ int *idxp, size_t *offp)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t off;
+ int idx;
+ ssize_t left;
+
+ if (unlikely(size > i->count))
+ size = i->count;
+ if (unlikely(!size))
+ return 0;
+
+ left = size;
+ data_start(i, &idx, &off);
+ *idxp = idx;
+ *offp = off;
+ if (off) {
+ left -= PAGE_SIZE - off;
+ if (left <= 0) {
+ pipe->bufs[idx].len += size;
+ return size;
+ }
+ pipe->bufs[idx].len = PAGE_SIZE;
+ idx = next_idx(idx, pipe);
+ }
+ while (idx != pipe->curbuf || !pipe->nrbufs) {
+ struct page *page = alloc_page(GFP_USER);
+ if (!page)
+ break;
+ pipe->nrbufs++;
+ pipe->bufs[idx].ops = &default_pipe_buf_ops;
+ pipe->bufs[idx].page = page;
+ pipe->bufs[idx].offset = 0;
+ if (left <= PAGE_SIZE) {
+ pipe->bufs[idx].len = left;
+ return size;
+ }
+ pipe->bufs[idx].len = PAGE_SIZE;
+ left -= PAGE_SIZE;
+ idx = next_idx(idx, pipe);
+ }
+ return size - left;
+}
+
+static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= bytes;
+ return bytes;
+}
+#endif
+
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ const char *from = addr;
+#if 0
+ if (unlikely(i->type & ITER_PIPE))
+ return copy_pipe_to_iter(addr, bytes, i);
+#endif
+ if (iter_is_iovec(i))
+ might_fault();
+ iterate_and_advance(i, bytes, v,
+ __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
+ v.iov_len),
+ memcpy_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len),
+ memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_to_iter);
+
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ if (iter_is_iovec(i))
+ might_fault();
+ iterate_and_advance(i, bytes, v,
+ __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
+ v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_from_iter);
+
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return false;
+ }
+ if (unlikely(i->count < bytes))
+ return false;
+
+ if (iter_is_iovec(i))
+ might_fault();
+ iterate_all_kinds(i, bytes, v, ({
+ if (__copy_from_user((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len))
+ return false;
+ 0;}),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ )
+
+ iov_iter_advance(i, bytes);
+ return true;
+}
+EXPORT_SYMBOL(_copy_from_iter_full);
+
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v,
+ __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_from_iter_nocache);
+
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v,
+ __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len),
+ memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+ v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return false;
+ }
+ if (unlikely(i->count < bytes))
+ return false;
+ iterate_all_kinds(i, bytes, v, ({
+ if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len))
+ return false;
+ 0;}),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ )
+
+ iov_iter_advance(i, bytes);
+ return true;
+}
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ struct page *head = compound_head(page);
+ size_t v = n + offset + page_address(page) - page_address(head);
+
+ if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+ return true;
+ WARN_ON(1);
+ return false;
+}
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
+ if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ void *kaddr = kmap_atomic(page);
+ size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
+ kunmap_atomic(kaddr);
+ return wanted;
+ } else if (likely(!(i->type & ITER_PIPE)))
+ return copy_page_to_iter_iovec(page, offset, bytes, i);
+ else
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ void *kaddr = kmap_atomic(page);
+ size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
+ kunmap_atomic(kaddr);
+ return wanted;
+ } else
+ return copy_page_from_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+#endif
+
+#if 0
+static size_t pipe_zero(size_t bytes, struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ memzero_page(pipe->bufs[idx].page, off, chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk;
+ n -= chunk;
+ }
+ i->count -= bytes;
+ return bytes;
+}
+#endif
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_zero(bytes, i);
+ iterate_and_advance(i, bytes, v,
+ clear_user(v.iov_base, v.iov_len),
+ memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+ memset(v.iov_base, 0, v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ kunmap_atomic(kaddr);
+ return 0;
+ }
+ if (unlikely(i->type & ITER_PIPE)) {
+ kunmap_atomic(kaddr);
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_all_kinds(i, bytes, v,
+ __copy_from_user_inatomic((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ )
+ kunmap_atomic(kaddr);
+ return bytes;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+#endif
+
+static inline void pipe_truncate(struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ if (pipe->nrbufs) {
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+ if (off) {
+ pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+ idx = next_idx(idx, pipe);
+ nrbufs++;
+ }
+ while (pipe->nrbufs > nrbufs) {
+ pipe_buf_release(pipe, &pipe->bufs[idx]);
+ idx = next_idx(idx, pipe);
+ pipe->nrbufs--;
+ }
+ }
+}
+
+static void pipe_advance(struct iov_iter *i, size_t size)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (size) {
+ struct pipe_buffer *buf;
+ size_t off = i->iov_offset, left = size;
+ int idx = i->idx;
+ if (off) /* make it relative to the beginning of buffer */
+ left += off - pipe->bufs[idx].offset;
+ while (1) {
+ buf = &pipe->bufs[idx];
+ if (left <= buf->len)
+ break;
+ left -= buf->len;
+ idx = next_idx(idx, pipe);
+ }
+ i->idx = idx;
+ i->iov_offset = buf->offset + left;
+ }
+ i->count -= size;
+ /* ... and discard everything past that point */
+ pipe_truncate(i);
+}
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->type & ITER_PIPE)) {
+ pipe_advance(i, size);
+ return;
+ }
+ iterate_and_advance(i, size, v, 0, 0, 0)
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ if (WARN_ON(unroll > MAX_RW_COUNT))
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= unroll;
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logics for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+ if (unlikely(i->type & ITER_PIPE))
+ return i->count; // it is a silly place, anyway
+ if (i->nr_segs == 1)
+ return i->count;
+ else if (i->type & ITER_BVEC)
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ else
+ return min(i->count, i->iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+void iov_iter_kvec(struct iov_iter *i, int direction,
+ const struct kvec *kvec, unsigned long nr_segs,
+ size_t count)
+{
+ BUG_ON(!(direction & ITER_KVEC));
+ i->type = direction;
+ i->kvec = kvec;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_kvec);
+
+void iov_iter_bvec(struct iov_iter *i, int direction,
+ const struct bio_vec *bvec, unsigned long nr_segs,
+ size_t count)
+{
+ BUG_ON(!(direction & ITER_BVEC));
+ i->type = direction;
+ i->bvec = bvec;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_bvec);
+
+void iov_iter_pipe(struct iov_iter *i, int direction,
+ struct pipe_inode_info *pipe,
+ size_t count)
+{
+ BUG_ON(direction != ITER_PIPE);
+ WARN_ON(pipe->nrbufs == pipe->buffers);
+ i->type = direction;
+ i->pipe = pipe;
+ i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ i->iov_offset = 0;
+ i->count = count;
+ i->start_idx = i->idx;
+}
+EXPORT_SYMBOL(iov_iter_pipe);
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ size_t size = i->count;
+
+ if (unlikely(i->type & ITER_PIPE)) {
+ if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+ return size | i->iov_offset;
+ return size;
+ }
+ iterate_all_kinds(i, size, v,
+ (res |= (unsigned long)v.iov_base | v.iov_len, 0),
+ res |= v.bv_offset | v.bv_len,
+ res |= (unsigned long)v.iov_base | v.iov_len
+ )
+ return res;
+}
+EXPORT_SYMBOL(iov_iter_alignment);
+
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ size_t size = i->count;
+
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return ~0U;
+ }
+
+ iterate_all_kinds(i, size, v,
+ (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+ (size != v.iov_len ? size : 0), 0),
+ (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+ (size != v.bv_len ? size : 0)),
+ (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+ (size != v.iov_len ? size : 0))
+ );
+ return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+#endif
+
+#if 0
+static inline size_t __pipe_get_pages(struct iov_iter *i,
+ size_t maxsize,
+ struct page **pages,
+ int idx,
+ size_t *start)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ ssize_t n = push_pipe(i, maxsize, &idx, start);
+ if (!n)
+ return -EFAULT;
+
+ maxsize = n;
+ n += *start;
+ while (n > 0) {
+ get_page(*pages++ = pipe->bufs[idx].page);
+ idx = next_idx(idx, pipe);
+ n -= PAGE_SIZE;
+ }
+
+ return maxsize;
+}
+
+static ssize_t pipe_get_pages(struct iov_iter *i,
+ struct page **pages, size_t maxsize, unsigned maxpages,
+ size_t *start)
+{
+ unsigned npages;
+ size_t capacity;
+ int idx;
+
+ if (!maxsize)
+ return 0;
+
+ if (!sanity(i))
+ return -EFAULT;
+
+ data_start(i, &idx, start);
+ /* some of this one + all after this one */
+ npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
+ capacity = min(npages,maxpages) * PAGE_SIZE - *start;
+
+ return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
+}
+#endif
+#if LINUX_VERSION_IS_LESS(4,7,0)
+ssize_t iov_iter_get_pages(struct iov_iter *i,
+ struct page **pages, size_t maxsize, unsigned maxpages,
+ size_t *start)
+{
+ if (maxsize > i->count)
+ maxsize = i->count;
+
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ iterate_all_kinds(i, maxsize, v, ({
+ unsigned long addr = (unsigned long)v.iov_base;
+ size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+ int n;
+ int res;
+
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ addr &= ~(PAGE_SIZE - 1);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+ if (unlikely(res < 0))
+ return res;
+ return (res == n ? len : res * PAGE_SIZE) - *start;
+ 0;}),({
+ /* can't be more than PAGE_SIZE */
+ *start = v.bv_offset;
+ get_page(*pages = v.bv_page);
+ return v.bv_len;
+ }),({
+ return -EFAULT;
+ })
+ )
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages);
+#endif
+
+static struct page **get_pages_array(size_t n)
+{
+ return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
+}
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ size_t *start)
+{
+ struct page **p;
+
+ if (maxsize > i->count)
+ maxsize = i->count;
+
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_get_pages_alloc(i, pages, maxsize, start);
+ iterate_all_kinds(i, maxsize, v, ({
+ unsigned long addr = (unsigned long)v.iov_base;
+ size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+ int n;
+ int res;
+
+ addr &= ~(PAGE_SIZE - 1);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ p = get_pages_array(n);
+ if (!p)
+ return -ENOMEM;
+ res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+ if (unlikely(res < 0)) {
+ kvfree(p);
+ return res;
+ }
+ *pages = p;
+ return (res == n ? len : res * PAGE_SIZE) - *start;
+ 0;}),({
+ /* can't be more than PAGE_SIZE */
+ *start = v.bv_offset;
+ *pages = p = get_pages_array(1);
+ if (!p)
+ return -ENOMEM;
+ get_page(*p = v.bv_page);
+ return v.bv_len;
+ }),({
+ return -EFAULT;
+ })
+ )
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages_alloc);
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+ struct iov_iter *i)
+{
+ char *to = addr;
+ __wsum sum, next;
+ size_t off = 0;
+ sum = *csum;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v, ({
+ int err = 0;
+ next = csum_and_copy_from_user(v.iov_base,
+ (to += v.iov_len) - v.iov_len,
+ v.iov_len, 0, &err);
+ if (!err) {
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ }
+ err ? v.iov_len : 0;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ next = csum_partial_copy_nocheck(p + v.bv_offset,
+ (to += v.bv_len) - v.bv_len,
+ v.bv_len, 0);
+ kunmap_atomic(p);
+ sum = csum_block_add(sum, next, off);
+ off += v.bv_len;
+ }),({
+ next = csum_partial_copy_nocheck(v.iov_base,
+ (to += v.iov_len) - v.iov_len,
+ v.iov_len, 0);
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ })
+ )
+ *csum = sum;
+ return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);
+#endif
+
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
+ struct iov_iter *i)
+{
+ char *to = addr;
+ __wsum sum, next;
+ size_t off = 0;
+ sum = *csum;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return false;
+ }
+ if (unlikely(i->count < bytes))
+ return false;
+ iterate_all_kinds(i, bytes, v, ({
+ int err = 0;
+ next = csum_and_copy_from_user(v.iov_base,
+ (to += v.iov_len) - v.iov_len,
+ v.iov_len, 0, &err);
+ if (err)
+ return false;
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ 0;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ next = csum_partial_copy_nocheck(p + v.bv_offset,
+ (to += v.bv_len) - v.bv_len,
+ v.bv_len, 0);
+ kunmap_atomic(p);
+ sum = csum_block_add(sum, next, off);
+ off += v.bv_len;
+ }),({
+ next = csum_partial_copy_nocheck(v.iov_base,
+ (to += v.iov_len) - v.iov_len,
+ v.iov_len, 0);
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ })
+ )
+ *csum = sum;
+ iov_iter_advance(i, bytes);
+ return true;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
+ struct iov_iter *i)
+{
+ const char *from = addr;
+ __wsum sum, next;
+ size_t off = 0;
+ sum = *csum;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1); /* for now */
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v, ({
+ int err = 0;
+ next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
+ v.iov_base,
+ v.iov_len, 0, &err);
+ if (!err) {
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ }
+ err ? v.iov_len : 0;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
+ p + v.bv_offset,
+ v.bv_len, 0);
+ kunmap_atomic(p);
+ sum = csum_block_add(sum, next, off);
+ off += v.bv_len;
+ }),({
+ next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
+ v.iov_base,
+ v.iov_len, 0);
+ sum = csum_block_add(sum, next, off);
+ off += v.iov_len;
+ })
+ )
+ *csum = sum;
+ return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_to_iter);
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t size = i->count;
+ int npages = 0;
+
+ if (!size)
+ return 0;
+
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ data_start(i, &idx, &off);
+ /* some of this one + all after this one */
+ npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
+ if (npages >= maxpages)
+ return maxpages;
+ } else iterate_all_kinds(i, size, v, ({
+ unsigned long p = (unsigned long)v.iov_base;
+ npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+ - p / PAGE_SIZE;
+ if (npages >= maxpages)
+ return maxpages;
+ 0;}),({
+ npages++;
+ if (npages >= maxpages)
+ return maxpages;
+ }),({
+ unsigned long p = (unsigned long)v.iov_base;
+ npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+ - p / PAGE_SIZE;
+ if (npages >= maxpages)
+ return maxpages;
+ })
+ )
+ return npages;
+}
+EXPORT_SYMBOL(iov_iter_npages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+ *new = *old;
+ if (unlikely(new->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ if (new->type & ITER_BVEC)
+ return new->bvec = kmemdup(new->bvec,
+ new->nr_segs * sizeof(struct bio_vec),
+ flags);
+ else
+ /* iovec and kvec have identical layout */
+ return new->iov = kmemdup(new->iov,
+ new->nr_segs * sizeof(struct iovec),
+ flags);
+}
+EXPORT_SYMBOL(dup_iter);
+
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ * into the kernel, check that it is valid, and initialize a new
+ * &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iov: (input and output parameter) Pointer to pointer to (usually small
+ * on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iov is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iov on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iov. This means that
+ * the caller may call kfree() on *@iov regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: 0 on success or negative error code on error.
+ */
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
+{
+ ssize_t n;
+ struct iovec *p;
+ n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+ *iov, &p);
+ if (n < 0) {
+ if (p != *iov)
+ kfree(p);
+ *iov = NULL;
+ return n;
+ }
+ iov_iter_init(i, type, p, nr_segs, n);
+ *iov = p == *iov ? NULL : p;
+ return 0;
+}
+EXPORT_SYMBOL(import_iovec);
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
+{
+ ssize_t n;
+ struct iovec *p;
+ n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+ *iov, &p);
+ if (n < 0) {
+ if (p != *iov)
+ kfree(p);
+ *iov = NULL;
+ return n;
+ }
+ iov_iter_init(i, type, p, nr_segs, n);
+ *iov = p == *iov ? NULL : p;
+ return 0;
+}
+#endif
+
+#if LINUX_VERSION_IS_LESS(4,7,0)
+int import_single_range(int rw, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i)
+{
+ if (len > MAX_RW_COUNT)
+ len = MAX_RW_COUNT;
+ if (unlikely(!access_ok(!rw, buf, len)))
+ return -EFAULT;
+
+ iov->iov_base = buf;
+ iov->iov_len = len;
+ iov_iter_init(i, rw, iov, 1, len);
+ return 0;
+}
+EXPORT_SYMBOL(import_single_range);
+#endif
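The lib-iov_iter.c file above is adapted from upstream lib/iov_iter.c, mainly so that _copy_from_iter_full() (declared in backport-include/linux/uio.h) is available to the backported Bluetooth code on older kernels. A consumer sketch (illustrative only; the function name is invented):

/* Illustrative sketch, not part of this patch. */
#include <linux/uio.h>
#include <linux/errno.h>

static int read_exact(void *buf, size_t len, struct iov_iter *from)
{
	/* copies all len bytes and advances the iterator on success,
	 * otherwise reports failure without a partial advance */
	if (!_copy_from_iter_full(buf, len, from))
		return -EFAULT;

	return 0;
}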
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 8f0e8d8..9b8bda8 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -30,7 +30,7 @@ obj-$(CPTCFG_BT_HCIUART_NOKIA) += hci_nokia.o
obj-$(CPTCFG_BT_HCIRSI) += btrsi.o
btmrvl-y := btmrvl_main.o
-btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
+btmrvl-$(CPTCFG_DEBUG_FS) += btmrvl_debugfs.o
hci_uart-y := hci_ldisc.o
hci_uart-$(CPTCFG_BT_HCIUART_SERDEV) += hci_serdev.o
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 888bac4..26ea804 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1310,6 +1310,7 @@ rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv,
return RDWR_STATUS_SUCCESS;
}
+#if LINUX_VERSION_IS_GEQ(4,16,0)
/* This function dump sdio register and memory data */
static void btmrvl_sdio_coredump(struct device *dev)
{
@@ -1493,6 +1494,7 @@ done:
dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
}
+#endif
static int btmrvl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
@@ -1721,7 +1723,9 @@ static struct sdio_driver bt_mrvl_sdio = {
.remove = btmrvl_sdio_remove,
.drv = {
.owner = THIS_MODULE,
+#if LINUX_VERSION_IS_GEQ(4,16,0)
.coredump = btmrvl_sdio_coredump,
+#endif
.pm = &btmrvl_sdio_pm_ops,
}
};
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644
index 0000000..50bb1c6
--- /dev/null
+++ b/drivers/tty/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CPTCFG_SERIAL_DEV_BUS) += serdev/
diff --git a/drivers/tty/serdev/Kconfig b/drivers/tty/serdev/Kconfig
new file mode 100644
index 0000000..8f4a442
--- /dev/null
+++ b/drivers/tty/serdev/Kconfig
@@ -0,0 +1,25 @@
+#
+# Serial bus device driver configuration
+#
+menuconfig SERIAL_DEV_BUS
+ tristate "Serial device bus"
+ depends on !KERNEL_3_3
+ help
+ Core support for devices connected via a serial port.
+
+ Note that you typically also want to enable TTY port controller support.
+
+if SERIAL_DEV_BUS
+
+config SERIAL_DEV_CTRL_TTYPORT
+ bool "Serial device TTY port controller"
+ help
+ Say Y here if you want to use the Serial device bus with common TTY
+ drivers (e.g. serial drivers).
+
+ If unsure, say Y.
+ depends on TTY
+ depends on SERIAL_DEV_BUS != m
+ default y
+
+endif
diff --git a/drivers/tty/serdev/Makefile b/drivers/tty/serdev/Makefile
new file mode 100644
index 0000000..6c366bf
--- /dev/null
+++ b/drivers/tty/serdev/Makefile
@@ -0,0 +1,5 @@
+serdev-objs := core.o
+
+obj-$(CPTCFG_SERIAL_DEV_BUS) += serdev.o
+
+obj-$(CPTCFG_SERIAL_DEV_CTRL_TTYPORT) += serdev-ttyport.o
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
new file mode 100644
index 0000000..87f4e28
--- /dev/null
+++ b/drivers/tty/serdev/core.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * Based on drivers/spmi/spmi.c:
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+static bool is_registered;
+static DEFINE_IDA(ctrl_ida);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int len;
+
+#if LINUX_VERSION_IS_GEQ(3,14,0)
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+#endif
+
+ return of_device_modalias(dev, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *serdev_device_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(serdev_device);
+
+static int serdev_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int rc;
+
+ /* TODO: platform modalias */
+
+#if LINUX_VERSION_IS_GEQ(3,14,0)
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+#endif
+
+ return of_device_uevent_modalias(dev, env);
+}
+
+static void serdev_device_release(struct device *dev)
+{
+ struct serdev_device *serdev = to_serdev_device(dev);
+ kfree(serdev);
+}
+
+static const struct device_type serdev_device_type = {
+ .groups = serdev_device_groups,
+ .uevent = serdev_device_uevent,
+ .release = serdev_device_release,
+};
+
+static bool is_serdev_device(const struct device *dev)
+{
+ return dev->type == &serdev_device_type;
+}
+
+static void serdev_ctrl_release(struct device *dev)
+{
+ struct serdev_controller *ctrl = to_serdev_controller(dev);
+ ida_simple_remove(&ctrl_ida, ctrl->nr);
+ kfree(ctrl);
+}
+
+static const struct device_type serdev_ctrl_type = {
+ .release = serdev_ctrl_release,
+};
+
+static int serdev_device_match(struct device *dev, struct device_driver *drv)
+{
+ if (!is_serdev_device(dev))
+ return 0;
+
+ /* TODO: platform matching */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ return of_driver_match_device(dev, drv);
+}
+
+/**
+ * serdev_device_add() - add a device previously constructed via serdev_device_alloc()
+ * @serdev: serdev_device to be added
+ */
+int serdev_device_add(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+ struct device *parent = serdev->dev.parent;
+ int err;
+
+ dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
+
+ /* Only a single slave device is currently supported. */
+ if (ctrl->serdev) {
+ dev_err(&serdev->dev, "controller busy\n");
+ return -EBUSY;
+ }
+ ctrl->serdev = serdev;
+
+ err = device_add(&serdev->dev);
+ if (err < 0) {
+ dev_err(&serdev->dev, "Can't add %s, status %d\n",
+ dev_name(&serdev->dev), err);
+ goto err_clear_serdev;
+ }
+
+ dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
+
+ return 0;
+
+err_clear_serdev:
+ ctrl->serdev = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(serdev_device_add);
+
+/**
+ * serdev_device_remove(): remove a serdev device
+ * @serdev: serdev_device to be removed
+ */
+void serdev_device_remove(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ device_unregister(&serdev->dev);
+ ctrl->serdev = NULL;
+}
+EXPORT_SYMBOL_GPL(serdev_device_remove);
+
+int serdev_device_open(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->open)
+ return -EINVAL;
+
+ return ctrl->ops->open(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_open);
+
+void serdev_device_close(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->close)
+ return;
+
+ ctrl->ops->close(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_close);
+
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
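+
+/*
+ * Usage sketch (illustrative; my_probe() is a hypothetical client probe):
+ * opening through the managed helper ties the opened port to the device
+ * lifetime, so no explicit serdev_device_close() is needed on the error
+ * or unbind paths.
+ *
+ *	static int my_probe(struct serdev_device *serdev)
+ *	{
+ *		int ret;
+ *
+ *		ret = devm_serdev_device_open(&serdev->dev, serdev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		serdev_device_set_baudrate(serdev, 115200);
+ *		return 0;
+ *	}
+ */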
+
+void serdev_device_write_wakeup(struct serdev_device *serdev)
+{
+ complete(&serdev->write_comp);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
+
+int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_buf)
+ return -EINVAL;
+
+ return ctrl->ops->write_buf(ctrl, buf, count);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+
+int serdev_device_write(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count,
+ unsigned long timeout)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+ int ret;
+
+ if (!ctrl || !ctrl->ops->write_buf ||
+ (timeout && !serdev->ops->write_wakeup))
+ return -EINVAL;
+
+ mutex_lock(&serdev->write_lock);
+ do {
+ reinit_completion(&serdev->write_comp);
+
+ ret = ctrl->ops->write_buf(ctrl, buf, count);
+ if (ret < 0)
+ break;
+
+ buf += ret;
+ count -= ret;
+
+ } while (count &&
+ (timeout = wait_for_completion_timeout(&serdev->write_comp,
+ timeout)));
+ mutex_unlock(&serdev->write_lock);
+ return ret < 0 ? ret : (count ? -ETIMEDOUT : 0);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write);
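+
+/*
+ * Usage sketch (illustrative; buf/len are placeholders): a client that has
+ * registered a write_wakeup callback can use this blocking variant, which
+ * retries until all bytes are accepted or the timeout expires, whereas
+ * serdev_device_write_buf() returns immediately and may accept fewer than
+ * len bytes.
+ *
+ *	ret = serdev_device_write(serdev, buf, len, msecs_to_jiffies(100));
+ *	if (ret)
+ *		dev_err(&serdev->dev, "write failed: %d\n", ret);
+ */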
+
+void serdev_device_write_flush(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_flush)
+ return;
+
+ ctrl->ops->write_flush(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_flush);
+
+int serdev_device_write_room(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->write_room)
+ return 0;
+
+ return serdev->ctrl->ops->write_room(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_room);
+
+unsigned int serdev_device_set_baudrate(struct serdev_device *serdev, unsigned int speed)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_baudrate)
+ return 0;
+
+ return ctrl->ops->set_baudrate(ctrl, speed);
+
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_baudrate);
+
+void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_flow_control)
+ return;
+
+ ctrl->ops->set_flow_control(ctrl, enable);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
+
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_parity)
+ return -ENOTSUPP;
+
+ return ctrl->ops->set_parity(ctrl, parity);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_parity);
+
+void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->wait_until_sent)
+ return;
+
+ ctrl->ops->wait_until_sent(ctrl, timeout);
+}
+EXPORT_SYMBOL_GPL(serdev_device_wait_until_sent);
+
+int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->get_tiocm)
+ return -ENOTSUPP;
+
+ return ctrl->ops->get_tiocm(ctrl);
+}
+EXPORT_SYMBOL_GPL(serdev_device_get_tiocm);
+
+int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_tiocm)
+ return -ENOTSUPP;
+
+ return ctrl->ops->set_tiocm(ctrl, set, clear);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_tiocm);
+
+static int serdev_drv_probe(struct device *dev)
+{
+ const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
+
+ return sdrv->probe(to_serdev_device(dev));
+}
+
+static int serdev_drv_remove(struct device *dev)
+{
+ const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
+ return 0;
+}
+
+static struct bus_type serdev_bus_type = {
+ .name = "serial",
+ .match = serdev_device_match,
+ .probe = serdev_drv_probe,
+ .remove = serdev_drv_remove,
+};
+
+/**
+ * serdev_device_alloc() - Allocate a new serdev device
+ * @ctrl: associated controller
+ *
+ * Caller is responsible for either calling serdev_device_add() to add the
+ * newly allocated device, or calling serdev_device_put() to discard it.
+ */
+struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
+{
+ struct serdev_device *serdev;
+
+ serdev = kzalloc(sizeof(*serdev), GFP_KERNEL);
+ if (!serdev)
+ return NULL;
+
+ serdev->ctrl = ctrl;
+ device_initialize(&serdev->dev);
+ serdev->dev.parent = &ctrl->dev;
+ serdev->dev.bus = &serdev_bus_type;
+ serdev->dev.type = &serdev_device_type;
+ init_completion(&serdev->write_comp);
+ mutex_init(&serdev->write_lock);
+ return serdev;
+}
+EXPORT_SYMBOL_GPL(serdev_device_alloc);
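+
+/*
+ * Usage sketch (illustrative): the error-handling contract matters here; a
+ * device that fails serdev_device_add() must be released with
+ * serdev_device_put(), exactly as of_serdev_register_devices() below does.
+ *
+ *	serdev = serdev_device_alloc(ctrl);
+ *	if (!serdev)
+ *		return -ENOMEM;
+ *	err = serdev_device_add(serdev);
+ *	if (err)
+ *		serdev_device_put(serdev);
+ */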
+
+/**
+ * serdev_controller_alloc() - Allocate a new serdev controller
+ * @parent: parent device
+ * @size: size of private data
+ *
+ * Caller is responsible for either calling serdev_controller_add() to add the
+ * newly allocated controller, or calling serdev_controller_put() to discard it.
+ * The allocated private data region may be accessed via
+ * serdev_controller_get_drvdata()
+ */
+struct serdev_controller *serdev_controller_alloc(struct device *parent,
+ size_t size)
+{
+ struct serdev_controller *ctrl;
+ int id;
+
+ if (WARN_ON(!parent))
+ return NULL;
+
+ ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(parent,
+ "unable to allocate serdev controller identifier.\n");
+ goto err_free;
+ }
+
+ ctrl->nr = id;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &serdev_ctrl_type;
+ ctrl->dev.bus = &serdev_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ serdev_controller_set_drvdata(ctrl, &ctrl[1]);
+
+ dev_set_name(&ctrl->dev, "serial%d", id);
+
+ dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
+ return ctrl;
+
+err_free:
+ kfree(ctrl);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(serdev_controller_alloc);
+
+static int of_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ struct device_node *node;
+ struct serdev_device *serdev = NULL;
+ int err;
+ bool found = false;
+
+ for_each_available_child_of_node(ctrl->dev.of_node, node) {
+ if (!of_get_property(node, "compatible", NULL))
+ continue;
+
+ dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
+
+ serdev = serdev_device_alloc(ctrl);
+ if (!serdev)
+ continue;
+
+ serdev->dev.of_node = node;
+
+ err = serdev_device_add(serdev);
+ if (err) {
+ dev_err(&serdev->dev,
+ "failure adding device. status %d\n", err);
+ serdev_device_put(serdev);
+ } else
+ found = true;
+ }
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct serdev_device *serdev = NULL;
+ int err;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ serdev = serdev_device_alloc(ctrl);
+ if (!serdev) {
+ dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_COMPANION_SET(&serdev->dev, adev);
+ acpi_device_set_enumerated(adev);
+
+ err = serdev_device_add(serdev);
+ if (err) {
+ dev_err(&serdev->dev,
+ "failure adding ACPI serdev device. status %d\n", err);
+ serdev_device_put(serdev);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct serdev_controller *ctrl = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ return acpi_serdev_register_device(ctrl, adev);
+}
+
+static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(ctrl->dev.parent);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_serdev_add_device, NULL, ctrl, NULL);
+ if (ACPI_FAILURE(status))
+ dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+
+ if (!ctrl->serdev)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
+/**
+ * serdev_controller_add() - Add a serdev controller
+ * @ctrl: controller to be registered.
+ *
+ * Register a controller previously allocated via serdev_controller_alloc() with
+ * the serdev core.
+ */
+int serdev_controller_add(struct serdev_controller *ctrl)
+{
+ int ret_of, ret_acpi, ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ return -EAGAIN;
+
+ ret = device_add(&ctrl->dev);
+ if (ret)
+ return ret;
+
+ ret_of = of_serdev_register_devices(ctrl);
+ ret_acpi = acpi_serdev_register_devices(ctrl);
+ if (ret_of && ret_acpi) {
+ dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
+ ret_of, ret_acpi);
+ ret = -ENODEV;
+ goto out_dev_del;
+ }
+
+ dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
+ ctrl->nr, &ctrl->dev);
+ return 0;
+
+out_dev_del:
+ device_del(&ctrl->dev);
+ return ret;
+};
+EXPORT_SYMBOL_GPL(serdev_controller_add);
+
+/* Remove a device associated with a controller */
+static int serdev_remove_device(struct device *dev, void *data)
+{
+ struct serdev_device *serdev = to_serdev_device(dev);
+ if (dev->type == &serdev_device_type)
+ serdev_device_remove(serdev);
+ return 0;
+}
+
+/**
+ * serdev_controller_remove(): remove a serdev controller
+ * @ctrl: controller to remove
+ *
+ * Remove a serdev controller. Caller is responsible for calling
+ * serdev_controller_put() to discard the allocated controller.
+ */
+void serdev_controller_remove(struct serdev_controller *ctrl)
+{
+ int dummy;
+
+ if (!ctrl)
+ return;
+
+ dummy = device_for_each_child(&ctrl->dev, NULL,
+ serdev_remove_device);
+ device_del(&ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(serdev_controller_remove);
+
+/**
+ * __serdev_device_driver_register() - Register client driver with serdev core
+ * @sdrv: client driver to be associated with client-device.
+ * @owner: module owning the client driver.
+ *
+ * This API will register the client driver with the serdev framework.
+ * It is typically called from the driver's module-init function.
+ */
+int __serdev_device_driver_register(struct serdev_device_driver *sdrv, struct module *owner)
+{
+ sdrv->driver.bus = &serdev_bus_type;
+ sdrv->driver.owner = owner;
+
+#if LINUX_VERSION_IS_GEQ(3,14,0)
+ /* force drivers to async probe so I/O is possible in probe */
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+#endif
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
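+
+/*
+ * Usage sketch (illustrative; the my_* names are hypothetical): client
+ * drivers normally go through the module_serdev_device_driver() helper in
+ * serdev.h instead of calling this function directly.
+ *
+ *	static struct serdev_device_driver my_driver = {
+ *		.probe  = my_probe,
+ *		.remove = my_remove,
+ *		.driver = {
+ *			.name           = "my-serdev-client",
+ *			.of_match_table = my_of_match,
+ *		},
+ *	};
+ *	module_serdev_device_driver(my_driver);
+ */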
+
+static void __exit serdev_exit(void)
+{
+ bus_unregister(&serdev_bus_type);
+ ida_destroy(&ctrl_ida);
+}
+module_exit(serdev_exit);
+
+static int __init serdev_init(void)
+{
+ int ret;
+
+ ret = bus_register(&serdev_bus_type);
+ if (ret)
+ return ret;
+
+ is_registered = true;
+ return 0;
+}
+/* Must be before serial drivers register */
+postcore_initcall(serdev_init);
+
+MODULE_AUTHOR("Rob Herring <robh@kernel.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Serial attached device bus");
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
new file mode 100644
index 0000000..fa16729
--- /dev/null
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ */
+#include <linux/kernel.h>
+#include <linux/serdev.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/poll.h>
+
+#define SERPORT_ACTIVE 1
+
+struct serport {
+ struct tty_port *port;
+ struct tty_struct *tty;
+ struct tty_driver *tty_drv;
+ int tty_idx;
+ unsigned long flags;
+};
+
+/*
+ * Callback functions from the tty port.
+ */
+
+static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
+ const unsigned char *fp, size_t count)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ int ret;
+
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
+ return 0;
+
+ ret = serdev_controller_receive_buf(ctrl, cp, count);
+
+ dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
+ "receive_buf returns %d (count = %zu)\n",
+ ret, count);
+ if (ret < 0)
+ return 0;
+ else if (ret > count)
+ return count;
+
+ return ret;
+}
+
+static void ttyport_write_wakeup(struct tty_port *port)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(port);
+ if (!tty)
+ return;
+
+ if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
+ test_bit(SERPORT_ACTIVE, &serport->flags))
+ serdev_controller_write_wakeup(ctrl);
+
+ /* Wake up any tty_wait_until_sent() */
+ wake_up_interruptible(&tty->write_wait);
+
+ tty_kref_put(tty);
+}
+
+static const struct tty_port_client_operations client_ops = {
+ .receive_buf = ttyport_receive_buf,
+ .write_wakeup = ttyport_write_wakeup,
+};
+
+/*
+ * Callback functions from the serdev core.
+ */
+
+static int ttyport_write_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t len)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
+ return 0;
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return tty->ops->write(serport->tty, data, len);
+}
+
+static void ttyport_write_flush(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ tty_driver_flush_buffer(tty);
+}
+
+static int ttyport_write_room(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ return tty_write_room(tty);
+}
+
+static int ttyport_open(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty;
+ struct ktermios ktermios;
+ int ret;
+
+ tty = tty_init_dev(serport->tty_drv, serport->tty_idx);
+ if (IS_ERR(tty))
+ return PTR_ERR(tty);
+ serport->tty = tty;
+
+ if (!tty->ops->open || !tty->ops->close) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = tty->ops->open(serport->tty, NULL);
+ if (ret)
+ goto err_close;
+
+ tty_unlock(serport->tty);
+
+ /* Bring the UART into a known 8 bits no parity hw fc state */
+ ktermios = tty->termios;
+ ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
+ INLCR | IGNCR | ICRNL | IXON);
+ ktermios.c_oflag &= ~OPOST;
+ ktermios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ ktermios.c_cflag &= ~(CSIZE | PARENB);
+ ktermios.c_cflag |= CS8;
+ ktermios.c_cflag |= CRTSCTS;
+ /* Hangups are not supported so make sure to ignore carrier detect. */
+ ktermios.c_cflag |= CLOCAL;
+ tty_set_termios(tty, &ktermios);
+
+ set_bit(SERPORT_ACTIVE, &serport->flags);
+
+ return 0;
+
+err_close:
+ tty->ops->close(tty, NULL);
+err_unlock:
+ tty_unlock(tty);
+ tty_release_struct(tty, serport->tty_idx);
+
+ return ret;
+}
+
+static void ttyport_close(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ clear_bit(SERPORT_ACTIVE, &serport->flags);
+
+ tty_lock(tty);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+ tty_unlock(tty);
+
+ tty_release_struct(tty, serport->tty_idx);
+}
+
+static unsigned int ttyport_set_baudrate(struct serdev_controller *ctrl, unsigned int speed)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ ktermios.c_cflag &= ~CBAUD;
+ tty_termios_encode_baud_rate(&ktermios, speed, speed);
+
+ /* tty_set_termios() return not checked as it is always 0 */
+ tty_set_termios(tty, &ktermios);
+ return ktermios.c_ospeed;
+}
+
+static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ if (enable)
+ ktermios.c_cflag |= CRTSCTS;
+ else
+ ktermios.c_cflag &= ~CRTSCTS;
+
+ tty_set_termios(tty, &ktermios);
+}
+
+static int ttyport_set_parity(struct serdev_controller *ctrl,
+ enum serdev_parity parity)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ ktermios.c_cflag &= ~(PARENB | PARODD | CMSPAR);
+ if (parity != SERDEV_PARITY_NONE) {
+ ktermios.c_cflag |= PARENB;
+ if (parity == SERDEV_PARITY_ODD)
+ ktermios.c_cflag |= PARODD;
+ }
+
+ tty_set_termios(tty, &ktermios);
+
+ if ((tty->termios.c_cflag & (PARENB | PARODD | CMSPAR)) !=
+ (ktermios.c_cflag & (PARENB | PARODD | CMSPAR)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ tty_wait_until_sent(tty, timeout);
+}
+
+static int ttyport_get_tiocm(struct serdev_controller *ctrl)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!tty->ops->tiocmget)
+ return -ENOTSUPP;
+
+ return tty->driver->ops->tiocmget(tty);
+}
+
+static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, unsigned int clear)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+
+ if (!tty->ops->tiocmset)
+ return -ENOTSUPP;
+
+ return tty->driver->ops->tiocmset(tty, set, clear);
+}
+
+static const struct serdev_controller_ops ctrl_ops = {
+ .write_buf = ttyport_write_buf,
+ .write_flush = ttyport_write_flush,
+ .write_room = ttyport_write_room,
+ .open = ttyport_open,
+ .close = ttyport_close,
+ .set_flow_control = ttyport_set_flow_control,
+ .set_parity = ttyport_set_parity,
+ .set_baudrate = ttyport_set_baudrate,
+ .wait_until_sent = ttyport_wait_until_sent,
+ .get_tiocm = ttyport_get_tiocm,
+ .set_tiocm = ttyport_set_tiocm,
+};
+
+struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+{
+ const struct tty_port_client_operations *old_ops;
+ struct serdev_controller *ctrl;
+ struct serport *serport;
+ int ret;
+
+ if (!port || !drv || !parent)
+ return ERR_PTR(-ENODEV);
+
+ ctrl = serdev_controller_alloc(parent, sizeof(struct serport));
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ serport = serdev_controller_get_drvdata(ctrl);
+
+ serport->port = port;
+ serport->tty_idx = idx;
+ serport->tty_drv = drv;
+
+ ctrl->ops = &ctrl_ops;
+
+ old_ops = port->client_ops;
+ port->client_ops = &client_ops;
+ port->client_data = ctrl;
+
+ ret = serdev_controller_add(ctrl);
+ if (ret)
+ goto err_reset_data;
+
+ dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
+ return &ctrl->dev;
+
+err_reset_data:
+ port->client_data = NULL;
+ port->client_ops = old_ops;
+ serdev_controller_put(ctrl);
+
+ return ERR_PTR(ret);
+}
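+
+/*
+ * Usage sketch (illustrative, loosely modelled on how the mainline tty core
+ * uses this hook): a uart driver first tries to register its tty_port as a
+ * serdev controller and only creates a regular tty device if that fails.
+ *
+ *	struct device *dev;
+ *
+ *	dev = serdev_tty_port_register(port, parent, tty_drv, index);
+ *	if (!IS_ERR(dev))
+ *		return dev;
+ *	return tty_register_device_attr(tty_drv, index, parent, port, NULL);
+ */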
+
+int serdev_tty_port_unregister(struct tty_port *port)
+{
+ struct serdev_controller *ctrl = port->client_data;
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+
+ if (!serport)
+ return -ENODEV;
+
+ serdev_controller_remove(ctrl);
+ port->client_ops = NULL;
+ port->client_data = NULL;
+ serdev_controller_put(ctrl);
+
+ return 0;
+}
diff --git a/include/crypto/backport-kpp.h b/include/crypto/backport-kpp.h
new file mode 100644
index 0000000..4597d74
--- /dev/null
+++ b/include/crypto/backport-kpp.h
@@ -0,0 +1,350 @@
+#ifndef _BACKPORT_CRYPTO_KPP_H
+#define _BACKPORT_CRYPTO_KPP_H
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/crypto.h>
+#define CRYPTO_ALG_TYPE_KPP 0x0000000b
+
+/**
+ * struct kpp_request
+ *
+ * @base: Common attributes for async crypto requests
+ * @src: Source data
+ * @dst: Destination data
+ * @src_len: Size of the input buffer
+ * @dst_len: Size of the output buffer. It needs to be at least
+ * as big as the expected result depending on the operation
+ * After operation it will be updated with the actual size of the
+ * result. In case of error where the dst sgl size was insufficient,
+ * it will be updated to the size required for the operation.
+ * @__ctx: Start of private context data
+ */
+struct kpp_request {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_kpp - user-instantiated object which encapsulates
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_kpp {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct kpp_alg - generic key-agreement protocol primitives
+ *
+ * @set_secret: Function invokes the protocol specific function to
+ * store the secret private key along with parameters.
+ * The implementation knows how to decode the buffer
+ * @generate_public_key: Function generates the public key to be sent to the
+ * counterpart. In case of error, where output is not big
+ * enough req->dst_len will be updated to the size
+ * required
+ * @compute_shared_secret: Function computes the shared secret as defined by
+ * the algorithm. The result is given back to the user.
+ * In case of error, where output is not big enough,
+ * req->dst_len will be updated to the size required
+ * @max_size: Function returns the size of the output buffer
+ * @init: Initialize the object. This is called only once at
+ * instantiation time, in case the cryptographic hardware
+ * needs to be initialized. Any software fallback should be
+ * put in place here.
+ * @exit: Undo everything @init did.
+ *
+ * @reqsize: Request context size required by algorithm
+ * implementation
+ * @base: Common crypto API algorithm data structure
+ */
+struct kpp_alg {
+ int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
+ unsigned int len);
+ int (*generate_public_key)(struct kpp_request *req);
+ int (*compute_shared_secret)(struct kpp_request *req);
+
+ unsigned int (*max_size)(struct crypto_kpp *tfm);
+
+ int (*init)(struct crypto_kpp *tfm);
+ void (*exit)(struct crypto_kpp *tfm);
+
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Key-agreement Protocol Primitives API
+ *
+ * The KPP API is used with the algorithm type
+ * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_kpp() - allocate KPP tfm handle
+ * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for the kpp algorithm. The returned struct crypto_kpp
+ * is required for any subsequent API invocation.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case of
+ * an error, PTR_ERR() returns the error code.
+ */
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct kpp_alg, base);
+}
+
+static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_kpp, base);
+}
+
+static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
+{
+ return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
+{
+ return crypto_kpp_alg(tfm)->reqsize;
+}
+
+static inline void kpp_request_set_tfm(struct kpp_request *req,
+ struct crypto_kpp *tfm)
+{
+ req->base.tfm = crypto_kpp_tfm(tfm);
+}
+
+static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
+{
+ return __crypto_kpp_tfm(req->base.tfm);
+}
+
+static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_get_flags(crypto_kpp_tfm(tfm));
+}
+
+static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags);
+}
+
+/**
+ * crypto_free_kpp() - free KPP tfm handle
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ */
+static inline void crypto_free_kpp(struct crypto_kpp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm));
+}
+
+/**
+ * kpp_request_alloc() - allocates kpp request
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ * @gfp: allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
+ gfp_t gfp)
+{
+ struct kpp_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
+ if (likely(req))
+ kpp_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * kpp_request_free() - zeroize and free kpp request
+ *
+ * @req: request to free
+ */
+static inline void kpp_request_free(struct kpp_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * kpp_request_set_callback() - Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: specify for instance if the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void kpp_request_set_callback(struct kpp_request *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * kpp_request_set_input() - Sets input buffer
+ *
+ * Sets parameters required by generate_public_key
+ *
+ * @req: kpp request
+ * @input: ptr to input scatter list
+ * @input_len: size of the input scatter list
+ */
+static inline void kpp_request_set_input(struct kpp_request *req,
+ struct scatterlist *input,
+ unsigned int input_len)
+{
+ req->src = input;
+ req->src_len = input_len;
+}
+
+/**
+ * kpp_request_set_output() - Sets output buffer
+ *
+ * Sets parameters required by kpp operation
+ *
+ * @req: kpp request
+ * @output: ptr to output scatter list
+ * @output_len: size of the output scatter list
+ */
+static inline void kpp_request_set_output(struct kpp_request *req,
+ struct scatterlist *output,
+ unsigned int output_len)
+{
+ req->dst = output;
+ req->dst_len = output_len;
+}
+
+enum {
+ CRYPTO_KPP_SECRET_TYPE_UNKNOWN,
+ CRYPTO_KPP_SECRET_TYPE_DH,
+ CRYPTO_KPP_SECRET_TYPE_ECDH,
+};
+
+/**
+ * struct kpp_secret - small header for packing secret buffer
+ *
+ * @type: defines the type of secret. Each kpp type will define its own
+ * @len: length of the secret, including this header, that
+ * follows the struct
+ */
+struct kpp_secret {
+ unsigned short type;
+ unsigned short len;
+};
+
+/**
+ * crypto_kpp_set_secret() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for a given alg.
+ *
+ * @tfm: tfm handle
+ * @buffer: Buffer holding the packet representation of the private
+ * key. The structure of the packet key depends on the particular
+ * KPP implementation. Packing and unpacking helpers are provided
+ * for ECDH and DH (see the respective header files for those
+ * implementations).
+ * @len: Length of the packet private key buffer.
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
+ const void *buffer, unsigned int len)
+{
+ struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+ return alg->set_secret(tfm, buffer, len);
+}
+
+/**
+ * crypto_kpp_generate_public_key() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for generating the public part
+ * for a given kpp algorithm.
+ *
+ * To generate a private key, the caller should use a random number generator.
+ * The output of the requested length serves as the private key.
+ *
+ * @req: kpp key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+ return alg->generate_public_key(req);
+}
+
+/**
+ * crypto_kpp_compute_shared_secret() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for computing the shared secret
+ * for a given kpp algorithm.
+ *
+ * @req: kpp key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+ return alg->compute_shared_secret(req);
+}
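+
+/*
+ * Usage sketch (illustrative; error handling is omitted and packed_key,
+ * dst_sg and the "ecdh" algorithm name are only examples): the typical
+ * flow is allocate tfm, set the secret, allocate a request, set the output
+ * buffer, then generate the public key and/or compute the shared secret.
+ *
+ *	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh", 0, 0);
+ *	struct kpp_request *req = kpp_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
+ *	kpp_request_set_output(req, &dst_sg, dst_len);
+ *	crypto_kpp_generate_public_key(req);
+ *
+ *	kpp_request_free(req);
+ *	crypto_free_kpp(tfm);
+ */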
+
+/**
+ * crypto_kpp_maxsize() - Get len for output buffer
+ *
+ * Function returns the output buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you will end up
+ * in a NULL dereference.
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ */
+static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm)
+{
+ struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+ return alg->max_size(tfm);
+}
+#endif
diff --git a/include/crypto/internal/backport-kpp.h b/include/crypto/internal/backport-kpp.h
new file mode 100644
index 0000000..bd16788
--- /dev/null
+++ b/include/crypto/internal/backport-kpp.h
@@ -0,0 +1,64 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_KPP_INT_H
+#define _CRYPTO_KPP_INT_H
+#include <crypto/backport-kpp.h>
+#include <crypto/algapi.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *kpp_request_ctx(struct kpp_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void kpp_request_complete(struct kpp_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
+{
+ return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
+ *
+ * Function registers an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_kpp(struct kpp_alg *alg);
+
+/**
+ * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive
+ * algorithm
+ *
+ * Function unregisters an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_kpp(struct kpp_alg *alg);
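+
+/*
+ * Usage sketch (illustrative; my_kpp_alg is a hypothetical implementation):
+ * an algorithm module pairs these two calls in its init/exit hooks.
+ *
+ *	static int __init my_kpp_init(void)
+ *	{
+ *		return crypto_register_kpp(&my_kpp_alg);
+ *	}
+ *
+ *	static void __exit my_kpp_exit(void)
+ *	{
+ *		crypto_unregister_kpp(&my_kpp_alg);
+ *	}
+ */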
+
+#endif
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
new file mode 100644
index 0000000..f06a849
--- /dev/null
+++ b/include/linux/serdev.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_SERDEV_H
+#define _LINUX_SERDEV_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/termios.h>
+#include <linux/delay.h>
+
+struct serdev_controller;
+struct serdev_device;
+
+/*
+ * serdev device structures
+ */
+
+/**
+ * struct serdev_device_ops - Callback operations for a serdev device
+ * @receive_buf: Function called with data received from device;
+ * returns number of bytes accepted; may sleep.
+ * @write_wakeup: Function called when ready to transmit more data; must
+ * not sleep.
+ */
+struct serdev_device_ops {
+ int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct serdev_device *);
+};
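+
+/*
+ * Usage sketch (illustrative; my_receive()/my_wakeup() are hypothetical
+ * client callbacks): a slave driver installs its callbacks with
+ * serdev_device_set_client_ops() before opening the device.
+ *
+ *	static const struct serdev_device_ops my_ops = {
+ *		.receive_buf  = my_receive,
+ *		.write_wakeup = my_wakeup,
+ *	};
+ *
+ *	serdev_device_set_client_ops(serdev, &my_ops);
+ *	err = serdev_device_open(serdev);
+ */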
+
+/**
+ * struct serdev_device - Basic representation of a serdev device
+ * @dev: Driver model representation of the device.
+ * @nr: Device number on serdev bus.
+ * @ctrl: serdev controller managing this device.
+ * @ops: Device operations.
+ * @write_comp: Completion used by serdev_device_write() internally
+ * @write_lock: Lock to serialize access when writing data
+ */
+struct serdev_device {
+ struct device dev;
+ int nr;
+ struct serdev_controller *ctrl;
+ const struct serdev_device_ops *ops;
+ struct completion write_comp;
+ struct mutex write_lock;
+};
+
+static inline struct serdev_device *to_serdev_device(struct device *d)
+{
+ return container_of(d, struct serdev_device, dev);
+}
+
+/**
+ * struct serdev_device_driver - serdev slave device driver
+ * @driver: serdev device drivers should initialize the name field of this
+ * structure.
+ * @probe: binds this driver to a serdev device.
+ * @remove: unbinds this driver from the serdev device.
+ */
+struct serdev_device_driver {
+ struct device_driver driver;
+ int (*probe)(struct serdev_device *);
+ void (*remove)(struct serdev_device *);
+};
+
+static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d)
+{
+ return container_of(d, struct serdev_device_driver, driver);
+}
+
+enum serdev_parity {
+ SERDEV_PARITY_NONE,
+ SERDEV_PARITY_EVEN,
+ SERDEV_PARITY_ODD,
+};
+
+/*
+ * serdev controller structures
+ */
+struct serdev_controller_ops {
+ int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ void (*write_flush)(struct serdev_controller *);
+ int (*write_room)(struct serdev_controller *);
+ int (*open)(struct serdev_controller *);
+ void (*close)(struct serdev_controller *);
+ void (*set_flow_control)(struct serdev_controller *, bool);
+ int (*set_parity)(struct serdev_controller *, enum serdev_parity);
+ unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
+ void (*wait_until_sent)(struct serdev_controller *, long);
+ int (*get_tiocm)(struct serdev_controller *);
+ int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
+};
+
+/**
+ * struct serdev_controller - interface to the serdev controller
+ * @dev: Driver model representation of the device.
+ * @nr: number identifier for this controller/bus.
+ * @serdev: Pointer to slave device for this controller.
+ * @ops: Controller operations.
+ */
+struct serdev_controller {
+ struct device dev;
+ unsigned int nr;
+ struct serdev_device *serdev;
+ const struct serdev_controller_ops *ops;
+};
+
+static inline struct serdev_controller *to_serdev_controller(struct device *d)
+{
+ return container_of(d, struct serdev_controller, dev);
+}
+
+static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev)
+{
+ return dev_get_drvdata(&serdev->dev);
+}
+
+static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data)
+{
+ dev_set_drvdata(&serdev->dev, data);
+}
+
+/**
+ * serdev_device_put() - decrement serdev device refcount
+ * @serdev serdev device.
+ */
+static inline void serdev_device_put(struct serdev_device *serdev)
+{
+ if (serdev)
+ put_device(&serdev->dev);
+}
+
+static inline void serdev_device_set_client_ops(struct serdev_device *serdev,
+ const struct serdev_device_ops *ops)
+{
+ serdev->ops = ops;
+}
+
+static inline
+void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl)
+{
+ return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL;
+}
+
+static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl,
+ void *data)
+{
+ dev_set_drvdata(&ctrl->dev, data);
+}
+
+/**
+ * serdev_controller_put() - decrement controller refcount
+ * @ctrl: serdev controller.
+ */
+static inline void serdev_controller_put(struct serdev_controller *ctrl)
+{
+ if (ctrl)
+ put_device(&ctrl->dev);
+}
+
+struct serdev_device *serdev_device_alloc(struct serdev_controller *);
+int serdev_device_add(struct serdev_device *);
+void serdev_device_remove(struct serdev_device *);
+
+struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+int serdev_controller_add(struct serdev_controller *);
+void serdev_controller_remove(struct serdev_controller *);
+
+static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->write_wakeup)
+ return;
+
+ serdev->ops->write_wakeup(serdev);
+}
+
+static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const unsigned char *data,
+ size_t count)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->receive_buf)
+ return 0;
+
+ return serdev->ops->receive_buf(serdev, data, count);
+}
+
+#if IS_ENABLED(CPTCFG_SERIAL_DEV_BUS)
+int serdev_device_open(struct serdev_device *);
+void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
+unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
+void serdev_device_set_flow_control(struct serdev_device *, bool);
+int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_wait_until_sent(struct serdev_device *, long);
+int serdev_device_get_tiocm(struct serdev_device *);
+int serdev_device_set_tiocm(struct serdev_device *, int, int);
+void serdev_device_write_wakeup(struct serdev_device *);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
+void serdev_device_write_flush(struct serdev_device *);
+int serdev_device_write_room(struct serdev_device *);
+
+/*
+ * serdev device driver functions
+ */
+int __serdev_device_driver_register(struct serdev_device_driver *, struct module *);
+#define serdev_device_driver_register(sdrv) \
+ __serdev_device_driver_register(sdrv, THIS_MODULE)
+
+/**
+ * serdev_device_driver_unregister() - unregister a serdev client driver
+ * @sdrv: the driver to unregister
+ */
+static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
+#define module_serdev_device_driver(__serdev_device_driver) \
+ module_driver(__serdev_device_driver, serdev_device_driver_register, \
+ serdev_device_driver_unregister)
+
+#else
+static inline int serdev_device_open(struct serdev_device *sdev)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_close(struct serdev_device *sdev) {}
+static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate)
+{
+ return 0;
+}
+static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *buf,
+ size_t count)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
+static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
+ size_t count, unsigned long timeout)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
+static inline int serdev_device_write_room(struct serdev_device *sdev)
+{
+ return 0;
+}
+
+#define serdev_device_driver_register(x)
+#define serdev_device_driver_unregister(x)
+
+#endif /* CPTCFG_SERIAL_DEV_BUS */
+
+static inline bool serdev_device_get_cts(struct serdev_device *serdev)
+{
+ int status = serdev_device_get_tiocm(serdev);
+ return !!(status & TIOCM_CTS);
+}
+
+static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
+{
+ unsigned long timeout;
+ bool signal;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ while (time_is_after_jiffies(timeout)) {
+ signal = serdev_device_get_cts(serdev);
+ if (signal == state)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
+{
+ if (enable)
+ return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0);
+ else
+ return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
+}
+
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity);
+
+/*
+ * serdev hooks into TTY core
+ */
+struct tty_port;
+struct tty_driver;
+
+#ifdef CPTCFG_SERIAL_DEV_CTRL_TTYPORT
+struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx);
+int serdev_tty_port_unregister(struct tty_port *port);
+#else
+static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline int serdev_tty_port_unregister(struct tty_port *port)
+{
+ return -ENODEV;
+}
+#endif /* CPTCFG_SERIAL_DEV_CTRL_TTYPORT */
+
+#endif /*_LINUX_SERDEV_H */
diff --git a/include/net/bluetooth/.hci_core.h.kate-swp b/include/net/bluetooth/.hci_core.h.kate-swp
new file mode 100644
index 0000000..19bf82d
--- /dev/null
+++ b/include/net/bluetooth/.hci_core.h.kate-swp
Binary files differ
diff --git a/include/net/bluetooth/.rfcomm.h.kate-swp b/include/net/bluetooth/.rfcomm.h.kate-swp
new file mode 100644
index 0000000..3138a6c
--- /dev/null
+++ b/include/net/bluetooth/.rfcomm.h.kate-swp
Binary files differ
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
new file mode 100644
index 0000000..e49536a
--- /dev/null
+++ b/include/net/bluetooth/bluetooth.h
@@ -0,0 +1,416 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __BLUETOOTH_H
+#define __BLUETOOTH_H
+
+#include <linux/poll.h>
+#include <net/sock.h>
+#include <linux/seq_file.h>
+
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
+
+#ifndef AF_BLUETOOTH
+#define AF_BLUETOOTH 31
+#define PF_BLUETOOTH AF_BLUETOOTH
+#endif
+
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1 1
+#define BLUETOOTH_VER_1_2 2
+#define BLUETOOTH_VER_2_0 3
+
+/* Reserved for use by the core and drivers */
+#define BT_SKB_RESERVE 8
+
+#define BTPROTO_L2CAP 0
+#define BTPROTO_HCI 1
+#define BTPROTO_SCO 2
+#define BTPROTO_RFCOMM 3
+#define BTPROTO_BNEP 4
+#define BTPROTO_CMTP 5
+#define BTPROTO_HIDP 6
+#define BTPROTO_AVDTP 7
+
+#define SOL_HCI 0
+#define SOL_L2CAP 6
+#define SOL_SCO 17
+#define SOL_RFCOMM 18
+
+#define BT_SECURITY 4
+struct bt_security {
+ __u8 level;
+ __u8 key_size;
+};
+#define BT_SECURITY_SDP 0
+#define BT_SECURITY_LOW 1
+#define BT_SECURITY_MEDIUM 2
+#define BT_SECURITY_HIGH 3
+#define BT_SECURITY_FIPS 4
+
+#define BT_DEFER_SETUP 7
+
+#define BT_FLUSHABLE 8
+
+#define BT_FLUSHABLE_OFF 0
+#define BT_FLUSHABLE_ON 1
+
+#define BT_POWER 9
+struct bt_power {
+ __u8 force_active;
+};
+#define BT_POWER_FORCE_ACTIVE_OFF 0
+#define BT_POWER_FORCE_ACTIVE_ON 1
+
+#define BT_CHANNEL_POLICY 10
+
+/* BR/EDR only (default policy)
+ * AMP controllers cannot be used.
+ * Channel move requests from the remote device are denied.
+ * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY 0
+
+/* BR/EDR Preferred
+ * Allow use of AMP controllers.
+ * If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ * Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
+
+/* AMP Preferred
+ * Allow use of AMP controllers
+ * If the L2CAP channel is currently on BR/EDR and AMP controller
+ * resources are available, initiate a channel move to AMP.
+ * Channel move requests from the remote device are allowed.
+ * If the L2CAP socket has not been connected yet, try to create
+ * and configure the channel directly on an AMP controller rather
+ * than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+
+#define BT_VOICE 11
+struct bt_voice {
+ __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT 0x0003
+#define BT_VOICE_CVSD_16BIT 0x0060
+
+#define BT_SNDMTU 12
+#define BT_RCVMTU 13
+
+__printf(1, 2)
+void bt_info(const char *fmt, ...);
+__printf(1, 2)
+void bt_warn(const char *fmt, ...);
+__printf(1, 2)
+void bt_err(const char *fmt, ...);
+__printf(1, 2)
+void bt_err_ratelimited(const char *fmt, ...);
+
+#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__)
+#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
+
+#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
+
+#define bt_dev_info(hdev, fmt, ...) \
+ BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn(hdev, fmt, ...) \
+ BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_err(hdev, fmt, ...) \
+ BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_dbg(hdev, fmt, ...) \
+ BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+
+#define bt_dev_err_ratelimited(hdev, fmt, ...) \
+ BT_ERR_RATELIMITED("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+
+/* Connection and socket states */
+enum {
+ BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
+ BT_OPEN,
+ BT_BOUND,
+ BT_LISTEN,
+ BT_CONNECT,
+ BT_CONNECT2,
+ BT_CONFIG,
+ BT_DISCONN,
+ BT_CLOSED
+};
+
+/* If unused, this will be removed by the compiler */
+static inline const char *state_to_string(int state)
+{
+ switch (state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
+}
+
+/* BD Address */
+typedef struct {
+ __u8 b[6];
+} __packed bdaddr_t;
+
+/* BD Address type */
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
+static inline bool bdaddr_type_is_valid(u8 type)
+{
+ switch (type) {
+ case BDADDR_BREDR:
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool bdaddr_type_is_le(u8 type)
+{
+ switch (type) {
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
+
+/* Copy, swap, convert BD Address */
+static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
+{
+ return memcmp(ba1, ba2, sizeof(bdaddr_t));
+}
+static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
+{
+ memcpy(dst, src, sizeof(bdaddr_t));
+}
+
+void baswap(bdaddr_t *dst, const bdaddr_t *src);
+
+/* Common socket structures and functions */
+
+#define bt_sk(__sk) ((struct bt_sock *) __sk)
+
+struct bt_sock {
+ struct sock sk;
+ struct list_head accept_q;
+ struct sock *parent;
+ unsigned long flags;
+ void (*skb_msg_name)(struct sk_buff *, void *, int *);
+};
+
+enum {
+ BT_SK_DEFER_SETUP,
+ BT_SK_SUSPEND,
+};
+
+struct bt_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+#ifdef CONFIG_PROC_FS
+ int (* custom_seq_show)(struct seq_file *, void *);
+#endif
+};
+
+int bt_sock_register(int proto, const struct net_proto_family *ops);
+void bt_sock_unregister(int proto);
+void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+
+void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_unlink(struct sock *sk);
+struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+
+/* Skb helpers */
+struct l2cap_ctrl {
+ u8 sframe:1,
+ poll:1,
+ final:1,
+ fcs:1,
+ sar:2,
+ super:2;
+
+ u16 reqseq;
+ u16 txseq;
+ u8 retries;
+ __le16 psm;
+ bdaddr_t bdaddr;
+ struct l2cap_chan *chan;
+};
+
+struct hci_dev;
+
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb);
+
+#define HCI_REQ_START BIT(0)
+#define HCI_REQ_SKB BIT(1)
+
+struct hci_ctrl {
+ u16 opcode;
+ u8 req_flags;
+ u8 req_event;
+ union {
+ hci_req_complete_t req_complete;
+ hci_req_complete_skb_t req_complete_skb;
+ };
+};
+
+struct bt_skb_cb {
+ u8 pkt_type;
+ u8 force_active;
+ u16 expect;
+ u8 incoming:1;
+ union {
+ struct l2cap_ctrl l2cap;
+ struct hci_ctrl hci;
+ };
+};
+#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
+
+#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_expect(skb) bt_cb((skb))->expect
+#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
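+
+/* The accessors above reinterpret skb->cb as struct bt_skb_cb via bt_cb();
+ * a transport driver would typically tag a frame before queueing it, for
+ * example (illustrative):
+ *
+ *	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
+ *	hci_skb_expect(skb) = 4;	// e.g. wait for a full ACL header
+ */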
+
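+/* bt_skb_alloc() over-allocates by BT_SKB_RESERVE bytes and reserves that
+ * space as headroom, typically so HCI headers (for instance the H:4
+ * packet-type byte) can later be pushed without reallocating the skb.
+ */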
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + BT_SKB_RESERVE, how);
+ if (skb)
+ skb_reserve(skb, BT_SKB_RESERVE);
+ return skb;
+}
+
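+/* Socket-bound variant of bt_skb_alloc(): if sock_alloc_send_skb() itself
+ * fails, the error it stored in *err is returned as-is; otherwise a pending
+ * socket error or a shutdown still frees the freshly allocated skb and the
+ * call fails with the corresponding error code.
+ */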
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
+ unsigned long len, int nb, int *err)
+{
+ struct sk_buff *skb;
+
+ skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
+ if (skb)
+ skb_reserve(skb, BT_SKB_RESERVE);
+
+ if (!skb && *err)
+ return NULL;
+
+ *err = sock_error(sk);
+ if (*err)
+ goto out;
+
+ if (sk->sk_shutdown) {
+ *err = -ECONNRESET;
+ goto out;
+ }
+
+ return skb;
+
+out:
+ kfree_skb(skb);
+ return NULL;
+}
+
+int bt_to_errno(u16 code);
+
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
+
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
+
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
+
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
+
+extern struct dentry *bt_debugfs;
+
+int l2cap_init(void);
+void l2cap_exit(void);
+
+#if IS_ENABLED(CPTCFG_BT_BREDR)
+int sco_init(void);
+void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+ return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
+int mgmt_init(void);
+void mgmt_exit(void);
+
+void bt_sock_reclassify_lock(struct sock *sk, int proto);
+
+#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
new file mode 100644
index 0000000..1668211
--- /dev/null
+++ b/include/net/bluetooth/hci.h
@@ -0,0 +1,2032 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_H
+#define __HCI_H
+
+#define HCI_MAX_ACL_SIZE 1024
+#define HCI_MAX_SCO_SIZE 255
+#define HCI_MAX_EVENT_SIZE 260
+#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
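+
+/* HCI_MAX_FRAME_SIZE is the largest ACL payload plus the 4-byte ACL header
+ * (HCI_ACL_HDR_SIZE below).
+ */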
+
+#define HCI_LINK_KEY_SIZE 16
+#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+
+#define HCI_MAX_AMP_ASSOC_SIZE 672
+
+#define HCI_MAX_CSB_DATA_SIZE 252
+
+/* HCI dev events */
+#define HCI_DEV_REG 1
+#define HCI_DEV_UNREG 2
+#define HCI_DEV_UP 3
+#define HCI_DEV_DOWN 4
+#define HCI_DEV_SUSPEND 5
+#define HCI_DEV_RESUME 6
+#define HCI_DEV_OPEN 7
+#define HCI_DEV_CLOSE 8
+#define HCI_DEV_SETUP 9
+
+/* HCI notify events */
+#define HCI_NOTIFY_CONN_ADD 1
+#define HCI_NOTIFY_CONN_DEL 2
+#define HCI_NOTIFY_VOICE_SETTING 3
+
+/* HCI bus types */
+#define HCI_VIRTUAL 0
+#define HCI_USB 1
+#define HCI_PCCARD 2
+#define HCI_UART 3
+#define HCI_RS232 4
+#define HCI_PCI 5
+#define HCI_SDIO 6
+#define HCI_SPI 7
+#define HCI_I2C 8
+#define HCI_SMD 9
+
+/* HCI controller types */
+#define HCI_PRIMARY 0x00
+#define HCI_AMP 0x01
+
+/* First BR/EDR Controller shall have ID = 0 */
+#define AMP_ID_BREDR 0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR 0x00
+#define AMP_TYPE_80211 0x01
+
+/* AMP controller status */
+#define AMP_STATUS_POWERED_DOWN 0x00
+#define AMP_STATUS_BLUETOOTH_ONLY 0x01
+#define AMP_STATUS_NO_CAPACITY 0x02
+#define AMP_STATUS_LOW_CAPACITY 0x03
+#define AMP_STATUS_MEDIUM_CAPACITY 0x04
+#define AMP_STATUS_HIGH_CAPACITY 0x05
+#define AMP_STATUS_FULL_CAPACITY 0x06
+
+/* HCI device quirks */
+enum {
+	/* When this quirk is set, the HCI Reset command is sent when
+ * closing the transport instead of when opening it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_RESET_ON_CLOSE,
+
+ /* When this quirk is set, the device is turned into a raw-only
+ * device and it will stay in unconfigured state.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_RAW_DEVICE,
+
+ /* When this quirk is set, the buffer sizes reported by
+ * HCI Read Buffer Size command are corrected if invalid.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_BUFFER_SIZE,
+
+ /* When this quirk is set, then a controller that does not
+ * indicate support for Inquiry Result with RSSI is assumed to
+ * support it anyway. Some early Bluetooth 1.2 controllers had
+ * wrongly configured local features that will require forcing
+ * them to enable this mode. Getting RSSI information with the
+ * inquiry responses is preferred since it allows for a better
+	 * user experience.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_INQUIRY_MODE,
+
+ /* When this quirk is set, then the HCI Read Local Supported
+ * Commands command is not supported. In general Bluetooth 1.2
+ * and later controllers should support this command. However
+ * some controllers indicate Bluetooth 1.2 support, but do
+ * not support this command.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
+
+ /* When this quirk is set, then no stored link key handling
+ * is performed. This is mainly due to the fact that the
+ * HCI Delete Stored Link Key command is advertised, but
+ * not supported.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_STORED_LINK_KEY,
+
+ /* When this quirk is set, an external configuration step
+ * is required and will be indicated with the controller
+	 * configuration.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_EXTERNAL_CONFIG,
+
+ /* When this quirk is set, the public Bluetooth address
+ * initially reported by HCI Read BD Address command
+ * is considered invalid. Controller configuration is
+ * required before this device can be used.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_INVALID_BDADDR,
+
+ /* When this quirk is set, the duplicate filtering during
+	 * scanning is based on Bluetooth device addresses. To allow
+ * RSSI based updates, restart scanning if needed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+	/* When this quirk is set, LE scan and BR/EDR inquiry are done
+	 * simultaneously; otherwise they are interleaved.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+
+ /* When this quirk is set, the enabling of diagnostic mode is
+ * not persistent over HCI Reset. Every time the controller
+ * is brought up it needs to be reprogrammed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_NON_PERSISTENT_DIAG,
+};
+
+/* HCI device flags */
+enum {
+ HCI_UP,
+ HCI_INIT,
+ HCI_RUNNING,
+
+ HCI_PSCAN,
+ HCI_ISCAN,
+ HCI_AUTH,
+ HCI_ENCRYPT,
+ HCI_INQUIRY,
+
+ HCI_RAW,
+
+ HCI_RESET,
+};
+
+/* HCI socket flags */
+enum {
+ HCI_SOCK_TRUSTED,
+ HCI_MGMT_INDEX_EVENTS,
+ HCI_MGMT_UNCONF_INDEX_EVENTS,
+ HCI_MGMT_EXT_INDEX_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
+ HCI_MGMT_OOB_DATA_EVENTS,
+};
+
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+ HCI_SETUP,
+ HCI_CONFIG,
+ HCI_AUTO_OFF,
+ HCI_RFKILLED,
+ HCI_MGMT,
+ HCI_BONDABLE,
+ HCI_SERVICE_CACHE,
+ HCI_KEEP_DEBUG_KEYS,
+ HCI_USE_DEBUG_KEYS,
+ HCI_UNREGISTER,
+ HCI_UNCONFIGURED,
+ HCI_USER_CHANNEL,
+ HCI_EXT_CONFIGURED,
+ HCI_LE_ADV,
+ HCI_LE_SCAN,
+ HCI_SSP_ENABLED,
+ HCI_SC_ENABLED,
+ HCI_SC_ONLY,
+ HCI_PRIVACY,
+ HCI_LIMITED_PRIVACY,
+ HCI_RPA_EXPIRED,
+ HCI_RPA_RESOLVING,
+ HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+ HCI_CONNECTABLE,
+ HCI_DISCOVERABLE,
+ HCI_LIMITED_DISCOVERABLE,
+ HCI_LINK_SECURITY,
+ HCI_PERIODIC_INQ,
+ HCI_FAST_CONNECTABLE,
+ HCI_BREDR_ENABLED,
+ HCI_LE_SCAN_INTERRUPTED,
+
+ HCI_DUT_MODE,
+ HCI_VENDOR_DIAG,
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+
+ __HCI_NUM_FLAGS,
+};
+
+/* HCI timeouts */
+#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
+#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
+
+/* HCI data types */
+#define HCI_COMMAND_PKT 0x01
+#define HCI_ACLDATA_PKT 0x02
+#define HCI_SCODATA_PKT 0x03
+#define HCI_EVENT_PKT 0x04
+#define HCI_DIAG_PKT 0xf0
+#define HCI_VENDOR_PKT 0xff
+
+/* HCI packet types */
+#define HCI_DM1 0x0008
+#define HCI_DM3 0x0400
+#define HCI_DM5 0x4000
+#define HCI_DH1 0x0010
+#define HCI_DH3 0x0800
+#define HCI_DH5 0x8000
+
+#define HCI_HV1 0x0020
+#define HCI_HV2 0x0040
+#define HCI_HV3 0x0080
+
+#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
+#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
+
+/* eSCO packet types */
+#define ESCO_HV1 0x0001
+#define ESCO_HV2 0x0002
+#define ESCO_HV3 0x0004
+#define ESCO_EV3 0x0008
+#define ESCO_EV4 0x0010
+#define ESCO_EV5 0x0020
+#define ESCO_2EV3 0x0040
+#define ESCO_3EV3 0x0080
+#define ESCO_2EV5 0x0100
+#define ESCO_3EV5 0x0200
+
+#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+
+/* ACL flags */
+#define ACL_START_NO_FLUSH 0x00
+#define ACL_CONT 0x01
+#define ACL_START 0x02
+#define ACL_COMPLETE 0x03
+#define ACL_ACTIVE_BCAST 0x04
+#define ACL_PICO_BCAST 0x08
+
+/* Baseband links */
+#define SCO_LINK 0x00
+#define ACL_LINK 0x01
+#define ESCO_LINK 0x02
+/* Low Energy links do not have a defined link type. Use an invented one */
+#define LE_LINK 0x80
+#define AMP_LINK 0x81
+#define INVALID_LINK 0xff
+
+/* LMP features */
+#define LMP_3SLOT 0x01
+#define LMP_5SLOT 0x02
+#define LMP_ENCRYPT 0x04
+#define LMP_SOFFSET 0x08
+#define LMP_TACCURACY 0x10
+#define LMP_RSWITCH 0x20
+#define LMP_HOLD 0x40
+#define LMP_SNIFF 0x80
+
+#define LMP_PARK 0x01
+#define LMP_RSSI 0x02
+#define LMP_QUALITY 0x04
+#define LMP_SCO 0x08
+#define LMP_HV2 0x10
+#define LMP_HV3 0x20
+#define LMP_ULAW 0x40
+#define LMP_ALAW 0x80
+
+#define LMP_CVSD 0x01
+#define LMP_PSCHEME 0x02
+#define LMP_PCONTROL 0x04
+#define LMP_TRANSPARENT 0x08
+
+#define LMP_RSSI_INQ 0x40
+#define LMP_ESCO 0x80
+
+#define LMP_EV4 0x01
+#define LMP_EV5 0x02
+#define LMP_NO_BREDR 0x20
+#define LMP_LE 0x40
+
+#define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC 0x04
+#define LMP_EDR_ESCO_2M 0x20
+#define LMP_EDR_ESCO_3M 0x40
+#define LMP_EDR_3S_ESCO 0x80
+
+#define LMP_EXT_INQ 0x01
+#define LMP_SIMUL_LE_BR 0x02
+#define LMP_SIMPLE_PAIR 0x08
+#define LMP_NO_FLUSH 0x40
+
+#define LMP_LSTO 0x01
+#define LMP_INQ_TX_PWR 0x02
+#define LMP_EXTFEATURES 0x80
+
+/* Extended LMP features */
+#define LMP_CSB_MASTER 0x01
+#define LMP_CSB_SLAVE 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
+
+#define LMP_SC 0x01
+#define LMP_PING 0x02
+
+/* Host features */
+#define LMP_HOST_SSP 0x01
+#define LMP_HOST_LE 0x02
+#define LMP_HOST_LE_BREDR 0x04
+#define LMP_HOST_SC 0x08
+
+/* LE features */
+#define HCI_LE_ENCRYPTION 0x01
+#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
+#define HCI_LE_SLAVE_FEATURES 0x08
+#define HCI_LE_PING 0x10
+#define HCI_LE_DATA_LEN_EXT 0x20
+#define HCI_LE_EXT_SCAN_POLICY 0x80
+#define HCI_LE_CHAN_SEL_ALG2 0x40
+
+/* Connection modes */
+#define HCI_CM_ACTIVE 0x0000
+#define HCI_CM_HOLD 0x0001
+#define HCI_CM_SNIFF 0x0002
+#define HCI_CM_PARK 0x0003
+
+/* Link policies */
+#define HCI_LP_RSWITCH 0x0001
+#define HCI_LP_HOLD 0x0002
+#define HCI_LP_SNIFF 0x0004
+#define HCI_LP_PARK 0x0008
+
+/* Link modes */
+#define HCI_LM_ACCEPT 0x8000
+#define HCI_LM_MASTER 0x0001
+#define HCI_LM_AUTH 0x0002
+#define HCI_LM_ENCRYPT 0x0004
+#define HCI_LM_TRUSTED 0x0008
+#define HCI_LM_RELIABLE 0x0010
+#define HCI_LM_SECURE 0x0020
+#define HCI_LM_FIPS 0x0040
+
+/* Authentication types */
+#define HCI_AT_NO_BONDING 0x00
+#define HCI_AT_NO_BONDING_MITM 0x01
+#define HCI_AT_DEDICATED_BONDING 0x02
+#define HCI_AT_DEDICATED_BONDING_MITM 0x03
+#define HCI_AT_GENERAL_BONDING 0x04
+#define HCI_AT_GENERAL_BONDING_MITM 0x05
+
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY 0x00
+#define HCI_IO_DISPLAY_YESNO 0x01
+#define HCI_IO_KEYBOARD_ONLY 0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
+/* Link Key types */
+#define HCI_LK_COMBINATION 0x00
+#define HCI_LK_LOCAL_UNIT 0x01
+#define HCI_LK_REMOTE_UNIT 0x02
+#define HCI_LK_DEBUG_COMBINATION 0x03
+#define HCI_LK_UNAUTH_COMBINATION_P192 0x04
+#define HCI_LK_AUTH_COMBINATION_P192 0x05
+#define HCI_LK_CHANGED_COMBINATION 0x06
+#define HCI_LK_UNAUTH_COMBINATION_P256 0x07
+#define HCI_LK_AUTH_COMBINATION_P256 0x08
+
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_UNKNOWN_CONN_ID 0x02
+#define HCI_ERROR_AUTH_FAILURE 0x05
+#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
+#define HCI_ERROR_MEMORY_EXCEEDED 0x07
+#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
+#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_REMOTE_USER_TERM 0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF 0x15
+#define HCI_ERROR_LOCAL_HOST_TERM 0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
+#define HCI_ERROR_UNSPECIFIED 0x1f
+#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+
+/* Flow control modes */
+#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+
+/* The core spec defines 127 as the "not available" value */
+#define HCI_TX_POWER_INVALID 127
+#define HCI_RSSI_INVALID 127
+
+#define HCI_ROLE_MASTER 0x00
+#define HCI_ROLE_SLAVE 0x01
+
+/* Extended Inquiry Response field types */
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
+#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE 0x1C /* LE role */
+#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */
+#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */
+
+/* Low Energy Advertising Flags */
+#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
+#define LE_AD_GENERAL 0x02 /* General Discoverable */
+#define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */
+#define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */
+#define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */
+
+/* ----- HCI Commands ---- */
+#define HCI_OP_NOP 0x0000
+
+#define HCI_OP_INQUIRY 0x0401
+struct hci_cp_inquiry {
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+} __packed;
+
+#define HCI_OP_INQUIRY_CANCEL 0x0402
+
+#define HCI_OP_PERIODIC_INQ 0x0403
+
+#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
+
+#define HCI_OP_CREATE_CONN 0x0405
+struct hci_cp_create_conn {
+ bdaddr_t bdaddr;
+ __le16 pkt_type;
+ __u8 pscan_rep_mode;
+ __u8 pscan_mode;
+ __le16 clock_offset;
+ __u8 role_switch;
+} __packed;
+
+#define HCI_OP_DISCONNECT 0x0406
+struct hci_cp_disconnect {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_ADD_SCO 0x0407
+struct hci_cp_add_sco {
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_CREATE_CONN_CANCEL 0x0408
+struct hci_cp_create_conn_cancel {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_ACCEPT_CONN_REQ 0x0409
+struct hci_cp_accept_conn_req {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_REJECT_CONN_REQ 0x040a
+struct hci_cp_reject_conn_req {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LINK_KEY_REPLY 0x040b
+struct hci_cp_link_key_reply {
+ bdaddr_t bdaddr;
+ __u8 link_key[HCI_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
+struct hci_cp_link_key_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_PIN_CODE_REPLY 0x040d
+struct hci_cp_pin_code_reply {
+ bdaddr_t bdaddr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+struct hci_rp_pin_code_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
+struct hci_cp_pin_code_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_pin_code_neg_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
+struct hci_cp_change_conn_ptype {
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_AUTH_REQUESTED 0x0411
+struct hci_cp_auth_requested {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_SET_CONN_ENCRYPT 0x0413
+struct hci_cp_set_conn_encrypt {
+ __le16 handle;
+ __u8 encrypt;
+} __packed;
+
+#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
+struct hci_cp_change_conn_link_key {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_REMOTE_NAME_REQ 0x0419
+struct hci_cp_remote_name_req {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_mode;
+ __le16 clock_offset;
+} __packed;
+
+#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
+struct hci_cp_remote_name_req_cancel {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_FEATURES 0x041b
+struct hci_cp_read_remote_features {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
+struct hci_cp_read_remote_ext_features {
+ __le16 handle;
+ __u8 page;
+} __packed;
+
+#define HCI_OP_READ_REMOTE_VERSION 0x041d
+struct hci_cp_read_remote_version {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_CLOCK_OFFSET 0x041f
+struct hci_cp_read_clock_offset {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_SETUP_SYNC_CONN 0x0428
+struct hci_cp_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ __le16 max_latency;
+ __le16 voice_setting;
+ __u8 retrans_effort;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
+struct hci_cp_accept_sync_conn_req {
+ bdaddr_t bdaddr;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ __le16 max_latency;
+ __le16 content_format;
+ __u8 retrans_effort;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
+struct hci_cp_reject_sync_conn_req {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_REPLY 0x042b
+struct hci_cp_io_capability_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_REPLY 0x042c
+struct hci_cp_user_confirm_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_user_confirm_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+
+#define HCI_OP_USER_PASSKEY_REPLY 0x042e
+struct hci_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
+
+#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
+struct hci_cp_remote_oob_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+
+#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433
+struct hci_cp_remote_oob_data_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_CREATE_PHY_LINK 0x0435
+struct hci_cp_create_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+struct hci_cp_accept_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_DISCONN_PHY_LINK 0x0437
+struct hci_cp_disconn_phy_link {
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+struct ext_flow_spec {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
+#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
+struct hci_cp_create_accept_logical_link {
+ __u8 phy_handle;
+ struct ext_flow_spec tx_flow_spec;
+ struct ext_flow_spec rx_flow_spec;
+} __packed;
+
+#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
+struct hci_cp_disconn_logical_link {
+ __le16 log_handle;
+} __packed;
+
+#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
+struct hci_cp_logical_link_cancel {
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+struct hci_rp_logical_link_cancel {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_OP_SET_CSB 0x0441
+struct hci_cp_set_csb {
+ __u8 enable;
+ __u8 lt_addr;
+ __u8 lpo_allowed;
+ __le16 packet_type;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 csb_sv_tout;
+} __packed;
+struct hci_rp_set_csb {
+ __u8 status;
+ __u8 lt_addr;
+ __le16 interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN 0x0443
+
+#define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY 0x0445
+struct hci_cp_remote_oob_ext_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define HCI_OP_SNIFF_MODE 0x0803
+struct hci_cp_sniff_mode {
+ __le16 handle;
+ __le16 max_interval;
+ __le16 min_interval;
+ __le16 attempt;
+ __le16 timeout;
+} __packed;
+
+#define HCI_OP_EXIT_SNIFF_MODE 0x0804
+struct hci_cp_exit_sniff_mode {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_ROLE_DISCOVERY 0x0809
+struct hci_cp_role_discovery {
+ __le16 handle;
+} __packed;
+struct hci_rp_role_discovery {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_SWITCH_ROLE 0x080b
+struct hci_cp_switch_role {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_OP_READ_LINK_POLICY 0x080c
+struct hci_cp_read_link_policy {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_link_policy {
+ __u8 status;
+ __le16 handle;
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_WRITE_LINK_POLICY 0x080d
+struct hci_cp_write_link_policy {
+ __le16 handle;
+ __le16 policy;
+} __packed;
+struct hci_rp_write_link_policy {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_READ_DEF_LINK_POLICY 0x080e
+struct hci_rp_read_def_link_policy {
+ __u8 status;
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f
+struct hci_cp_write_def_link_policy {
+ __le16 policy;
+} __packed;
+
+#define HCI_OP_SNIFF_SUBRATE 0x0811
+struct hci_cp_sniff_subrate {
+ __le16 handle;
+ __le16 max_latency;
+ __le16 min_remote_timeout;
+ __le16 min_local_timeout;
+} __packed;
+
+#define HCI_OP_SET_EVENT_MASK 0x0c01
+
+#define HCI_OP_RESET 0x0c03
+
+#define HCI_OP_SET_EVENT_FLT 0x0c05
+struct hci_cp_set_event_flt {
+ __u8 flt_type;
+ __u8 cond_type;
+ __u8 condition[0];
+} __packed;
+
+/* Filter types */
+#define HCI_FLT_CLEAR_ALL 0x00
+#define HCI_FLT_INQ_RESULT 0x01
+#define HCI_FLT_CONN_SETUP 0x02
+
+/* CONN_SETUP Condition types */
+#define HCI_CONN_SETUP_ALLOW_ALL 0x00
+#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
+#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
+
+/* CONN_SETUP Conditions */
+#define HCI_CONN_SETUP_AUTO_OFF 0x01
+#define HCI_CONN_SETUP_AUTO_ON 0x02
+
+#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
+struct hci_cp_read_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 read_all;
+} __packed;
+struct hci_rp_read_stored_link_key {
+ __u8 status;
+ __u8 max_keys;
+ __u8 num_keys;
+} __packed;
+
+#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
+struct hci_cp_delete_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 delete_all;
+} __packed;
+struct hci_rp_delete_stored_link_key {
+ __u8 status;
+ __u8 num_keys;
+} __packed;
+
+#define HCI_MAX_NAME_LENGTH 248
+
+#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
+struct hci_cp_write_local_name {
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_NAME 0x0c14
+struct hci_rp_read_local_name {
+ __u8 status;
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
+
+#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
+
+#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
+ #define SCAN_DISABLED 0x00
+ #define SCAN_INQUIRY 0x01
+ #define SCAN_PAGE 0x02
+
+#define HCI_OP_READ_AUTH_ENABLE 0x0c1f
+
+#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
+ #define AUTH_DISABLED 0x00
+ #define AUTH_ENABLED 0x01
+
+#define HCI_OP_READ_ENCRYPT_MODE 0x0c21
+
+#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
+ #define ENCRYPT_DISABLED 0x00
+ #define ENCRYPT_P2P 0x01
+ #define ENCRYPT_BOTH 0x02
+
+#define HCI_OP_READ_CLASS_OF_DEV 0x0c23
+struct hci_rp_read_class_of_dev {
+ __u8 status;
+ __u8 dev_class[3];
+} __packed;
+
+#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
+struct hci_cp_write_class_of_dev {
+ __u8 dev_class[3];
+} __packed;
+
+#define HCI_OP_READ_VOICE_SETTING 0x0c25
+struct hci_rp_read_voice_setting {
+ __u8 status;
+ __le16 voice_setting;
+} __packed;
+
+#define HCI_OP_WRITE_VOICE_SETTING 0x0c26
+struct hci_cp_write_voice_setting {
+ __le16 voice_setting;
+} __packed;
+
+#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
+struct hci_cp_host_buffer_size {
+ __le16 acl_mtu;
+ __u8 sco_mtu;
+ __le16 acl_max_pkt;
+ __le16 sco_max_pkt;
+} __packed;
+
+#define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38
+struct hci_rp_read_num_supported_iac {
+ __u8 status;
+ __u8 num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
+struct hci_cp_write_current_iac_lap {
+ __u8 num_iac;
+ __u8 iac_lap[6];
+} __packed;
+
+#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
+
+#define HCI_MAX_EIR_LENGTH 240
+
+#define HCI_OP_WRITE_EIR 0x0c52
+struct hci_cp_write_eir {
+ __u8 fec;
+ __u8 data[HCI_MAX_EIR_LENGTH];
+} __packed;
+
+#define HCI_OP_READ_SSP_MODE 0x0c55
+struct hci_rp_read_ssp_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_WRITE_SSP_MODE 0x0c56
+struct hci_cp_write_ssp_mode {
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_DATA 0x0c57
+struct hci_rp_read_local_oob_data {
+ __u8 status;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+
+#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+struct hci_rp_read_inq_rsp_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA 0x0c64
+
+#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
+struct hci_rp_read_flow_control_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
+struct hci_cp_write_le_host_supported {
+ __u8 le;
+ __u8 simul;
+} __packed;
+
+#define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74
+struct hci_cp_set_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CSB_DATA 0x0c76
+struct hci_cp_set_csb_data {
+ __u8 lt_addr;
+ __u8 fragment;
+ __u8 data_length;
+ __u8 data[HCI_MAX_CSB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_csb_data {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78
+struct hci_cp_write_sync_train_params {
+ __le16 interval_min;
+ __le16 interval_max;
+ __le32 sync_train_tout;
+ __u8 service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+ __u8 status;
+ __le16 sync_train_int;
+} __packed;
+
+#define HCI_OP_READ_SC_SUPPORT 0x0c79
+struct hci_rp_read_sc_support {
+ __u8 status;
+ __u8 support;
+} __packed;
+
+#define HCI_OP_WRITE_SC_SUPPORT 0x0c7a
+struct hci_cp_write_sc_support {
+ __u8 support;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d
+struct hci_rp_read_local_oob_ext_data {
+ __u8 status;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_VERSION 0x1001
+struct hci_rp_read_local_version {
+ __u8 status;
+ __u8 hci_ver;
+ __le16 hci_rev;
+ __u8 lmp_ver;
+ __le16 manufacturer;
+ __le16 lmp_subver;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_COMMANDS 0x1002
+struct hci_rp_read_local_commands {
+ __u8 status;
+ __u8 commands[64];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_FEATURES 0x1003
+struct hci_rp_read_local_features {
+ __u8 status;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
+struct hci_cp_read_local_ext_features {
+ __u8 page;
+} __packed;
+struct hci_rp_read_local_ext_features {
+ __u8 status;
+ __u8 page;
+ __u8 max_page;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_READ_BUFFER_SIZE 0x1005
+struct hci_rp_read_buffer_size {
+ __u8 status;
+ __le16 acl_mtu;
+ __u8 sco_mtu;
+ __le16 acl_max_pkt;
+ __le16 sco_max_pkt;
+} __packed;
+
+#define HCI_OP_READ_BD_ADDR 0x1009
+struct hci_rp_read_bd_addr {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
+struct hci_rp_read_data_block_size {
+ __u8 status;
+ __le16 max_acl_len;
+ __le16 block_len;
+ __le16 num_blocks;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODECS 0x100b
+
+#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
+struct hci_rp_read_page_scan_activity {
+ __u8 status;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
+struct hci_cp_write_page_scan_activity {
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_READ_TX_POWER 0x0c2d
+struct hci_cp_read_tx_power {
+ __le16 handle;
+ __u8 type;
+} __packed;
+struct hci_rp_read_tx_power {
+ __u8 status;
+ __le16 handle;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
+struct hci_rp_read_page_scan_type {
+ __u8 status;
+ __u8 type;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47
+ #define PAGE_SCAN_TYPE_STANDARD 0x00
+ #define PAGE_SCAN_TYPE_INTERLACED 0x01
+
+#define HCI_OP_READ_RSSI 0x1405
+struct hci_cp_read_rssi {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_rssi {
+ __u8 status;
+ __le16 handle;
+ __s8 rssi;
+} __packed;
+
+#define HCI_OP_READ_CLOCK 0x1407
+struct hci_cp_read_clock {
+ __le16 handle;
+ __u8 which;
+} __packed;
+struct hci_rp_read_clock {
+ __u8 status;
+ __le16 handle;
+ __le32 clock;
+ __le16 accuracy;
+} __packed;
+
+#define HCI_OP_READ_ENC_KEY_SIZE 0x1408
+struct hci_cp_read_enc_key_size {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_enc_key_size {
+ __u8 status;
+ __le16 handle;
+ __u8 key_size;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
+struct hci_rp_read_local_amp_info {
+ __u8 status;
+ __u8 amp_status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le32 max_pdu;
+ __u8 amp_type;
+ __le16 pal_cap;
+ __le16 max_assoc_size;
+ __le32 max_flush_to;
+ __le32 be_flush_to;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
+struct hci_cp_read_local_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 max_len;
+} __packed;
+struct hci_rp_read_local_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+
+#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
+struct hci_cp_write_remote_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+struct hci_rp_write_remote_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
+
+#define HCI_OP_ENABLE_DUT_MODE 0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
+
+#define HCI_OP_LE_SET_EVENT_MASK 0x2001
+struct hci_cp_le_set_event_mask {
+ __u8 mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
+struct hci_rp_le_read_buffer_size {
+ __u8 status;
+ __le16 le_mtu;
+ __u8 le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003
+struct hci_rp_le_read_local_features {
+ __u8 status;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM 0x2006
+struct hci_cp_le_set_adv_param {
+ __le16 min_interval;
+ __le16 max_interval;
+ __u8 type;
+ __u8 own_address_type;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 channel_map;
+ __u8 filter_policy;
+} __packed;
+
+#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007
+struct hci_rp_le_read_adv_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_MAX_AD_LENGTH 31
+
+#define HCI_OP_LE_SET_ADV_DATA 0x2008
+struct hci_cp_le_set_adv_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009
+struct hci_cp_le_set_scan_rsp_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
+
+#define LE_SCAN_PASSIVE 0x00
+#define LE_SCAN_ACTIVE 0x01
+
+#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
+struct hci_cp_le_set_scan_param {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+ __u8 own_address_type;
+ __u8 filter_policy;
+} __packed;
+
+#define LE_SCAN_DISABLE 0x00
+#define LE_SCAN_ENABLE 0x01
+#define LE_SCAN_FILTER_DUP_DISABLE 0x00
+#define LE_SCAN_FILTER_DUP_ENABLE 0x01
+
+#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
+struct hci_cp_le_set_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+} __packed;
+
+#define HCI_LE_USE_PEER_ADDR 0x00
+#define HCI_LE_USE_WHITELIST 0x01
+
+#define HCI_OP_LE_CREATE_CONN 0x200d
+struct hci_cp_le_create_conn {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __u8 filter_policy;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 own_address_type;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
+
+#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
+struct hci_rp_le_read_white_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
+
+#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011
+struct hci_cp_le_add_to_white_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012
+struct hci_cp_le_del_from_white_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_CONN_UPDATE 0x2013
+struct hci_cp_le_conn_update {
+ __le16 handle;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_START_ENC 0x2019
+struct hci_cp_le_start_enc {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+ __u8 ltk[16];
+} __packed;
+
+#define HCI_OP_LE_LTK_REPLY 0x201a
+struct hci_cp_le_ltk_reply {
+ __le16 handle;
+ __u8 ltk[16];
+} __packed;
+struct hci_rp_le_ltk_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_LTK_NEG_REPLY 0x201b
+struct hci_cp_le_ltk_neg_reply {
+ __le16 handle;
+} __packed;
+struct hci_rp_le_ltk_neg_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c
+struct hci_rp_le_read_supported_states {
+ __u8 status;
+ __u8 le_states[8];
+} __packed;
+
+#define HCI_OP_LE_CONN_PARAM_REQ_REPLY 0x2020
+struct hci_cp_le_conn_param_req_reply {
+ __le16 handle;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 latency;
+ __le16 timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY 0x2021
+struct hci_cp_le_conn_param_req_neg_reply {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_SET_DATA_LEN 0x2022
+struct hci_cp_le_set_data_len {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+struct hci_rp_le_set_data_len {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_DEF_DATA_LEN 0x2023
+struct hci_rp_le_read_def_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_WRITE_DEF_DATA_LEN 0x2024
+struct hci_cp_le_write_def_data_len {
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
+struct hci_rp_le_read_max_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
+#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031
+struct hci_cp_le_set_default_phy {
+ __u8 all_phys;
+ __u8 tx_phys;
+ __u8 rx_phys;
+} __packed;
+
+/* ---- HCI Events ---- */
+#define HCI_EV_INQUIRY_COMPLETE 0x01
+
+#define HCI_EV_INQUIRY_RESULT 0x02
+struct inquiry_info {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+} __packed;
+
+#define HCI_EV_CONN_COMPLETE 0x03
+struct hci_ev_conn_complete {
+ __u8 status;
+ __le16 handle;
+ bdaddr_t bdaddr;
+ __u8 link_type;
+ __u8 encr_mode;
+} __packed;
+
+#define HCI_EV_CONN_REQUEST 0x04
+struct hci_ev_conn_request {
+ bdaddr_t bdaddr;
+ __u8 dev_class[3];
+ __u8 link_type;
+} __packed;
+
+#define HCI_EV_DISCONN_COMPLETE 0x05
+struct hci_ev_disconn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_AUTH_COMPLETE 0x06
+struct hci_ev_auth_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_REMOTE_NAME 0x07
+struct hci_ev_remote_name {
+ __u8 status;
+ bdaddr_t bdaddr;
+ __u8 name[HCI_MAX_NAME_LENGTH];
+} __packed;
+
+#define HCI_EV_ENCRYPT_CHANGE 0x08
+struct hci_ev_encrypt_change {
+ __u8 status;
+ __le16 handle;
+ __u8 encrypt;
+} __packed;
+
+#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
+struct hci_ev_change_link_key_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_REMOTE_FEATURES 0x0b
+struct hci_ev_remote_features {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_REMOTE_VERSION 0x0c
+struct hci_ev_remote_version {
+ __u8 status;
+ __le16 handle;
+ __u8 lmp_ver;
+ __le16 manufacturer;
+ __le16 lmp_subver;
+} __packed;
+
+#define HCI_EV_QOS_SETUP_COMPLETE 0x0d
+struct hci_qos {
+ __u8 service_type;
+ __u32 token_rate;
+ __u32 peak_bandwidth;
+ __u32 latency;
+ __u32 delay_variation;
+} __packed;
+struct hci_ev_qos_setup_complete {
+ __u8 status;
+ __le16 handle;
+ struct hci_qos qos;
+} __packed;
+
+#define HCI_EV_CMD_COMPLETE 0x0e
+struct hci_ev_cmd_complete {
+ __u8 ncmd;
+ __le16 opcode;
+} __packed;
+
+#define HCI_EV_CMD_STATUS 0x0f
+struct hci_ev_cmd_status {
+ __u8 status;
+ __u8 ncmd;
+ __le16 opcode;
+} __packed;
+
+#define HCI_EV_HARDWARE_ERROR 0x10
+struct hci_ev_hardware_error {
+ __u8 code;
+} __packed;
+
+#define HCI_EV_ROLE_CHANGE 0x12
+struct hci_ev_role_change {
+ __u8 status;
+ bdaddr_t bdaddr;
+ __u8 role;
+} __packed;
+
+#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_comp_pkts_info {
+ __le16 handle;
+ __le16 count;
+} __packed;
+
+struct hci_ev_num_comp_pkts {
+ __u8 num_hndl;
+ struct hci_comp_pkts_info handles[0];
+} __packed;
+
+#define HCI_EV_MODE_CHANGE 0x14
+struct hci_ev_mode_change {
+ __u8 status;
+ __le16 handle;
+ __u8 mode;
+ __le16 interval;
+} __packed;
+
+#define HCI_EV_PIN_CODE_REQ 0x16
+struct hci_ev_pin_code_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_LINK_KEY_REQ 0x17
+struct hci_ev_link_key_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_LINK_KEY_NOTIFY 0x18
+struct hci_ev_link_key_notify {
+ bdaddr_t bdaddr;
+ __u8 link_key[HCI_LINK_KEY_SIZE];
+ __u8 key_type;
+} __packed;
+
+#define HCI_EV_CLOCK_OFFSET 0x1c
+struct hci_ev_clock_offset {
+ __u8 status;
+ __le16 handle;
+ __le16 clock_offset;
+} __packed;
+
+#define HCI_EV_PKT_TYPE_CHANGE 0x1d
+struct hci_ev_pkt_type_change {
+ __u8 status;
+ __le16 handle;
+ __le16 pkt_type;
+} __packed;
+
+#define HCI_EV_PSCAN_REP_MODE 0x20
+struct hci_ev_pscan_rep_mode {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+} __packed;
+
+#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
+struct inquiry_info_with_rssi {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+} __packed;
+struct inquiry_info_with_rssi_and_pscan_mode {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+} __packed;
+
+#define HCI_EV_REMOTE_EXT_FEATURES 0x23
+struct hci_ev_remote_ext_features {
+ __u8 status;
+ __le16 handle;
+ __u8 page;
+ __u8 max_page;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_SYNC_CONN_COMPLETE 0x2c
+struct hci_ev_sync_conn_complete {
+ __u8 status;
+ __le16 handle;
+ bdaddr_t bdaddr;
+ __u8 link_type;
+ __u8 tx_interval;
+ __u8 retrans_window;
+ __le16 rx_pkt_len;
+ __le16 tx_pkt_len;
+ __u8 air_mode;
+} __packed;
+
+#define HCI_EV_SYNC_CONN_CHANGED 0x2d
+struct hci_ev_sync_conn_changed {
+ __u8 status;
+ __le16 handle;
+ __u8 tx_interval;
+ __u8 retrans_window;
+ __le16 rx_pkt_len;
+ __le16 tx_pkt_len;
+} __packed;
+
+#define HCI_EV_SNIFF_SUBRATE 0x2e
+struct hci_ev_sniff_subrate {
+ __u8 status;
+ __le16 handle;
+ __le16 max_tx_latency;
+ __le16 max_rx_latency;
+ __le16 max_remote_timeout;
+ __le16 max_local_timeout;
+} __packed;
+
+#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
+struct extended_inquiry_info {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+ __u8 data[240];
+} __packed;
+
+#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
+struct hci_ev_key_refresh_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EV_IO_CAPA_REQUEST 0x31
+struct hci_ev_io_capa_request {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_IO_CAPA_REPLY 0x32
+struct hci_ev_io_capa_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_EV_USER_CONFIRM_REQUEST 0x33
+struct hci_ev_user_confirm_req {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_EV_USER_PASSKEY_REQUEST 0x34
+struct hci_ev_user_passkey_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
+struct hci_ev_remote_oob_data_request {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
+struct hci_ev_simple_pair_complete {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b
+struct hci_ev_user_passkey_notify {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_KEYPRESS_STARTED 0
+#define HCI_KEYPRESS_ENTERED 1
+#define HCI_KEYPRESS_ERASED 2
+#define HCI_KEYPRESS_CLEARED 3
+#define HCI_KEYPRESS_COMPLETED 4
+
+#define HCI_EV_KEYPRESS_NOTIFY 0x3c
+struct hci_ev_keypress_notify {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+
+#define HCI_EV_REMOTE_HOST_FEATURES 0x3d
+struct hci_ev_remote_host_features {
+ bdaddr_t bdaddr;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_LE_META 0x3e
+struct hci_ev_le_meta {
+ __u8 subevent;
+} __packed;
+
+#define HCI_EV_PHY_LINK_COMPLETE 0x40
+struct hci_ev_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_CHANNEL_SELECTED 0x41
+struct hci_ev_channel_selected {
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
+struct hci_ev_disconn_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
+struct hci_ev_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
+struct hci_ev_disconn_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_NUM_COMP_BLOCKS 0x48
+struct hci_comp_blocks_info {
+ __le16 handle;
+ __le16 pkts;
+ __le16 blocks;
+} __packed;
+
+struct hci_ev_num_comp_blocks {
+ __le16 num_blocks;
+ __u8 num_hndl;
+ struct hci_comp_blocks_info handles[0];
+} __packed;
+
+#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
+struct hci_ev_sync_train_complete {
+ __u8 status;
+} __packed;
+
+#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+
+#define HCI_EV_LE_CONN_COMPLETE 0x01
+struct hci_ev_le_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
+/* Advertising report event types */
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
+
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+
+#define HCI_EV_LE_ADVERTISING_REPORT 0x02
+struct hci_ev_le_advertising_info {
+ __u8 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
+#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
+struct hci_ev_le_conn_update_complete {
+ __u8 status;
+ __le16 handle;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+} __packed;
+
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
+#define HCI_EV_LE_LTK_REQ 0x05
+struct hci_ev_le_ltk_req {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+} __packed;
+
+#define HCI_EV_LE_REMOTE_CONN_PARAM_REQ 0x06
+struct hci_ev_le_remote_conn_param_req {
+ __le16 handle;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define HCI_EV_LE_DATA_LEN_CHANGE 0x07
+struct hci_ev_le_data_len_change {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
+#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
+struct hci_ev_le_direct_adv_info {
+ __u8 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __s8 rssi;
+} __packed;
+
+/* Internal events generated by Bluetooth stack */
+#define HCI_EV_STACK_INTERNAL 0xfd
+struct hci_ev_stack_internal {
+ __u16 type;
+ __u8 data[0];
+} __packed;
+
+#define HCI_EV_SI_DEVICE 0x01
+struct hci_ev_si_device {
+ __u16 event;
+ __u16 dev_id;
+} __packed;
+
+#define HCI_EV_SI_SECURITY 0x02
+struct hci_ev_si_security {
+ __u16 event;
+ __u16 proto;
+ __u16 subproto;
+ __u8 incoming;
+} __packed;
+
+/* ---- HCI Packet structures ---- */
+#define HCI_COMMAND_HDR_SIZE 3
+#define HCI_EVENT_HDR_SIZE 2
+#define HCI_ACL_HDR_SIZE 4
+#define HCI_SCO_HDR_SIZE 3
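+
+/* The sizes above match sizeof() of the wire-format headers defined below:
+ * a 2-byte opcode plus 1-byte parameter length for commands, a 1-byte event
+ * code plus 1-byte parameter length for events, and so on.
+ */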
+
+struct hci_command_hdr {
+ __le16 opcode; /* OCF & OGF */
+ __u8 plen;
+} __packed;
+
+struct hci_event_hdr {
+ __u8 evt;
+ __u8 plen;
+} __packed;
+
+struct hci_acl_hdr {
+ __le16 handle; /* Handle & Flags(PB, BC) */
+ __le16 dlen;
+} __packed;
+
+struct hci_sco_hdr {
+ __le16 handle;
+ __u8 dlen;
+} __packed;
+
+static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_event_hdr *) skb->data;
+}
+
+static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_acl_hdr *) skb->data;
+}
+
+static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_sco_hdr *) skb->data;
+}
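+
+/* The three accessors above assume the header has not been pulled yet, i.e.
+ * skb->data still points at the start of the HCI header.
+ */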
+
+/* Command opcode pack/unpack */
+#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
+#define hci_opcode_ogf(op) (op >> 10)
+#define hci_opcode_ocf(op) (op & 0x03ff)
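+
+/* Worked example: OGF 0x03 (Controller & Baseband) with OCF 0x0003 packs to
+ * 0x0c03, i.e. HCI_OP_RESET, and the helpers recover both halves:
+ *
+ *	hci_opcode_pack(0x03, 0x0003) == 0x0c03
+ *	hci_opcode_ogf(0x0c03) == 0x03, hci_opcode_ocf(0x0c03) == 0x0003
+ */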
+
+/* ACL handle and flags pack/unpack */
+#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
+#define hci_handle(h) (h & 0x0fff)
+#define hci_flags(h) (h >> 12)
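+
+/* Worked example (illustrative values): connection handle 0x0001 combined
+ * with the ACL_START flag packs to 0x2001, and the helpers split it again:
+ *
+ *	hci_handle_pack(0x0001, ACL_START) == 0x2001
+ *	hci_handle(0x2001) == 0x0001, hci_flags(0x2001) == ACL_START
+ */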
+
+#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
new file mode 100644
index 0000000..893bbbb
--- /dev/null
+++ b/include/net/bluetooth/hci_core.h
@@ -0,0 +1,1545 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_CORE_H
+#define __HCI_CORE_H
+
+#include <linux/leds.h>
+#include <linux/rculist.h>
+
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sock.h>
+
+/* HCI priority */
+#define HCI_PRIO_MAX 7
+
+/* HCI Core structures */
+struct inquiry_data {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __le16 clock_offset;
+ __s8 rssi;
+ __u8 ssp_mode;
+};
+
+struct inquiry_entry {
+ struct list_head all; /* inq_cache.all */
+ struct list_head list; /* unknown or resolve */
+ enum {
+ NAME_NOT_KNOWN,
+ NAME_NEEDED,
+ NAME_PENDING,
+ NAME_KNOWN,
+ } name_state;
+ __u32 timestamp;
+ struct inquiry_data data;
+};
+
+struct discovery_state {
+ int type;
+ enum {
+ DISCOVERY_STOPPED,
+ DISCOVERY_STARTING,
+ DISCOVERY_FINDING,
+ DISCOVERY_RESOLVING,
+ DISCOVERY_STOPPING,
+ } state;
+ struct list_head all; /* All devices found during inquiry */
+ struct list_head unknown; /* Name state not known */
+ struct list_head resolve; /* Name needs to be resolved */
+ __u32 timestamp;
+ bdaddr_t last_adv_addr;
+ u8 last_adv_addr_type;
+ s8 last_adv_rssi;
+ u32 last_adv_flags;
+ u8 last_adv_data[HCI_MAX_AD_LENGTH];
+ u8 last_adv_data_len;
+ bool report_invalid_rssi;
+ bool result_filtering;
+ bool limited;
+ s8 rssi;
+ u16 uuid_count;
+ u8 (*uuids)[16];
+ unsigned long scan_start;
+ unsigned long scan_duration;
+};
+
+struct hci_conn_hash {
+ struct list_head list;
+ unsigned int acl_num;
+ unsigned int amp_num;
+ unsigned int sco_num;
+ unsigned int le_num;
+ unsigned int le_num_slave;
+};
+
+struct bdaddr_list {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+};
+
+struct bt_uuid {
+ struct list_head list;
+ u8 uuid[16];
+ u8 size;
+ u8 svc_hint;
+};
+
+struct smp_csrk {
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 type;
+ u8 val[16];
+};
+
+struct smp_ltk {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ u8 val[16];
+};
+
+struct smp_irk {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t rpa;
+ bdaddr_t bdaddr;
+ u8 addr_type;
+ u8 val[16];
+};
+
+struct link_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
+ u8 type;
+ u8 val[HCI_LINK_KEY_SIZE];
+ u8 pin_len;
+};
+
+struct oob_data {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 present;
+ u8 hash192[16];
+ u8 rand192[16];
+ u8 hash256[16];
+ u8 rand256[16];
+};
+
+struct adv_info {
+ struct list_head list;
+ bool pending;
+ __u8 instance;
+ __u32 flags;
+ __u16 timeout;
+ __u16 remaining_time;
+ __u16 duration;
+ __u16 adv_data_len;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u16 scan_rsp_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+};
+
+#define HCI_MAX_ADV_INSTANCES 5
+#define HCI_DEFAULT_ADV_DURATION 2
+
+#define HCI_MAX_SHORT_NAME_LENGTH 10
+
+/* Default LE RPA expiry time, 15 minutes */
+#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
+
+/* Default min/max age of connection information (1s/3s) */
+#define DEFAULT_CONN_INFO_MIN_AGE 1000
+#define DEFAULT_CONN_INFO_MAX_AGE 3000
+
+struct amp_assoc {
+ __u16 len;
+ __u16 offset;
+ __u16 rem_len;
+ __u16 len_so_far;
+ __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
+};
+
+#define HCI_MAX_PAGES 3
+
+struct hci_dev {
+ struct list_head list;
+ struct mutex lock;
+
+ char name[8];
+ unsigned long flags;
+ __u16 id;
+ __u8 bus;
+ __u8 dev_type;
+ bdaddr_t bdaddr;
+ bdaddr_t setup_addr;
+ bdaddr_t public_addr;
+ bdaddr_t random_addr;
+ bdaddr_t static_addr;
+ __u8 adv_addr_type;
+ __u8 dev_name[HCI_MAX_NAME_LENGTH];
+ __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
+ __u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
+ __u8 dev_class[3];
+ __u8 major_class;
+ __u8 minor_class;
+ __u8 max_page;
+ __u8 features[HCI_MAX_PAGES][8];
+ __u8 le_features[8];
+ __u8 le_white_list_size;
+ __u8 le_states[8];
+ __u8 commands[64];
+ __u8 hci_ver;
+ __u16 hci_rev;
+ __u8 lmp_ver;
+ __u16 manufacturer;
+ __u16 lmp_subver;
+ __u16 voice_setting;
+ __u8 num_iac;
+ __u8 stored_max_keys;
+ __u8 stored_num_keys;
+ __u8 io_capability;
+ __s8 inq_tx_power;
+ __u16 page_scan_interval;
+ __u16 page_scan_window;
+ __u8 page_scan_type;
+ __u8 le_adv_channel_map;
+ __u16 le_adv_min_interval;
+ __u16 le_adv_max_interval;
+ __u8 le_scan_type;
+ __u16 le_scan_interval;
+ __u16 le_scan_window;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u16 le_conn_latency;
+ __u16 le_supv_timeout;
+ __u16 le_def_tx_len;
+ __u16 le_def_tx_time;
+ __u16 le_max_tx_len;
+ __u16 le_max_tx_time;
+ __u16 le_max_rx_len;
+ __u16 le_max_rx_time;
+ __u16 discov_interleaved_timeout;
+ __u16 conn_info_min_age;
+ __u16 conn_info_max_age;
+ __u8 ssp_debug_mode;
+ __u8 hw_error_code;
+ __u32 clock;
+
+ __u16 devid_source;
+ __u16 devid_vendor;
+ __u16 devid_product;
+ __u16 devid_version;
+
+ __u16 pkt_type;
+ __u16 esco_type;
+ __u16 link_policy;
+ __u16 link_mode;
+
+ __u32 idle_timeout;
+ __u16 sniff_min_interval;
+ __u16 sniff_max_interval;
+
+ __u8 amp_status;
+ __u32 amp_total_bw;
+ __u32 amp_max_bw;
+ __u32 amp_min_latency;
+ __u32 amp_max_pdu;
+ __u8 amp_type;
+ __u16 amp_pal_cap;
+ __u16 amp_assoc_size;
+ __u32 amp_max_flush_to;
+ __u32 amp_be_flush_to;
+
+ struct amp_assoc loc_assoc;
+
+ __u8 flow_ctl_mode;
+
+ unsigned int auto_accept_delay;
+
+ unsigned long quirks;
+
+ atomic_t cmd_cnt;
+ unsigned int acl_cnt;
+ unsigned int sco_cnt;
+ unsigned int le_cnt;
+
+ unsigned int acl_mtu;
+ unsigned int sco_mtu;
+ unsigned int le_mtu;
+ unsigned int acl_pkts;
+ unsigned int sco_pkts;
+ unsigned int le_pkts;
+
+ __u16 block_len;
+ __u16 block_mtu;
+ __u16 num_blocks;
+ __u16 block_cnt;
+
+ unsigned long acl_last_tx;
+ unsigned long sco_last_tx;
+ unsigned long le_last_tx;
+
+ struct workqueue_struct *workqueue;
+ struct workqueue_struct *req_workqueue;
+
+ struct work_struct power_on;
+ struct delayed_work power_off;
+ struct work_struct error_reset;
+
+ __u16 discov_timeout;
+ struct delayed_work discov_off;
+
+ struct delayed_work service_cache;
+
+ struct delayed_work cmd_timer;
+
+ struct work_struct rx_work;
+ struct work_struct cmd_work;
+ struct work_struct tx_work;
+
+ struct work_struct discov_update;
+ struct work_struct bg_scan_update;
+ struct work_struct scan_update;
+ struct work_struct connectable_update;
+ struct work_struct discoverable_update;
+ struct delayed_work le_scan_disable;
+ struct delayed_work le_scan_restart;
+
+ struct sk_buff_head rx_q;
+ struct sk_buff_head raw_q;
+ struct sk_buff_head cmd_q;
+
+ struct sk_buff *sent_cmd;
+
+ struct mutex req_lock;
+ wait_queue_head_t req_wait_q;
+ __u32 req_status;
+ __u32 req_result;
+ struct sk_buff *req_skb;
+
+ void *smp_data;
+ void *smp_bredr_data;
+
+ struct discovery_state discovery;
+ struct hci_conn_hash conn_hash;
+
+ struct list_head mgmt_pending;
+ struct list_head blacklist;
+ struct list_head whitelist;
+ struct list_head uuids;
+ struct list_head link_keys;
+ struct list_head long_term_keys;
+ struct list_head identity_resolving_keys;
+ struct list_head remote_oob_data;
+ struct list_head le_white_list;
+ struct list_head le_conn_params;
+ struct list_head pend_le_conns;
+ struct list_head pend_le_reports;
+
+ struct hci_dev_stats stat;
+
+ atomic_t promisc;
+
+ const char *hw_info;
+ const char *fw_info;
+ struct dentry *debugfs;
+
+ struct device dev;
+
+ struct rfkill *rfkill;
+
+ DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+
+ __s8 adv_tx_power;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data_len;
+
+ struct list_head adv_instances;
+ unsigned int adv_instance_cnt;
+ __u8 cur_adv_instance;
+ __u16 adv_instance_timeout;
+ struct delayed_work adv_instance_expire;
+
+ __u8 irk[16];
+ __u32 rpa_timeout;
+ struct delayed_work rpa_expired;
+ bdaddr_t rpa;
+
+#if IS_ENABLED(CONFIG_BT_LEDS)
+ struct led_trigger *power_led;
+#endif
+
+ int (*open)(struct hci_dev *hdev);
+ int (*close)(struct hci_dev *hdev);
+ int (*flush)(struct hci_dev *hdev);
+ int (*setup)(struct hci_dev *hdev);
+ int (*shutdown)(struct hci_dev *hdev);
+ int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
+ void (*notify)(struct hci_dev *hdev, unsigned int evt);
+ void (*hw_error)(struct hci_dev *hdev, u8 code);
+ int (*post_init)(struct hci_dev *hdev);
+ int (*set_diag)(struct hci_dev *hdev, bool enable);
+ int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+};
+
+#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+
+struct hci_conn {
+ struct list_head list;
+
+ atomic_t refcnt;
+
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+ bdaddr_t init_addr;
+ __u8 init_addr_type;
+ bdaddr_t resp_addr;
+ __u8 resp_addr_type;
+ __u16 handle;
+ __u16 state;
+ __u8 mode;
+ __u8 type;
+ __u8 role;
+ bool out;
+ __u8 attempt;
+ __u8 dev_class[3];
+ __u8 features[HCI_MAX_PAGES][8];
+ __u16 pkt_type;
+ __u16 link_policy;
+ __u8 key_type;
+ __u8 auth_type;
+ __u8 sec_level;
+ __u8 pending_sec_level;
+ __u8 pin_length;
+ __u8 enc_key_size;
+ __u8 io_capability;
+ __u32 passkey_notify;
+ __u8 passkey_entered;
+ __u16 disc_timeout;
+ __u16 conn_timeout;
+ __u16 setting;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u16 le_conn_interval;
+ __u16 le_conn_latency;
+ __u16 le_supv_timeout;
+ __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data_len;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+ unsigned long flags;
+
+ __u32 clock;
+ __u16 clock_accuracy;
+
+ unsigned long conn_info_timestamp;
+
+ __u8 remote_cap;
+ __u8 remote_auth;
+ __u8 remote_id;
+
+ unsigned int sent;
+
+ struct sk_buff_head data_q;
+ struct list_head chan_list;
+
+ struct delayed_work disc_work;
+ struct delayed_work auto_accept_work;
+ struct delayed_work idle_work;
+ struct delayed_work le_conn_timeout;
+ struct work_struct le_scan_cleanup;
+
+ struct device dev;
+ struct dentry *debugfs;
+
+ struct hci_dev *hdev;
+ void *l2cap_data;
+ void *sco_data;
+ struct amp_mgr *amp_mgr;
+
+ struct hci_conn *link;
+
+ void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
+};
+
+struct hci_chan {
+ struct list_head list;
+ __u16 handle;
+ struct hci_conn *conn;
+ struct sk_buff_head data_q;
+ unsigned int sent;
+ __u8 state;
+};
+
+struct hci_conn_params {
+ struct list_head list;
+ struct list_head action;
+
+ bdaddr_t addr;
+ u8 addr_type;
+
+ u16 conn_min_interval;
+ u16 conn_max_interval;
+ u16 conn_latency;
+ u16 supervision_timeout;
+
+ enum {
+ HCI_AUTO_CONN_DISABLED,
+ HCI_AUTO_CONN_REPORT,
+ HCI_AUTO_CONN_DIRECT,
+ HCI_AUTO_CONN_ALWAYS,
+ HCI_AUTO_CONN_LINK_LOSS,
+ HCI_AUTO_CONN_EXPLICIT,
+ } auto_connect;
+
+ struct hci_conn *conn;
+ bool explicit_connect;
+};
+
+extern struct list_head hci_dev_list;
+extern struct list_head hci_cb_list;
+extern rwlock_t hci_dev_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
+ hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ } while (0)
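Not part of the patch: a minimal sketch of the flag helpers defined just above, which wrap the atomic bitops over hdev->dev_flags. The flag used and the function name are examples only.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Illustrative only: set a device flag and atomically test-and-clear it. */
static void example_dev_flags(struct hci_dev *hdev)
{
	hci_dev_set_flag(hdev, HCI_LE_ADV);

	if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV))
		BT_DBG("%s: advertising flag was set", hdev->name);
}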
+
+/* ----- HCI interface to upper protocols ----- */
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+#if IS_ENABLED(CONFIG_BT_BREDR)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+#else
+static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+
+static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+{
+}
+#endif
+
+/* ----- Inquiry cache ----- */
+#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
+#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
+
+static inline void discovery_init(struct hci_dev *hdev)
+{
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ INIT_LIST_HEAD(&hdev->discovery.all);
+ INIT_LIST_HEAD(&hdev->discovery.unknown);
+ INIT_LIST_HEAD(&hdev->discovery.resolve);
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+}
+
+static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
+{
+ hdev->discovery.result_filtering = false;
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+ hdev->discovery.uuid_count = 0;
+ kfree(hdev->discovery.uuids);
+ hdev->discovery.uuids = NULL;
+ hdev->discovery.scan_start = 0;
+ hdev->discovery.scan_duration = 0;
+}
+
+bool hci_discovery_active(struct hci_dev *hdev);
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state);
+
+static inline int inquiry_cache_empty(struct hci_dev *hdev)
+{
+ return list_empty(&hdev->discovery.all);
+}
+
+static inline long inquiry_cache_age(struct hci_dev *hdev)
+{
+ struct discovery_state *c = &hdev->discovery;
+ return jiffies - c->timestamp;
+}
+
+static inline long inquiry_entry_age(struct inquiry_entry *e)
+{
+ return jiffies - e->timestamp;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state);
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie);
+u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known);
+void hci_inquiry_cache_flush(struct hci_dev *hdev);
+
+/* ----- HCI Connections ----- */
+enum {
+ HCI_CONN_AUTH_PEND,
+ HCI_CONN_REAUTH_PEND,
+ HCI_CONN_ENCRYPT_PEND,
+ HCI_CONN_RSWITCH_PEND,
+ HCI_CONN_MODE_CHANGE_PEND,
+ HCI_CONN_SCO_SETUP_PEND,
+ HCI_CONN_MGMT_CONNECTED,
+ HCI_CONN_SSP_ENABLED,
+ HCI_CONN_SC_ENABLED,
+ HCI_CONN_AES_CCM,
+ HCI_CONN_POWER_SAVE,
+ HCI_CONN_FLUSH_KEY,
+ HCI_CONN_ENCRYPT,
+ HCI_CONN_AUTH,
+ HCI_CONN_SECURE,
+ HCI_CONN_FIPS,
+ HCI_CONN_STK_ENCRYPT,
+ HCI_CONN_AUTH_INITIATOR,
+ HCI_CONN_DROP,
+ HCI_CONN_PARAM_REMOVAL_PEND,
+ HCI_CONN_NEW_LINK_KEY,
+ HCI_CONN_SCANNING,
+ HCI_CONN_AUTH_FAILURE,
+};
+
+static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+}
+
+static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+ test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
+}
+
+static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ list_add_rcu(&c->list, &h->list);
+ switch (c->type) {
+ case ACL_LINK:
+ h->acl_num++;
+ break;
+ case AMP_LINK:
+ h->amp_num++;
+ break;
+ case LE_LINK:
+ h->le_num++;
+ if (c->role == HCI_ROLE_SLAVE)
+ h->le_num_slave++;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ h->sco_num++;
+ break;
+ }
+}
+
+static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+
+ list_del_rcu(&c->list);
+ synchronize_rcu();
+
+ switch (c->type) {
+ case ACL_LINK:
+ h->acl_num--;
+ break;
+ case AMP_LINK:
+ h->amp_num--;
+ break;
+ case LE_LINK:
+ h->le_num--;
+ if (c->role == HCI_ROLE_SLAVE)
+ h->le_num_slave--;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ h->sco_num--;
+ break;
+ }
+}
+
+static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+ case AMP_LINK:
+ return h->amp_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+ case ESCO_LINK:
+ return h->sco_num;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+ return c->acl_num + c->amp_num + c->sco_num + c->le_num;
+}
+
+static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+ __u8 type = INVALID_LINK;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ type = c->type;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return type;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
+ __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
+ __u8 type, bdaddr_t *ba)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 ba_type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != LE_LINK)
+ continue;
+
+ if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+ __u8 type, __u16 state)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+int hci_disconnect(struct hci_conn *conn, __u8 reason);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
+void hci_sco_setup(struct hci_conn *conn, __u8 status);
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role);
+int hci_conn_del(struct hci_conn *conn);
+void hci_conn_hash_flush(struct hci_dev *hdev);
+void hci_conn_check_pending(struct hci_dev *hdev);
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+void hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout);
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level, u16 conn_timeout,
+ u8 role, bdaddr_t *direct_rpa);
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting);
+int hci_conn_check_link_mode(struct hci_conn *conn);
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
+int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+ bool initiator);
+int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
+
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
+
+void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+
+/*
+ * hci_conn_get() and hci_conn_put() are used to control the life-time of an
+ * "hci_conn" object. They do not guarantee that the hci_conn object is running,
+ * working or anything else. They just guarantee that the object is available
+ * and can be dereferenced. So you can use its locks, local variables and any
+ * other constant data.
+ * Before accessing runtime data, you _must_ lock the object and then check that
+ * it is still running. As soon as you release the locks, the connection might
+ * get dropped, though.
+ *
+ * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
+ * how long the underlying connection is held. So every channel that runs on the
+ * hci_conn object calls this to prevent the connection from disappearing. As
+ * long as you hold a device, you must also guarantee that you have a valid
+ * reference to the device via hci_conn_get() (or the initial reference from
+ * hci_conn_add()).
+ * The hold()/drop() ref-count is known to drop below 0 sometimes, which does
+ * not break anything because nothing depends on it staying non-negative. It
+ * does mean, however, that hold()/drop() cannot take _get()/_put() references
+ * themselves and instead require the caller to already hold a valid ref (FIXME).
+ */
+
+static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
+{
+ get_device(&conn->dev);
+ return conn;
+}
+
+static inline void hci_conn_put(struct hci_conn *conn)
+{
+ put_device(&conn->dev);
+}
+
+static inline void hci_conn_hold(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
+ atomic_inc(&conn->refcnt);
+ cancel_delayed_work(&conn->disc_work);
+}
+
+static inline void hci_conn_drop(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
+ if (atomic_dec_and_test(&conn->refcnt)) {
+ unsigned long timeo;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ cancel_delayed_work(&conn->idle_work);
+ if (conn->state == BT_CONNECTED) {
+ timeo = conn->disc_timeout;
+ if (!conn->out)
+ timeo *= 2;
+ } else {
+ timeo = 0;
+ }
+ break;
+
+ case AMP_LINK:
+ timeo = conn->disc_timeout;
+ break;
+
+ default:
+ timeo = 0;
+ break;
+ }
+
+ cancel_delayed_work(&conn->disc_work);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->disc_work, timeo);
+ }
+}
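Not part of the patch: a minimal usage sketch of the two reference layers described in the comment above, assuming the caller already obtained an hci_conn pointer (for example via hci_conn_hash_lookup_handle()). The function name and the work done under the lock are illustrative only.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Illustrative only: keep both the object and the link alive while using it. */
static void example_use_conn(struct hci_conn *conn)
{
	hci_conn_get(conn);	/* object stays dereferenceable */
	hci_conn_hold(conn);	/* underlying connection is kept up */

	hci_dev_lock(conn->hdev);
	if (conn->state == BT_CONNECTED)
		BT_DBG("hcon %p handle %d still connected", conn, conn->handle);
	hci_dev_unlock(conn->hdev);

	hci_conn_drop(conn);	/* release in reverse order */
	hci_conn_put(conn);
}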
+
+/* ----- HCI Devices ----- */
+static inline void hci_dev_put(struct hci_dev *d)
+{
+ BT_DBG("%s orig refcnt %d", d->name,
+ kref_read(&d->dev.kobj.kref));
+
+ put_device(&d->dev);
+}
+
+static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
+{
+ BT_DBG("%s orig refcnt %d", d->name,
+ kref_read(&d->dev.kobj.kref));
+
+ get_device(&d->dev);
+ return d;
+}
+
+#define hci_dev_lock(d) mutex_lock(&d->lock)
+#define hci_dev_unlock(d) mutex_unlock(&d->lock)
+
+#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
+#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
+
+static inline void *hci_get_drvdata(struct hci_dev *hdev)
+{
+ return dev_get_drvdata(&hdev->dev);
+}
+
+static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
+
+struct hci_dev *hci_dev_get(int index);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
+
+struct hci_dev *hci_alloc_dev(void);
+void hci_free_dev(struct hci_dev *hdev);
+int hci_register_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
+int hci_suspend_dev(struct hci_dev *hdev);
+int hci_resume_dev(struct hci_dev *hdev);
+int hci_reset_dev(struct hci_dev *hdev);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+int hci_dev_open(__u16 dev);
+int hci_dev_close(__u16 dev);
+int hci_dev_do_close(struct hci_dev *hdev);
+int hci_dev_reset(__u16 dev);
+int hci_dev_reset_stat(__u16 dev);
+int hci_dev_cmd(unsigned int cmd, void __user *arg);
+int hci_get_dev_list(void __user *arg);
+int hci_get_dev_info(void __user *arg);
+int hci_get_conn_list(void __user *arg);
+int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
+int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
+int hci_inquiry(void __user *arg);
+
+struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
+ bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+void hci_bdaddr_list_clear(struct list_head *list);
+
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_clear_disabled(struct hci_dev *hdev);
+
+struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+ bdaddr_t *addr,
+ u8 addr_type);
+
+void hci_uuids_clear(struct hci_dev *hdev);
+
+void hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
+ bdaddr_t *bdaddr, u8 *val, u8 type,
+ u8 pin_len, bool *persistent);
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 type, u8 authenticated,
+ u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 role);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
+void hci_smp_ltks_clear(struct hci_dev *hdev);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 val[16], bdaddr_t *rpa);
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+void hci_smp_irks_clear(struct hci_dev *hdev);
+
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev);
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type);
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type, u8 *hash192, u8 *rand192,
+ u8 *hash256, u8 *rand256);
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type);
+
+void hci_adv_instances_clear(struct hci_dev *hdev);
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
+int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+ u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration);
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
+
+void hci_init_sysfs(struct hci_dev *hdev);
+void hci_conn_init_sysfs(struct hci_conn *conn);
+void hci_conn_add_sysfs(struct hci_conn *conn);
+void hci_conn_del_sysfs(struct hci_conn *conn);
+
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
+
+/* ----- LMP capabilities ----- */
+#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
+#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
+#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
+#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
+#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
+#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
+#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
+#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
+#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
+#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
+#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
+#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
+#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+
+/* ----- Extended LMP capabilities ----- */
+#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
+#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE)
+#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
+#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
+#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
+#define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING)
+
+/* ----- Host capabilities ----- */
+#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
+#define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC)
+#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
+#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
+
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+ !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_SC_ENABLED))
+
+/* ----- HCI protocols ----- */
+#define HCI_PROTO_DEFER 0x01
+
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 type, __u8 *flags)
+{
+ switch (type) {
+ case ACL_LINK:
+ return l2cap_connect_ind(hdev, bdaddr);
+
+ case SCO_LINK:
+ case ESCO_LINK:
+ return sco_connect_ind(hdev, bdaddr, flags);
+
+ default:
+ BT_ERR("unknown link type %d", type);
+ return -EINVAL;
+ }
+}
+
+static inline int hci_proto_disconn_ind(struct hci_conn *conn)
+{
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return HCI_ERROR_REMOTE_USER_TERM;
+
+ return l2cap_disconn_ind(conn);
+}
+
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
+
+ char *name;
+
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
+
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
+}
+
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
+}
+
+static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ return;
+
+ encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+}
+
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
+{
+ struct hci_cb *cb;
+
+ if (conn->sec_level == BT_SECURITY_SDP)
+ conn->sec_level = BT_SECURITY_LOW;
+
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+}
+
+static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->key_change_cfm)
+ cb->key_change_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+}
+
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
+{
+ struct hci_cb *cb;
+
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->role_switch_cfm)
+ cb->role_switch_cfm(conn, status, role);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+}
+
+static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
+ size_t *data_len)
+{
+ size_t parsed = 0;
+
+ if (eir_len < 2)
+ return NULL;
+
+ while (parsed < eir_len - 1) {
+ u8 field_len = eir[0];
+
+ if (field_len == 0)
+ break;
+
+ parsed += field_len + 1;
+
+ if (parsed > eir_len)
+ break;
+
+ if (eir[1] != type) {
+ eir += field_len + 1;
+ continue;
+ }
+
+ /* Zero length data */
+ if (field_len == 1)
+ return NULL;
+
+ if (data_len)
+ *data_len = field_len - 1;
+
+ return &eir[2];
+ }
+
+ return NULL;
+}
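Not part of the patch: a hypothetical caller showing how eir_get_data() walks the length/type/value triplets of an EIR or advertising-data buffer. The 0x09 data type is the standard "Complete Local Name"; the function name is made up.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Illustrative only: pull the complete local name out of an EIR blob. */
static void example_parse_name(u8 *eir, size_t eir_len)
{
	size_t name_len;
	u8 *name;

	name = eir_get_data(eir, eir_len, 0x09 /* Complete Local Name */,
			    &name_len);
	if (name)
		BT_DBG("remote name: %.*s", (int)name_len, name);
}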
+
+static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (addr_type != ADDR_LE_DEV_RANDOM)
+ return false;
+
+ if ((bdaddr->b[5] & 0xc0) == 0x40)
+ return true;
+
+ return false;
+}
+
+static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
+{
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+ return true;
+
+ /* Check for Random Static address type */
+ if ((addr->b[5] & 0xc0) == 0xc0)
+ return true;
+
+ return false;
+}
+
+static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
+ return NULL;
+
+ return hci_find_irk_by_rpa(hdev, bdaddr);
+}
+
+static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+ u16 to_multiplier)
+{
+ u16 max_latency;
+
+ if (min > max || min < 6 || max > 3200)
+ return -EINVAL;
+
+ if (to_multiplier < 10 || to_multiplier > 3200)
+ return -EINVAL;
+
+ if (max >= to_multiplier * 8)
+ return -EINVAL;
+
+ max_latency = (to_multiplier * 4 / max) - 1;
+ if (latency > 499 || latency > max_latency)
+ return -EINVAL;
+
+ return 0;
+}
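Not part of the patch: a worked example of the validation above. With min = 24 and max = 40 (a 30-50 ms connection interval in 1.25 ms units) and to_multiplier = 42 (a 420 ms supervision timeout in 10 ms units), the range and timeout checks pass and max_latency = (42 * 4 / 40) - 1 = 3, so a slave latency of up to 3 connection events is accepted.

#include <net/bluetooth/hci_core.h>

/* Illustrative only: the parameter set discussed above passes validation. */
static bool example_conn_params_ok(void)
{
	/* min 24, max 40 (1.25 ms units), latency 2, timeout 42 (10 ms units) */
	return hci_check_conn_params(24, 40, 2, 42) == 0;
}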
+
+int hci_register_cb(struct hci_cb *hcb);
+int hci_unregister_cb(struct hci_cb *hcb);
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param);
+
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
+void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
+
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+
+/* ----- HCI Sockets ----- */
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk);
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event);
+
+#define HCI_MGMT_VAR_LEN BIT(0)
+#define HCI_MGMT_NO_HDEV BIT(1)
+#define HCI_MGMT_UNTRUSTED BIT(2)
+#define HCI_MGMT_UNCONFIGURED BIT(3)
+
+struct hci_mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ size_t data_len;
+ unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+ struct list_head list;
+ unsigned short channel;
+ size_t handler_count;
+ const struct hci_mgmt_handler *handlers;
+ void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
+/* Management interface */
+#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
+ BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+
+/* These LE scan and inquiry parameters were chosen according to the LE
+ * General Discovery Procedure specification.
+ */
+#define DISCOV_LE_SCAN_WIN 0x12
+#define DISCOV_LE_SCAN_INT 0x12
+#define DISCOV_LE_TIMEOUT 10240 /* msec */
+#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
+#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
+#define DISCOV_BREDR_INQUIRY_LEN 0x08
+#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+
+void mgmt_fill_version_info(void *ver);
+int mgmt_new_settings(struct hci_dev *hdev);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+void mgmt_power_on(struct hci_dev *hdev, int err);
+void __mgmt_power_off(struct hci_dev *hdev);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u32 flags, u8 *name, u8 name_len);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason,
+ bool mgmt_connected);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 value,
+ u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered);
+void mgmt_auth_failed(struct hci_conn *conn, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
+void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+bool mgmt_powering_down(struct hci_dev *hdev);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ bool persistent);
+void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type, u8 store_hint, u16 min_interval,
+ u16 max_interval, u16 latency, u16 timeout);
+void mgmt_smp_complete(struct hci_conn *conn, bool complete);
+bool mgmt_get_connectable(struct hci_dev *hdev);
+void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
+u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
+void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
+ u8 instance);
+void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
+ u8 instance);
+
+u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
+ u16 to_multiplier);
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
+ __u8 ltk[16], __u8 key_size);
+
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *bdaddr_type);
+
+#define SCO_AIRMODE_MASK 0x0003
+#define SCO_AIRMODE_CVSD 0x0000
+#define SCO_AIRMODE_TRANSP 0x0003
+
+#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
new file mode 100644
index 0000000..240786b
--- /dev/null
+++ b/include/net/bluetooth/hci_mon.h
@@ -0,0 +1,67 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_MON_H
+#define __HCI_MON_H
+
+struct hci_mon_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+#define HCI_MON_HDR_SIZE 6
+
+#define HCI_MON_NEW_INDEX 0
+#define HCI_MON_DEL_INDEX 1
+#define HCI_MON_COMMAND_PKT 2
+#define HCI_MON_EVENT_PKT 3
+#define HCI_MON_ACL_TX_PKT 4
+#define HCI_MON_ACL_RX_PKT 5
+#define HCI_MON_SCO_TX_PKT 6
+#define HCI_MON_SCO_RX_PKT 7
+#define HCI_MON_OPEN_INDEX 8
+#define HCI_MON_CLOSE_INDEX 9
+#define HCI_MON_INDEX_INFO 10
+#define HCI_MON_VENDOR_DIAG 11
+#define HCI_MON_SYSTEM_NOTE 12
+#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
+
+struct hci_mon_new_index {
+ __u8 type;
+ __u8 bus;
+ bdaddr_t bdaddr;
+ char name[8];
+} __packed;
+#define HCI_MON_NEW_INDEX_SIZE 16
+
+struct hci_mon_index_info {
+ bdaddr_t bdaddr;
+ __le16 manufacturer;
+} __packed;
+#define HCI_MON_INDEX_INFO_SIZE 8
+
+#endif /* __HCI_MON_H */
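Not part of the patch: a hedged sketch of how a monitor record is framed with struct hci_mon_hdr before being delivered to HCI_CHANNEL_MONITOR sockets, roughly mirroring what net/bluetooth/hci_sock.c does. The skb is assumed to already contain the record payload; the function name is hypothetical.

#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

/* Illustrative only: prepend a monitor header to an already-built payload. */
static void example_mon_frame(struct hci_dev *hdev, struct sk_buff *skb,
			      u16 opcode)
{
	struct hci_mon_hdr *hdr;

	hdr = (struct hci_mon_hdr *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* e.g. HCI_MON_INDEX_INFO */
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
}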
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
new file mode 100644
index 0000000..8e9138a
--- /dev/null
+++ b/include/net/bluetooth/hci_sock.h
@@ -0,0 +1,176 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_SOCK_H
+#define __HCI_SOCK_H
+
+/* Socket options */
+#define HCI_DATA_DIR 1
+#define HCI_FILTER 2
+#define HCI_TIME_STAMP 3
+
+/* CMSG flags */
+#define HCI_CMSG_DIR 0x0001
+#define HCI_CMSG_TSTAMP 0x0002
+
+struct sockaddr_hci {
+ sa_family_t hci_family;
+ unsigned short hci_dev;
+ unsigned short hci_channel;
+};
+#define HCI_DEV_NONE 0xffff
+
+#define HCI_CHANNEL_RAW 0
+#define HCI_CHANNEL_USER 1
+#define HCI_CHANNEL_MONITOR 2
+#define HCI_CHANNEL_CONTROL 3
+#define HCI_CHANNEL_LOGGING 4
+
+struct hci_filter {
+ unsigned long type_mask;
+ unsigned long event_mask[2];
+ __le16 opcode;
+};
+
+struct hci_ufilter {
+ __u32 type_mask;
+ __u32 event_mask[2];
+ __le16 opcode;
+};
+
+#define HCI_FLT_TYPE_BITS 31
+#define HCI_FLT_EVENT_BITS 63
+#define HCI_FLT_OGF_BITS 63
+#define HCI_FLT_OCF_BITS 127
+
+/* Ioctl defines */
+#define HCIDEVUP _IOW('H', 201, int)
+#define HCIDEVDOWN _IOW('H', 202, int)
+#define HCIDEVRESET _IOW('H', 203, int)
+#define HCIDEVRESTAT _IOW('H', 204, int)
+
+#define HCIGETDEVLIST _IOR('H', 210, int)
+#define HCIGETDEVINFO _IOR('H', 211, int)
+#define HCIGETCONNLIST _IOR('H', 212, int)
+#define HCIGETCONNINFO _IOR('H', 213, int)
+#define HCIGETAUTHINFO _IOR('H', 215, int)
+
+#define HCISETRAW _IOW('H', 220, int)
+#define HCISETSCAN _IOW('H', 221, int)
+#define HCISETAUTH _IOW('H', 222, int)
+#define HCISETENCRYPT _IOW('H', 223, int)
+#define HCISETPTYPE _IOW('H', 224, int)
+#define HCISETLINKPOL _IOW('H', 225, int)
+#define HCISETLINKMODE _IOW('H', 226, int)
+#define HCISETACLMTU _IOW('H', 227, int)
+#define HCISETSCOMTU _IOW('H', 228, int)
+
+#define HCIBLOCKADDR _IOW('H', 230, int)
+#define HCIUNBLOCKADDR _IOW('H', 231, int)
+
+#define HCIINQUIRY _IOR('H', 240, int)
+
+/* Ioctl requests structures */
+struct hci_dev_stats {
+ __u32 err_rx;
+ __u32 err_tx;
+ __u32 cmd_tx;
+ __u32 evt_rx;
+ __u32 acl_tx;
+ __u32 acl_rx;
+ __u32 sco_tx;
+ __u32 sco_rx;
+ __u32 byte_rx;
+ __u32 byte_tx;
+};
+
+struct hci_dev_info {
+ __u16 dev_id;
+ char name[8];
+
+ bdaddr_t bdaddr;
+
+ __u32 flags;
+ __u8 type;
+
+ __u8 features[8];
+
+ __u32 pkt_type;
+ __u32 link_policy;
+ __u32 link_mode;
+
+ __u16 acl_mtu;
+ __u16 acl_pkts;
+ __u16 sco_mtu;
+ __u16 sco_pkts;
+
+ struct hci_dev_stats stat;
+};
+
+struct hci_conn_info {
+ __u16 handle;
+ bdaddr_t bdaddr;
+ __u8 type;
+ __u8 out;
+ __u16 state;
+ __u32 link_mode;
+};
+
+struct hci_dev_req {
+ __u16 dev_id;
+ __u32 dev_opt;
+};
+
+struct hci_dev_list_req {
+ __u16 dev_num;
+ struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
+};
+
+struct hci_conn_list_req {
+ __u16 dev_id;
+ __u16 conn_num;
+ struct hci_conn_info conn_info[0];
+};
+
+struct hci_conn_info_req {
+ bdaddr_t bdaddr;
+ __u8 type;
+ struct hci_conn_info conn_info[0];
+};
+
+struct hci_auth_info_req {
+ bdaddr_t bdaddr;
+ __u8 type;
+};
+
+struct hci_inquiry_req {
+ __u16 dev_id;
+ __u16 flags;
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+};
+#define IREQ_CACHE_FLUSH 0x0001
+
+#endif /* __HCI_SOCK_H */
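Not part of the patch: a hedged user-space sketch of how the ioctl request structures above are used, taking HCIGETDEVINFO as the simplest case. It assumes the libbluetooth copies of these definitions (<bluetooth/bluetooth.h>, <bluetooth/hci.h>); error handling is trimmed.

/* Illustrative only (user space): query basic info about hci0. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_devinfo(void)
{
	struct hci_dev_info di;
	int sk, err;

	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&di, 0, sizeof(di));
	di.dev_id = 0;				/* hci0 */

	err = ioctl(sk, HCIGETDEVINFO, (void *)&di);
	if (!err)
		printf("%s: acl_mtu %d acl_pkts %d\n",
		       di.name, di.acl_mtu, di.acl_pkts);

	close(sk);
	return err;
}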
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
new file mode 100644
index 0000000..0697fd4
--- /dev/null
+++ b/include/net/bluetooth/l2cap.h
@@ -0,0 +1,951 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __L2CAP_H
+#define __L2CAP_H
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+
+/* L2CAP defaults */
+#define L2CAP_DEFAULT_MTU 672
+#define L2CAP_DEFAULT_MIN_MTU 48
+#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
+#define L2CAP_EFS_DEFAULT_FLUSH_TO 0xFFFFFFFF
+#define L2CAP_DEFAULT_TX_WINDOW 63
+#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
+#define L2CAP_DEFAULT_MAX_TX 3
+#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
+#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
+#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
+#define L2CAP_DEFAULT_ACK_TO 200
+#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
+#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
+#define L2CAP_LE_MIN_MTU 23
+
+#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
+#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
+#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
+
+#define L2CAP_A2MP_DEFAULT_MTU 670
+
+/* L2CAP socket address */
+struct sockaddr_l2 {
+ sa_family_t l2_family;
+ __le16 l2_psm;
+ bdaddr_t l2_bdaddr;
+ __le16 l2_cid;
+ __u8 l2_bdaddr_type;
+};
+
+/* L2CAP socket options */
+#define L2CAP_OPTIONS 0x01
+struct l2cap_options {
+ __u16 omtu;
+ __u16 imtu;
+ __u16 flush_to;
+ __u8 mode;
+ __u8 fcs;
+ __u8 max_tx;
+ __u16 txwin_size;
+};
+
+#define L2CAP_CONNINFO 0x02
+struct l2cap_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define L2CAP_LM 0x03
+#define L2CAP_LM_MASTER 0x0001
+#define L2CAP_LM_AUTH 0x0002
+#define L2CAP_LM_ENCRYPT 0x0004
+#define L2CAP_LM_TRUSTED 0x0008
+#define L2CAP_LM_RELIABLE 0x0010
+#define L2CAP_LM_SECURE 0x0020
+#define L2CAP_LM_FIPS 0x0040
+
+/* L2CAP command codes */
+#define L2CAP_COMMAND_REJ 0x01
+#define L2CAP_CONN_REQ 0x02
+#define L2CAP_CONN_RSP 0x03
+#define L2CAP_CONF_REQ 0x04
+#define L2CAP_CONF_RSP 0x05
+#define L2CAP_DISCONN_REQ 0x06
+#define L2CAP_DISCONN_RSP 0x07
+#define L2CAP_ECHO_REQ 0x08
+#define L2CAP_ECHO_RSP 0x09
+#define L2CAP_INFO_REQ 0x0a
+#define L2CAP_INFO_RSP 0x0b
+#define L2CAP_CREATE_CHAN_REQ 0x0c
+#define L2CAP_CREATE_CHAN_RSP 0x0d
+#define L2CAP_MOVE_CHAN_REQ 0x0e
+#define L2CAP_MOVE_CHAN_RSP 0x0f
+#define L2CAP_MOVE_CHAN_CFM 0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
+#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
+#define L2CAP_LE_CONN_REQ 0x14
+#define L2CAP_LE_CONN_RSP 0x15
+#define L2CAP_LE_CREDITS 0x16
+
+/* L2CAP extended feature mask */
+#define L2CAP_FEAT_FLOWCTL 0x00000001
+#define L2CAP_FEAT_RETRANS 0x00000002
+#define L2CAP_FEAT_BIDIR_QOS 0x00000004
+#define L2CAP_FEAT_ERTM 0x00000008
+#define L2CAP_FEAT_STREAMING 0x00000010
+#define L2CAP_FEAT_FCS 0x00000020
+#define L2CAP_FEAT_EXT_FLOW 0x00000040
+#define L2CAP_FEAT_FIXED_CHAN 0x00000080
+#define L2CAP_FEAT_EXT_WINDOW 0x00000100
+#define L2CAP_FEAT_UCD 0x00000200
+
+/* L2CAP checksum option */
+#define L2CAP_FCS_NONE 0x00
+#define L2CAP_FCS_CRC16 0x01
+
+/* L2CAP fixed channels */
+#define L2CAP_FC_SIG_BREDR 0x02
+#define L2CAP_FC_CONNLESS 0x04
+#define L2CAP_FC_A2MP 0x08
+#define L2CAP_FC_ATT 0x10
+#define L2CAP_FC_SIG_LE 0x20
+#define L2CAP_FC_SMP_LE 0x40
+#define L2CAP_FC_SMP_BREDR 0x80
+
+/* L2CAP Control Field bit masks */
+#define L2CAP_CTRL_SAR 0xC000
+#define L2CAP_CTRL_REQSEQ 0x3F00
+#define L2CAP_CTRL_TXSEQ 0x007E
+#define L2CAP_CTRL_SUPERVISE 0x000C
+
+#define L2CAP_CTRL_RETRANS 0x0080
+#define L2CAP_CTRL_FINAL 0x0080
+#define L2CAP_CTRL_POLL 0x0010
+#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT 1
+#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_POLL_SHIFT 4
+#define L2CAP_CTRL_FINAL_SHIFT 7
+#define L2CAP_CTRL_REQSEQ_SHIFT 8
+#define L2CAP_CTRL_SAR_SHIFT 14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR 0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL 0x00040000
+#define L2CAP_EXT_CTRL_FINAL 0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
+#define L2CAP_EXT_CTRL_SAR_SHIFT 16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_POLL_SHIFT 18
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
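Not part of the patch: a small sketch of how the control-field masks and shifts above combine. The helper names are hypothetical; the in-tree l2cap code uses its own accessors that also account for the channel's extended-window state.

#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>

/* Illustrative only: TxSeq extraction from enhanced (16-bit) and
 * extended (32-bit) control fields.
 */
static inline u16 example_enh_txseq(u16 enh_ctrl)
{
	return (enh_ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
}

static inline u16 example_ext_txseq(u32 ext_ctrl)
{
	return (ext_ctrl & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
}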
+
+/* L2CAP Supervisory Function */
+#define L2CAP_SUPER_RR 0x00
+#define L2CAP_SUPER_REJ 0x01
+#define L2CAP_SUPER_RNR 0x02
+#define L2CAP_SUPER_SREJ 0x03
+
+/* L2CAP Segmentation and Reassembly */
+#define L2CAP_SAR_UNSEGMENTED 0x00
+#define L2CAP_SAR_START 0x01
+#define L2CAP_SAR_END 0x02
+#define L2CAP_SAR_CONTINUE 0x03
+
+/* L2CAP Command rej. reasons */
+#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
+#define L2CAP_REJ_MTU_EXCEEDED 0x0001
+#define L2CAP_REJ_INVALID_CID 0x0002
+
+/* L2CAP structures */
+struct l2cap_hdr {
+ __le16 len;
+ __le16 cid;
+} __packed;
+#define L2CAP_HDR_SIZE 4
+#define L2CAP_ENH_HDR_SIZE 6
+#define L2CAP_EXT_HDR_SIZE 8
+
+#define L2CAP_FCS_SIZE 2
+#define L2CAP_SDULEN_SIZE 2
+#define L2CAP_PSMLEN_SIZE 2
+#define L2CAP_ENH_CTRL_SIZE 2
+#define L2CAP_EXT_CTRL_SIZE 4
+
+struct l2cap_cmd_hdr {
+ __u8 code;
+ __u8 ident;
+ __le16 len;
+} __packed;
+#define L2CAP_CMD_HDR_SIZE 4
+
+struct l2cap_cmd_rej_unk {
+ __le16 reason;
+} __packed;
+
+struct l2cap_cmd_rej_mtu {
+ __le16 reason;
+ __le16 max_mtu;
+} __packed;
+
+struct l2cap_cmd_rej_cid {
+ __le16 reason;
+ __le16 scid;
+ __le16 dcid;
+} __packed;
+
+struct l2cap_conn_req {
+ __le16 psm;
+ __le16 scid;
+} __packed;
+
+struct l2cap_conn_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
+
+/* protocol/service multiplexer (PSM) */
+#define L2CAP_PSM_SDP 0x0001
+#define L2CAP_PSM_RFCOMM 0x0003
+#define L2CAP_PSM_3DSP 0x0021
+#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */
+
+#define L2CAP_PSM_DYN_START 0x1001
+#define L2CAP_PSM_DYN_END 0xffff
+#define L2CAP_PSM_AUTO_END 0x10ff
+#define L2CAP_PSM_LE_DYN_START 0x0080
+#define L2CAP_PSM_LE_DYN_END 0x00ff
+
+/* channel identifier */
+#define L2CAP_CID_SIGNALING 0x0001
+#define L2CAP_CID_CONN_LESS 0x0002
+#define L2CAP_CID_A2MP 0x0003
+#define L2CAP_CID_ATT 0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP 0x0006
+#define L2CAP_CID_SMP_BREDR 0x0007
+#define L2CAP_CID_DYN_START 0x0040
+#define L2CAP_CID_DYN_END 0xffff
+#define L2CAP_CID_LE_DYN_END 0x007f
+
+/* connect/create channel results */
+#define L2CAP_CR_SUCCESS 0x0000
+#define L2CAP_CR_PEND 0x0001
+#define L2CAP_CR_BAD_PSM 0x0002
+#define L2CAP_CR_SEC_BLOCK 0x0003
+#define L2CAP_CR_NO_MEM 0x0004
+#define L2CAP_CR_BAD_AMP 0x0005
+#define L2CAP_CR_AUTHENTICATION 0x0005
+#define L2CAP_CR_AUTHORIZATION 0x0006
+#define L2CAP_CR_BAD_KEY_SIZE 0x0007
+#define L2CAP_CR_ENCRYPTION 0x0008
+#define L2CAP_CR_INVALID_SCID 0x0009
+#define L2CAP_CR_SCID_IN_USE 0x000A
+
+/* connect/create channel status */
+#define L2CAP_CS_NO_INFO 0x0000
+#define L2CAP_CS_AUTHEN_PEND 0x0001
+#define L2CAP_CS_AUTHOR_PEND 0x0002
+
+struct l2cap_conf_req {
+ __le16 dcid;
+ __le16 flags;
+ __u8 data[0];
+} __packed;
+
+struct l2cap_conf_rsp {
+ __le16 scid;
+ __le16 flags;
+ __le16 result;
+ __u8 data[0];
+} __packed;
+
+#define L2CAP_CONF_SUCCESS 0x0000
+#define L2CAP_CONF_UNACCEPT 0x0001
+#define L2CAP_CONF_REJECT 0x0002
+#define L2CAP_CONF_UNKNOWN 0x0003
+#define L2CAP_CONF_PENDING 0x0004
+#define L2CAP_CONF_EFS_REJECT 0x0005
+
+/* configuration req/rsp continuation flag */
+#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
+
+struct l2cap_conf_opt {
+ __u8 type;
+ __u8 len;
+ __u8 val[0];
+} __packed;
+#define L2CAP_CONF_OPT_SIZE 2
+
+#define L2CAP_CONF_HINT 0x80
+#define L2CAP_CONF_MASK 0x7f
+
+#define L2CAP_CONF_MTU 0x01
+#define L2CAP_CONF_FLUSH_TO 0x02
+#define L2CAP_CONF_QOS 0x03
+#define L2CAP_CONF_RFC 0x04
+#define L2CAP_CONF_FCS 0x05
+#define L2CAP_CONF_EFS 0x06
+#define L2CAP_CONF_EWS 0x07
+
+#define L2CAP_CONF_MAX_SIZE 22
+
+struct l2cap_conf_rfc {
+ __u8 mode;
+ __u8 txwin_size;
+ __u8 max_transmit;
+ __le16 retrans_timeout;
+ __le16 monitor_timeout;
+ __le16 max_pdu_size;
+} __packed;
+
+#define L2CAP_MODE_BASIC 0x00
+#define L2CAP_MODE_RETRANS 0x01
+#define L2CAP_MODE_FLOWCTL 0x02
+#define L2CAP_MODE_ERTM 0x03
+#define L2CAP_MODE_STREAMING 0x04
+
+/* Unlike the above this one doesn't actually map to anything that would
+ * ever be sent over the air. Therefore, use a value that's unlikely to
+ * ever be used in the BR/EDR configuration phase.
+ */
+#define L2CAP_MODE_LE_FLOWCTL 0x80
+
+struct l2cap_conf_efs {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC 0x00
+#define L2CAP_SERV_BESTEFFORT 0x01
+#define L2CAP_SERV_GUARANTEED 0x02
+
+#define L2CAP_BESTEFFORT_ID 0x01
+
+struct l2cap_disconn_req {
+ __le16 dcid;
+ __le16 scid;
+} __packed;
+
+struct l2cap_disconn_rsp {
+ __le16 dcid;
+ __le16 scid;
+} __packed;
+
+struct l2cap_info_req {
+ __le16 type;
+} __packed;
+
+struct l2cap_info_rsp {
+ __le16 type;
+ __le16 result;
+ __u8 data[0];
+} __packed;
+
+struct l2cap_create_chan_req {
+ __le16 psm;
+ __le16 scid;
+ __u8 amp_id;
+} __packed;
+
+struct l2cap_create_chan_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
+
+struct l2cap_move_chan_req {
+ __le16 icid;
+ __u8 dest_amp_id;
+} __packed;
+
+struct l2cap_move_chan_rsp {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MR_SUCCESS 0x0000
+#define L2CAP_MR_PEND 0x0001
+#define L2CAP_MR_BAD_ID 0x0002
+#define L2CAP_MR_SAME_ID 0x0003
+#define L2CAP_MR_NOT_SUPP 0x0004
+#define L2CAP_MR_COLLISION 0x0005
+#define L2CAP_MR_NOT_ALLOWED 0x0006
+
+struct l2cap_move_chan_cfm {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MC_CONFIRMED 0x0000
+#define L2CAP_MC_UNCONFIRMED 0x0001
+
+struct l2cap_move_chan_cfm_rsp {
+ __le16 icid;
+} __packed;
+
+/* info type */
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+#define L2CAP_IT_FIXED_CHAN 0x0003
+
+/* info result */
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
+
+struct l2cap_conn_param_update_req {
+ __le16 min;
+ __le16 max;
+ __le16 latency;
+ __le16 to_multiplier;
+} __packed;
+
+struct l2cap_conn_param_update_rsp {
+ __le16 result;
+} __packed;
+
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
+#define L2CAP_CONN_PARAM_REJECTED 0x0001
+
+#define L2CAP_LE_MAX_CREDITS 10
+#define L2CAP_LE_DEFAULT_MPS 230
+
+struct l2cap_le_conn_req {
+ __le16 psm;
+ __le16 scid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+} __packed;
+
+struct l2cap_le_conn_rsp {
+ __le16 dcid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+} __packed;
+
+struct l2cap_le_credits {
+ __le16 cid;
+ __le16 credits;
+} __packed;
+
+/* ----- L2CAP channels and connections ----- */
+struct l2cap_seq_list {
+ __u16 head;
+ __u16 tail;
+ __u16 mask;
+ __u16 *list;
+};
+
+#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
+#define L2CAP_SEQ_LIST_TAIL 0x8000
+
+struct l2cap_chan {
+ struct l2cap_conn *conn;
+ struct hci_conn *hs_hcon;
+ struct hci_chan *hs_hchan;
+ struct kref kref;
+ atomic_t nesting;
+
+ __u8 state;
+
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+ __le16 psm;
+ __le16 sport;
+ __u16 dcid;
+ __u16 scid;
+
+ __u16 imtu;
+ __u16 omtu;
+ __u16 flush_to;
+ __u8 mode;
+ __u8 chan_type;
+ __u8 chan_policy;
+
+ __u8 sec_level;
+
+ __u8 ident;
+
+ __u8 conf_req[64];
+ __u8 conf_len;
+ __u8 num_conf_req;
+ __u8 num_conf_rsp;
+
+ __u8 fcs;
+
+ __u16 tx_win;
+ __u16 tx_win_max;
+ __u16 ack_win;
+ __u8 max_tx;
+ __u16 retrans_timeout;
+ __u16 monitor_timeout;
+ __u16 mps;
+
+ __u16 tx_credits;
+ __u16 rx_credits;
+
+ __u8 tx_state;
+ __u8 rx_state;
+
+ unsigned long conf_state;
+ unsigned long conn_state;
+ unsigned long flags;
+
+ __u8 remote_amp_id;
+ __u8 local_amp_id;
+ __u8 move_id;
+ __u8 move_state;
+ __u8 move_role;
+
+ __u16 next_tx_seq;
+ __u16 expected_ack_seq;
+ __u16 expected_tx_seq;
+ __u16 buffer_seq;
+ __u16 srej_save_reqseq;
+ __u16 last_acked_seq;
+ __u16 frames_sent;
+ __u16 unacked_frames;
+ __u8 retry_count;
+ __u16 sdu_len;
+ struct sk_buff *sdu;
+ struct sk_buff *sdu_last_frag;
+
+ __u16 remote_tx_win;
+ __u8 remote_max_tx;
+ __u16 remote_mps;
+
+ __u8 local_id;
+ __u8 local_stype;
+ __u16 local_msdu;
+ __u32 local_sdu_itime;
+ __u32 local_acc_lat;
+ __u32 local_flush_to;
+
+ __u8 remote_id;
+ __u8 remote_stype;
+ __u16 remote_msdu;
+ __u32 remote_sdu_itime;
+ __u32 remote_acc_lat;
+ __u32 remote_flush_to;
+
+ struct delayed_work chan_timer;
+ struct delayed_work retrans_timer;
+ struct delayed_work monitor_timer;
+ struct delayed_work ack_timer;
+
+ struct sk_buff *tx_send_head;
+ struct sk_buff_head tx_q;
+ struct sk_buff_head srej_q;
+ struct l2cap_seq_list srej_list;
+ struct l2cap_seq_list retrans_list;
+
+ struct list_head list;
+ struct list_head global_l;
+
+ void *data;
+ const struct l2cap_ops *ops;
+ struct mutex lock;
+};
+
+struct l2cap_ops {
+ char *name;
+
+ struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
+ int (*recv) (struct l2cap_chan * chan,
+ struct sk_buff *skb);
+ void (*teardown) (struct l2cap_chan *chan, int err);
+ void (*close) (struct l2cap_chan *chan);
+ void (*state_change) (struct l2cap_chan *chan,
+ int state, int err);
+ void (*ready) (struct l2cap_chan *chan);
+ void (*defer) (struct l2cap_chan *chan);
+ void (*resume) (struct l2cap_chan *chan);
+ void (*suspend) (struct l2cap_chan *chan);
+ void (*set_shutdown) (struct l2cap_chan *chan);
+ long (*get_sndtimeo) (struct l2cap_chan *chan);
+ struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb);
+};
+
+struct l2cap_conn {
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+
+ unsigned int mtu;
+
+ __u32 feat_mask;
+ __u8 remote_fixed_chan;
+ __u8 local_fixed_chan;
+
+ __u8 info_state;
+ __u8 info_ident;
+
+ struct delayed_work info_timer;
+
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 tx_ident;
+ struct mutex ident_lock;
+
+ struct sk_buff_head pending_rx;
+ struct work_struct pending_rx_work;
+
+ struct work_struct id_addr_update_work;
+
+ __u8 disc_reason;
+
+ struct l2cap_chan *smp;
+
+ struct list_head chan_l;
+ struct mutex chan_lock;
+ struct kref ref;
+ struct list_head users;
+};
+
+struct l2cap_user {
+ struct list_head list;
+ int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
+ void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
+};
+
+#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
+#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
+#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+
+#define L2CAP_CHAN_RAW 1
+#define L2CAP_CHAN_CONN_LESS 2
+#define L2CAP_CHAN_CONN_ORIENTED 3
+#define L2CAP_CHAN_FIXED 4
+
+/* ----- L2CAP socket info ----- */
+#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
+struct l2cap_pinfo {
+ struct bt_sock bt;
+ struct l2cap_chan *chan;
+ struct sk_buff *rx_busy_skb;
+};
+
+enum {
+ CONF_REQ_SENT,
+ CONF_INPUT_DONE,
+ CONF_OUTPUT_DONE,
+ CONF_MTU_DONE,
+ CONF_MODE_DONE,
+ CONF_CONNECT_PEND,
+ CONF_RECV_NO_FCS,
+ CONF_STATE2_DEVICE,
+ CONF_EWS_RECV,
+ CONF_LOC_CONF_PEND,
+ CONF_REM_CONF_PEND,
+ CONF_NOT_COMPLETE,
+};
+
+#define L2CAP_CONF_MAX_CONF_REQ 2
+#define L2CAP_CONF_MAX_CONF_RSP 2
+
+enum {
+ CONN_SREJ_SENT,
+ CONN_WAIT_F,
+ CONN_SREJ_ACT,
+ CONN_SEND_PBIT,
+ CONN_REMOTE_BUSY,
+ CONN_LOCAL_BUSY,
+ CONN_REJ_ACT,
+ CONN_SEND_FBIT,
+ CONN_RNR_SENT,
+};
+
+/* Definitions for flags in l2cap_chan */
+enum {
+ FLAG_ROLE_SWITCH,
+ FLAG_FORCE_ACTIVE,
+ FLAG_FORCE_RELIABLE,
+ FLAG_FLUSHABLE,
+ FLAG_EXT_CTRL,
+ FLAG_EFS_ENABLE,
+ FLAG_DEFER_SETUP,
+ FLAG_LE_CONN_REQ_SENT,
+ FLAG_PENDING_SECURITY,
+ FLAG_HOLD_HCI_CONN,
+};
+
+/* Lock nesting levels for L2CAP channels. We need these because lockdep
+ * otherwise considers all channels equal and will e.g. complain about a
+ * connection oriented channel triggering SMP procedures or a listening
+ * channel creating and locking a child channel.
+ */
+enum {
+ L2CAP_NESTING_SMP,
+ L2CAP_NESTING_NORMAL,
+ L2CAP_NESTING_PARENT,
+};
+
+enum {
+ L2CAP_TX_STATE_XMIT,
+ L2CAP_TX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_RX_STATE_RECV,
+ L2CAP_RX_STATE_SREJ_SENT,
+ L2CAP_RX_STATE_MOVE,
+ L2CAP_RX_STATE_WAIT_P,
+ L2CAP_RX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_TXSEQ_EXPECTED,
+ L2CAP_TXSEQ_EXPECTED_SREJ,
+ L2CAP_TXSEQ_UNEXPECTED,
+ L2CAP_TXSEQ_UNEXPECTED_SREJ,
+ L2CAP_TXSEQ_DUPLICATE,
+ L2CAP_TXSEQ_DUPLICATE_SREJ,
+ L2CAP_TXSEQ_INVALID,
+ L2CAP_TXSEQ_INVALID_IGNORE,
+};
+
+enum {
+ L2CAP_EV_DATA_REQUEST,
+ L2CAP_EV_LOCAL_BUSY_DETECTED,
+ L2CAP_EV_LOCAL_BUSY_CLEAR,
+ L2CAP_EV_RECV_REQSEQ_AND_FBIT,
+ L2CAP_EV_RECV_FBIT,
+ L2CAP_EV_RETRANS_TO,
+ L2CAP_EV_MONITOR_TO,
+ L2CAP_EV_EXPLICIT_POLL,
+ L2CAP_EV_RECV_IFRAME,
+ L2CAP_EV_RECV_RR,
+ L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR,
+ L2CAP_EV_RECV_SREJ,
+ L2CAP_EV_RECV_FRAME,
+};
+
+enum {
+ L2CAP_MOVE_ROLE_NONE,
+ L2CAP_MOVE_ROLE_INITIATOR,
+ L2CAP_MOVE_ROLE_RESPONDER,
+};
+
+enum {
+ L2CAP_MOVE_STABLE,
+ L2CAP_MOVE_WAIT_REQ,
+ L2CAP_MOVE_WAIT_RSP,
+ L2CAP_MOVE_WAIT_RSP_SUCCESS,
+ L2CAP_MOVE_WAIT_CONFIRM,
+ L2CAP_MOVE_WAIT_CONFIRM_RSP,
+ L2CAP_MOVE_WAIT_LOGICAL_COMP,
+ L2CAP_MOVE_WAIT_LOGICAL_CFM,
+ L2CAP_MOVE_WAIT_LOCAL_BUSY,
+ L2CAP_MOVE_WAIT_PREPARE,
+};
+
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
+
+static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+{
+ mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting));
+}
+
+static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
+{
+ mutex_unlock(&chan->lock);
+}
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+ struct delayed_work *work, long timeout)
+{
+ BT_DBG("chan %p state %s timeout %ld", chan,
+ state_to_string(chan->state), timeout);
+
+	/* If a pending work item was cancelled here, the reference taken by
+	 * the previous set_timer call is still held, so do not hold(chan) again. */
+ if (!cancel_delayed_work(work))
+ l2cap_chan_hold(chan);
+
+ schedule_delayed_work(work, timeout);
+}
+
+static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
+ struct delayed_work *work)
+{
+ bool ret;
+
+	/* put(chan) only if the delayed work was cancelled here; otherwise
+	 * the put is performed by the delayed work function itself. */
+ ret = cancel_delayed_work(work);
+ if (ret)
+ l2cap_chan_put(chan);
+
+ return ret;
+}
+
+#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
+#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
+#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
+#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
+#define __set_ack_timer(c) l2cap_set_timer(c, &c->ack_timer, \
+				msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO))
+#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
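
A note on the hold/put comments above: every timer armed through l2cap_set_timer() carries one channel reference, which must be dropped exactly once, either by l2cap_clear_timer() when the timer is cancelled or by the work function when it fires. The following is only an illustrative sketch of a conforming handler (the function name is made up; the real handlers live in l2cap_core.c):

	static void example_chan_timeout(struct work_struct *work)
	{
		struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
						       chan_timer.work);

		l2cap_chan_lock(chan);
		/* ... react to the expired channel timer ... */
		l2cap_chan_unlock(chan);

		/* Drop the reference taken by l2cap_set_timer() when the
		 * timer was armed; l2cap_clear_timer() would have dropped
		 * it instead had the timer been cancelled before firing.
		 */
		l2cap_chan_put(chan);
	}
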
+
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+ if (seq1 >= seq2)
+ return seq1 - seq2;
+ else
+ return chan->tx_win_max + 1 - seq2 + seq1;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+ return (seq + 1) % (chan->tx_win_max + 1);
+}
+
+static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
+{
+ return NULL;
+}
+
+static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ return -ENOSYS;
+}
+
+static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
+{
+}
+
+static inline void l2cap_chan_no_close(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan,
+ int state, int err)
+{
+}
+
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+ return 0;
+}
+
+extern bool disable_ertm;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+bool l2cap_is_socket(struct socket *sock);
+
+void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
+
+struct l2cap_chan *l2cap_chan_create(void);
+void l2cap_chan_close(struct l2cap_chan *chan, int reason);
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
+void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+int l2cap_ertm_init(struct l2cap_chan *chan);
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_send_conn_req(struct l2cap_chan *chan);
+void l2cap_move_start(struct l2cap_chan *chan);
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status);
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
+
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
+void l2cap_conn_put(struct l2cap_conn *conn);
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
+
+#endif /* __L2CAP_H */
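
The l2cap_chan_no_* stubs declared above exist so that in-kernel users of an L2CAP channel only implement the l2cap_ops callbacks they actually need, as a2mp.c and smp.c do. A minimal sketch of such an ops table follows; the example_* callback names are hypothetical, everything else comes from this header:

	static const struct l2cap_ops example_fixed_chan_ops = {
		.name		= "Example fixed channel",

		/* callbacks this user actually implements (hypothetical) */
		.recv		= example_recv,
		.alloc_skb	= example_alloc_skb,
		.close		= example_close,

		/* everything else falls back to the no-op stubs above */
		.new_connection	= l2cap_chan_no_new_connection,
		.teardown	= l2cap_chan_no_teardown,
		.state_change	= l2cap_chan_no_state_change,
		.ready		= l2cap_chan_no_ready,
		.defer		= l2cap_chan_no_defer,
		.suspend	= l2cap_chan_no_suspend,
		.resume		= l2cap_chan_no_resume,
		.set_shutdown	= l2cap_chan_no_set_shutdown,
		.get_sndtimeo	= l2cap_chan_no_get_sndtimeo,
	};
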
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
new file mode 100644
index 0000000..e7303ee
--- /dev/null
+++ b/include/net/bluetooth/mgmt.h
@@ -0,0 +1,826 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#define MGMT_INDEX_NONE 0xFFFF
+
+#define MGMT_STATUS_SUCCESS 0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
+#define MGMT_STATUS_NOT_CONNECTED 0x02
+#define MGMT_STATUS_FAILED 0x03
+#define MGMT_STATUS_CONNECT_FAILED 0x04
+#define MGMT_STATUS_AUTH_FAILED 0x05
+#define MGMT_STATUS_NOT_PAIRED 0x06
+#define MGMT_STATUS_NO_RESOURCES 0x07
+#define MGMT_STATUS_TIMEOUT 0x08
+#define MGMT_STATUS_ALREADY_CONNECTED 0x09
+#define MGMT_STATUS_BUSY 0x0a
+#define MGMT_STATUS_REJECTED 0x0b
+#define MGMT_STATUS_NOT_SUPPORTED 0x0c
+#define MGMT_STATUS_INVALID_PARAMS 0x0d
+#define MGMT_STATUS_DISCONNECTED 0x0e
+#define MGMT_STATUS_NOT_POWERED 0x0f
+#define MGMT_STATUS_CANCELLED 0x10
+#define MGMT_STATUS_INVALID_INDEX 0x11
+#define MGMT_STATUS_RFKILLED 0x12
+#define MGMT_STATUS_ALREADY_PAIRED 0x13
+#define MGMT_STATUS_PERMISSION_DENIED 0x14
+
+struct mgmt_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+#define MGMT_ADDR_INFO_SIZE 7
+
+#define MGMT_OP_READ_VERSION 0x0001
+#define MGMT_READ_VERSION_SIZE 0
+struct mgmt_rp_read_version {
+ __u8 version;
+ __le16 revision;
+} __packed;
+
+#define MGMT_OP_READ_COMMANDS 0x0002
+#define MGMT_READ_COMMANDS_SIZE 0
+struct mgmt_rp_read_commands {
+ __le16 num_commands;
+ __le16 num_events;
+ __le16 opcodes[0];
+} __packed;
+
+#define MGMT_OP_READ_INDEX_LIST 0x0003
+#define MGMT_READ_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_index_list {
+ __le16 num_controllers;
+ __le16 index[0];
+} __packed;
+
+/* Reserve one extra byte for names in management messages so that they
+ * are always guaranteed to be nul-terminated */
+#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
+
+#define MGMT_SETTING_POWERED 0x00000001
+#define MGMT_SETTING_CONNECTABLE 0x00000002
+#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
+#define MGMT_SETTING_DISCOVERABLE 0x00000008
+#define MGMT_SETTING_BONDABLE 0x00000010
+#define MGMT_SETTING_LINK_SECURITY 0x00000020
+#define MGMT_SETTING_SSP 0x00000040
+#define MGMT_SETTING_BREDR 0x00000080
+#define MGMT_SETTING_HS 0x00000100
+#define MGMT_SETTING_LE 0x00000200
+#define MGMT_SETTING_ADVERTISING 0x00000400
+#define MGMT_SETTING_SECURE_CONN 0x00000800
+#define MGMT_SETTING_DEBUG_KEYS 0x00001000
+#define MGMT_SETTING_PRIVACY 0x00002000
+#define MGMT_SETTING_CONFIGURATION 0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
+
+#define MGMT_OP_READ_INFO 0x0004
+#define MGMT_READ_INFO_SIZE 0
+struct mgmt_rp_read_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __u8 dev_class[3];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+struct mgmt_mode {
+ __u8 val;
+} __packed;
+
+#define MGMT_SETTING_SIZE 1
+
+#define MGMT_OP_SET_POWERED 0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE 0x0006
+struct mgmt_cp_set_discoverable {
+ __u8 val;
+ __le16 timeout;
+} __packed;
+#define MGMT_SET_DISCOVERABLE_SIZE 3
+
+#define MGMT_OP_SET_CONNECTABLE 0x0007
+
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
+
+#define MGMT_OP_SET_BONDABLE 0x0009
+
+#define MGMT_OP_SET_LINK_SECURITY 0x000A
+
+#define MGMT_OP_SET_SSP 0x000B
+
+#define MGMT_OP_SET_HS 0x000C
+
+#define MGMT_OP_SET_LE 0x000D
+#define MGMT_OP_SET_DEV_CLASS 0x000E
+struct mgmt_cp_set_dev_class {
+ __u8 major;
+ __u8 minor;
+} __packed;
+#define MGMT_SET_DEV_CLASS_SIZE 2
+
+#define MGMT_OP_SET_LOCAL_NAME 0x000F
+struct mgmt_cp_set_local_name {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+#define MGMT_SET_LOCAL_NAME_SIZE 260
+
+#define MGMT_OP_ADD_UUID 0x0010
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+#define MGMT_ADD_UUID_SIZE 17
+
+#define MGMT_OP_REMOVE_UUID 0x0011
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
+} __packed;
+#define MGMT_REMOVE_UUID_SIZE 16
+
+struct mgmt_link_key_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+ __u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_LINK_KEYS 0x0012
+struct mgmt_cp_load_link_keys {
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_link_key_info keys[0];
+} __packed;
+#define MGMT_LOAD_LINK_KEYS_SIZE 3
+
+#define MGMT_LTK_UNAUTHENTICATED 0x00
+#define MGMT_LTK_AUTHENTICATED 0x01
+#define MGMT_LTK_P256_UNAUTH 0x02
+#define MGMT_LTK_P256_AUTH 0x03
+#define MGMT_LTK_P256_DEBUG 0x04
+
+struct mgmt_ltk_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 master;
+ __u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
+struct mgmt_cp_load_long_term_keys {
+ __le16 key_count;
+ struct mgmt_ltk_info keys[0];
+} __packed;
+#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
+
+#define MGMT_OP_DISCONNECT 0x0014
+struct mgmt_cp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS 0x0015
+#define MGMT_GET_CONNECTIONS_SIZE 0
+struct mgmt_rp_get_connections {
+ __le16 conn_count;
+ struct mgmt_addr_info addr[0];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY 0x0016
+struct mgmt_cp_pin_code_reply {
+ struct mgmt_addr_info addr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17)
+struct mgmt_rp_pin_code_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
+struct mgmt_cp_pin_code_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_IO_CAPABILITY 0x0018
+struct mgmt_cp_set_io_capability {
+ __u8 io_capability;
+} __packed;
+#define MGMT_SET_IO_CAPABILITY_SIZE 1
+
+#define MGMT_OP_PAIR_DEVICE 0x0019
+struct mgmt_cp_pair_device {
+ struct mgmt_addr_info addr;
+ __u8 io_cap;
+} __packed;
+#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_pair_device {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A
+#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNPAIR_DEVICE 0x001B
+struct mgmt_cp_unpair_device {
+ struct mgmt_addr_info addr;
+ __u8 disconnect;
+} __packed;
+#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_unpair_device {
+ struct mgmt_addr_info addr;
+};
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001C
+struct mgmt_cp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D
+struct mgmt_cp_user_confirm_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001E
+struct mgmt_cp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+} __packed;
+#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4)
+struct mgmt_rp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F
+struct mgmt_cp_user_passkey_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
+#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
+struct mgmt_rp_read_local_oob_data {
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
+struct mgmt_cp_add_remote_oob_data {
+ struct mgmt_addr_info addr;
+ __u8 hash[16];
+ __u8 rand[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
+struct mgmt_cp_add_remote_oob_ext_data {
+ struct mgmt_addr_info addr;
+ __u8 hash192[16];
+ __u8 rand192[16];
+ __u8 hash256[16];
+ __u8 rand256[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
+
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
+struct mgmt_cp_remove_remote_oob_data {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_START_DISCOVERY 0x0023
+struct mgmt_cp_start_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_START_DISCOVERY_SIZE 1
+
+#define MGMT_OP_STOP_DISCOVERY 0x0024
+struct mgmt_cp_stop_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_STOP_DISCOVERY_SIZE 1
+
+#define MGMT_OP_CONFIRM_NAME 0x0025
+struct mgmt_cp_confirm_name {
+ struct mgmt_addr_info addr;
+ __u8 name_known;
+} __packed;
+#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_confirm_name {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_BLOCK_DEVICE 0x0026
+struct mgmt_cp_block_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNBLOCK_DEVICE 0x0027
+struct mgmt_cp_unblock_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_DEVICE_ID 0x0028
+struct mgmt_cp_set_device_id {
+ __le16 source;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+} __packed;
+#define MGMT_SET_DEVICE_ID_SIZE 8
+
+#define MGMT_OP_SET_ADVERTISING 0x0029
+
+#define MGMT_OP_SET_BREDR 0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS 0x002B
+struct mgmt_cp_set_static_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_SET_SCAN_PARAMS 0x002C
+struct mgmt_cp_set_scan_params {
+ __le16 interval;
+ __le16 window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE 4
+
+#define MGMT_OP_SET_SECURE_CONN 0x002D
+
+#define MGMT_OP_SET_DEBUG_KEYS 0x002E
+
+#define MGMT_OP_SET_PRIVACY 0x002F
+struct mgmt_cp_set_privacy {
+ __u8 privacy;
+ __u8 irk[16];
+} __packed;
+#define MGMT_SET_PRIVACY_SIZE 17
+
+struct mgmt_irk_info {
+ struct mgmt_addr_info addr;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_IRKS 0x0030
+struct mgmt_cp_load_irks {
+ __le16 irk_count;
+ struct mgmt_irk_info irks[0];
+} __packed;
+#define MGMT_LOAD_IRKS_SIZE 2
+
+#define MGMT_OP_GET_CONN_INFO 0x0031
+struct mgmt_cp_get_conn_info {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CONN_INFO_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_conn_info {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+} __packed;
+
+#define MGMT_OP_GET_CLOCK_INFO 0x0032
+struct mgmt_cp_get_clock_info {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CLOCK_INFO_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_clock_info {
+ struct mgmt_addr_info addr;
+ __le32 local_clock;
+ __le32 piconet_clock;
+ __le16 accuracy;
+} __packed;
+
+#define MGMT_OP_ADD_DEVICE 0x0033
+struct mgmt_cp_add_device {
+ struct mgmt_addr_info addr;
+ __u8 action;
+} __packed;
+#define MGMT_ADD_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+
+#define MGMT_OP_REMOVE_DEVICE 0x0034
+struct mgmt_cp_remove_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+struct mgmt_conn_param {
+ struct mgmt_addr_info addr;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define MGMT_OP_LOAD_CONN_PARAM 0x0035
+struct mgmt_cp_load_conn_param {
+ __le16 param_count;
+ struct mgmt_conn_param params[0];
+} __packed;
+#define MGMT_LOAD_CONN_PARAM_SIZE 2
+
+#define MGMT_OP_READ_UNCONF_INDEX_LIST 0x0036
+#define MGMT_READ_UNCONF_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_unconf_index_list {
+ __le16 num_controllers;
+ __le16 index[0];
+} __packed;
+
+#define MGMT_OPTION_EXTERNAL_CONFIG 0x00000001
+#define MGMT_OPTION_PUBLIC_ADDRESS 0x00000002
+
+#define MGMT_OP_READ_CONFIG_INFO 0x0037
+#define MGMT_READ_CONFIG_INFO_SIZE 0
+struct mgmt_rp_read_config_info {
+ __le16 manufacturer;
+ __le32 supported_options;
+ __le32 missing_options;
+} __packed;
+
+#define MGMT_OP_SET_EXTERNAL_CONFIG 0x0038
+struct mgmt_cp_set_external_config {
+ __u8 config;
+} __packed;
+#define MGMT_SET_EXTERNAL_CONFIG_SIZE 1
+
+#define MGMT_OP_SET_PUBLIC_ADDRESS 0x0039
+struct mgmt_cp_set_public_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_PUBLIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A
+struct mgmt_cp_start_service_discovery {
+ __u8 type;
+ __s8 rssi;
+ __le16 uuid_count;
+ __u8 uuids[0][16];
+} __packed;
+#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+ __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_ext_index_list {
+ __le16 num_controllers;
+ struct {
+ __le16 index;
+ __u8 type;
+ __u8 bus;
+ } entry[0];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES	0x003D
+#define MGMT_READ_ADV_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_features {
+ __le32 supported_flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+ __u8 max_instances;
+ __u8 num_instances;
+ __u8 instance[0];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING 0x003E
+struct mgmt_cp_add_advertising {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[0];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE 11
+struct mgmt_rp_add_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE BIT(0)
+#define MGMT_ADV_FLAG_DISCOV BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+
+#define MGMT_OP_REMOVE_ADVERTISING 0x003F
+struct mgmt_cp_remove_advertising {
+ __u8 instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE 1
+struct mgmt_rp_remove_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_OP_GET_ADV_SIZE_INFO 0x0040
+struct mgmt_cp_get_adv_size_info {
+ __u8 instance;
+ __le32 flags;
+} __packed;
+#define MGMT_GET_ADV_SIZE_INFO_SIZE 5
+struct mgmt_rp_get_adv_size_info {
+ __u8 instance;
+ __le32 flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+} __packed;
+
+#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __le16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
+#define MGMT_EV_CMD_COMPLETE 0x0001
+struct mgmt_ev_cmd_complete {
+ __le16 opcode;
+ __u8 status;
+ __u8 data[0];
+} __packed;
+
+#define MGMT_EV_CMD_STATUS 0x0002
+struct mgmt_ev_cmd_status {
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_ERROR 0x0003
+struct mgmt_ev_controller_error {
+ __u8 error_code;
+} __packed;
+
+#define MGMT_EV_INDEX_ADDED 0x0004
+
+#define MGMT_EV_INDEX_REMOVED 0x0005
+
+#define MGMT_EV_NEW_SETTINGS 0x0006
+
+#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
+struct mgmt_ev_class_of_dev_changed {
+ __u8 dev_class[3];
+};
+
+#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
+struct mgmt_ev_local_name_changed {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+#define MGMT_EV_NEW_LINK_KEY 0x0009
+struct mgmt_ev_new_link_key {
+ __u8 store_hint;
+ struct mgmt_link_key_info key;
+} __packed;
+
+#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A
+struct mgmt_ev_new_long_term_key {
+ __u8 store_hint;
+ struct mgmt_ltk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_CONNECTED 0x000B
+struct mgmt_ev_device_connected {
+ struct mgmt_addr_info addr;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_DEV_DISCONN_UNKNOWN 0x00
+#define MGMT_DEV_DISCONN_TIMEOUT 0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
+#define MGMT_DEV_DISCONN_REMOTE 0x03
+#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+
+#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
+struct mgmt_ev_device_disconnected {
+ struct mgmt_addr_info addr;
+ __u8 reason;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED 0x000D
+struct mgmt_ev_connect_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST 0x000E
+struct mgmt_ev_pin_code_request {
+ struct mgmt_addr_info addr;
+ __u8 secure;
+} __packed;
+
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
+struct mgmt_ev_user_confirm_request {
+ struct mgmt_addr_info addr;
+ __u8 confirm_hint;
+ __le32 value;
+} __packed;
+
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010
+struct mgmt_ev_user_passkey_request {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED 0x0011
+struct mgmt_ev_auth_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
+#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
+#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
+
+#define MGMT_EV_DEVICE_FOUND 0x0012
+struct mgmt_ev_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_DISCOVERING 0x0013
+struct mgmt_ev_discovering {
+ __u8 type;
+ __u8 discovering;
+} __packed;
+
+#define MGMT_EV_DEVICE_BLOCKED 0x0014
+struct mgmt_ev_device_blocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
+struct mgmt_ev_device_unblocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNPAIRED 0x0016
+struct mgmt_ev_device_unpaired {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_PASSKEY_NOTIFY 0x0017
+struct mgmt_ev_passkey_notify {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+ __u8 entered;
+} __packed;
+
+#define MGMT_EV_NEW_IRK 0x0018
+struct mgmt_ev_new_irk {
+ __u8 store_hint;
+ bdaddr_t rpa;
+ struct mgmt_irk_info irk;
+} __packed;
+
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03
+
+struct mgmt_csrk_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_EV_NEW_CSRK 0x0019
+struct mgmt_ev_new_csrk {
+ __u8 store_hint;
+ struct mgmt_csrk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_ADDED 0x001a
+struct mgmt_ev_device_added {
+ struct mgmt_addr_info addr;
+ __u8 action;
+} __packed;
+
+#define MGMT_EV_DEVICE_REMOVED 0x001b
+struct mgmt_ev_device_removed {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_NEW_CONN_PARAM 0x001c
+struct mgmt_ev_new_conn_param {
+ struct mgmt_addr_info addr;
+ __u8 store_hint;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 latency;
+ __le16 timeout;
+} __packed;
+
+#define MGMT_EV_UNCONF_INDEX_ADDED 0x001d
+
+#define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e
+
+#define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f
+
+struct mgmt_ev_ext_index {
+ __u8 type;
+ __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED 0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED 0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED 0x0023
+struct mgmt_ev_advertising_added {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED 0x0024
+struct mgmt_ev_advertising_removed {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
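
Every management command defined in this header is a little-endian mgmt_hdr (opcode, controller index, parameter length) followed by the command-specific parameters. As a kernel-context sketch only (the packet struct and function name are invented for illustration), a Set Powered command would be laid out like this:

	struct example_set_powered_pkt {
		struct mgmt_hdr  hdr;
		struct mgmt_mode mode;
	} __packed;

	static void example_fill_set_powered(struct example_set_powered_pkt *pkt,
					     u16 index, bool powered)
	{
		pkt->hdr.opcode = cpu_to_le16(MGMT_OP_SET_POWERED);
		pkt->hdr.index  = cpu_to_le16(index);
		pkt->hdr.len    = cpu_to_le16(MGMT_SETTING_SIZE);
		pkt->mode.val   = powered ? 0x01 : 0x00;
	}
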
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
new file mode 100644
index 0000000..da4acef
--- /dev/null
+++ b/include/net/bluetooth/rfcomm.h
@@ -0,0 +1,376 @@
+/*
+ RFCOMM implementation for Linux Bluetooth stack (BlueZ)
+ Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
+ Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <linux/refcount.h>
+
+#ifndef __RFCOMM_H
+#define __RFCOMM_H
+
+#define RFCOMM_CONN_TIMEOUT (HZ * 30)
+#define RFCOMM_DISC_TIMEOUT (HZ * 20)
+#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
+#define RFCOMM_IDLE_TIMEOUT (HZ * 2)
+
+#define RFCOMM_DEFAULT_MTU 127
+#define RFCOMM_DEFAULT_CREDITS 7
+
+#define RFCOMM_MAX_L2CAP_MTU 1013
+#define RFCOMM_MAX_CREDITS 40
+
+#define RFCOMM_SKB_HEAD_RESERVE 8
+#define RFCOMM_SKB_TAIL_RESERVE 2
+#define RFCOMM_SKB_RESERVE (RFCOMM_SKB_HEAD_RESERVE + RFCOMM_SKB_TAIL_RESERVE)
+
+#define RFCOMM_SABM 0x2f
+#define RFCOMM_DISC 0x43
+#define RFCOMM_UA 0x63
+#define RFCOMM_DM 0x0f
+#define RFCOMM_UIH 0xef
+
+#define RFCOMM_TEST 0x08
+#define RFCOMM_FCON 0x28
+#define RFCOMM_FCOFF 0x18
+#define RFCOMM_MSC 0x38
+#define RFCOMM_RPN 0x24
+#define RFCOMM_RLS 0x14
+#define RFCOMM_PN 0x20
+#define RFCOMM_NSC 0x04
+
+#define RFCOMM_V24_FC 0x02
+#define RFCOMM_V24_RTC 0x04
+#define RFCOMM_V24_RTR 0x08
+#define RFCOMM_V24_IC 0x40
+#define RFCOMM_V24_DV 0x80
+
+#define RFCOMM_RPN_BR_2400 0x0
+#define RFCOMM_RPN_BR_4800 0x1
+#define RFCOMM_RPN_BR_7200 0x2
+#define RFCOMM_RPN_BR_9600 0x3
+#define RFCOMM_RPN_BR_19200 0x4
+#define RFCOMM_RPN_BR_38400 0x5
+#define RFCOMM_RPN_BR_57600 0x6
+#define RFCOMM_RPN_BR_115200 0x7
+#define RFCOMM_RPN_BR_230400 0x8
+
+#define RFCOMM_RPN_DATA_5 0x0
+#define RFCOMM_RPN_DATA_6 0x1
+#define RFCOMM_RPN_DATA_7 0x2
+#define RFCOMM_RPN_DATA_8 0x3
+
+#define RFCOMM_RPN_STOP_1 0
+#define RFCOMM_RPN_STOP_15 1
+
+#define RFCOMM_RPN_PARITY_NONE 0x0
+#define RFCOMM_RPN_PARITY_ODD 0x1
+#define RFCOMM_RPN_PARITY_EVEN 0x3
+#define RFCOMM_RPN_PARITY_MARK 0x5
+#define RFCOMM_RPN_PARITY_SPACE 0x7
+
+#define RFCOMM_RPN_FLOW_NONE 0x00
+
+#define RFCOMM_RPN_XON_CHAR 0x11
+#define RFCOMM_RPN_XOFF_CHAR 0x13
+
+#define RFCOMM_RPN_PM_BITRATE 0x0001
+#define RFCOMM_RPN_PM_DATA 0x0002
+#define RFCOMM_RPN_PM_STOP 0x0004
+#define RFCOMM_RPN_PM_PARITY 0x0008
+#define RFCOMM_RPN_PM_PARITY_TYPE 0x0010
+#define RFCOMM_RPN_PM_XON 0x0020
+#define RFCOMM_RPN_PM_XOFF 0x0040
+#define RFCOMM_RPN_PM_FLOW 0x3F00
+
+#define RFCOMM_RPN_PM_ALL 0x3F7F
+
+struct rfcomm_hdr {
+ u8 addr;
+ u8 ctrl;
+ u8 len; /* Actual size can be 2 bytes */
+} __packed;
+
+struct rfcomm_cmd {
+ u8 addr;
+ u8 ctrl;
+ u8 len;
+ u8 fcs;
+} __packed;
+
+struct rfcomm_mcc {
+ u8 type;
+ u8 len;
+} __packed;
+
+struct rfcomm_pn {
+ u8 dlci;
+ u8 flow_ctrl;
+ u8 priority;
+ u8 ack_timer;
+ __le16 mtu;
+ u8 max_retrans;
+ u8 credits;
+} __packed;
+
+struct rfcomm_rpn {
+ u8 dlci;
+ u8 bit_rate;
+ u8 line_settings;
+ u8 flow_ctrl;
+ u8 xon_char;
+ u8 xoff_char;
+ __le16 param_mask;
+} __packed;
+
+struct rfcomm_rls {
+ u8 dlci;
+ u8 status;
+} __packed;
+
+struct rfcomm_msc {
+ u8 dlci;
+ u8 v24_sig;
+} __packed;
+
+/* ---- Core structures, flags etc ---- */
+
+struct rfcomm_session {
+ struct list_head list;
+ struct socket *sock;
+ struct timer_list timer;
+ unsigned long state;
+ unsigned long flags;
+ int initiator;
+
+ /* Default DLC parameters */
+ int cfc;
+ uint mtu;
+
+ struct list_head dlcs;
+};
+
+struct rfcomm_dlc {
+ struct list_head list;
+ struct rfcomm_session *session;
+ struct sk_buff_head tx_queue;
+ struct timer_list timer;
+
+ struct mutex lock;
+ unsigned long state;
+ unsigned long flags;
+ refcount_t refcnt;
+ u8 dlci;
+ u8 addr;
+ u8 priority;
+ u8 v24_sig;
+ u8 remote_v24_sig;
+ u8 mscex;
+ u8 out;
+ u8 sec_level;
+ u8 role_switch;
+ u32 defer_setup;
+
+ uint mtu;
+ uint cfc;
+ uint rx_credits;
+ uint tx_credits;
+
+ void *owner;
+
+ void (*data_ready)(struct rfcomm_dlc *d, struct sk_buff *skb);
+ void (*state_change)(struct rfcomm_dlc *d, int err);
+ void (*modem_status)(struct rfcomm_dlc *d, u8 v24_sig);
+};
+
+/* DLC and session flags */
+#define RFCOMM_RX_THROTTLED 0
+#define RFCOMM_TX_THROTTLED 1
+#define RFCOMM_TIMED_OUT 2
+#define RFCOMM_MSC_PENDING 3
+#define RFCOMM_SEC_PENDING 4
+#define RFCOMM_AUTH_PENDING 5
+#define RFCOMM_AUTH_ACCEPT 6
+#define RFCOMM_AUTH_REJECT 7
+#define RFCOMM_DEFER_SETUP 8
+#define RFCOMM_ENC_DROP 9
+
+/* Scheduling flags and events */
+#define RFCOMM_SCHED_WAKEUP 31
+
+/* MSC exchange flags */
+#define RFCOMM_MSCEX_TX 1
+#define RFCOMM_MSCEX_RX 2
+#define RFCOMM_MSCEX_OK (RFCOMM_MSCEX_TX + RFCOMM_MSCEX_RX)
+
+/* CFC states */
+#define RFCOMM_CFC_UNKNOWN -1
+#define RFCOMM_CFC_DISABLED 0
+#define RFCOMM_CFC_ENABLED RFCOMM_MAX_CREDITS
+
+/* ---- RFCOMM SEND RPN ---- */
+int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
+ u8 bit_rate, u8 data_bits, u8 stop_bits,
+ u8 parity, u8 flow_ctrl_settings,
+ u8 xon_char, u8 xoff_char, u16 param_mask);
+
+/* ---- RFCOMM DLCs (channels) ---- */
+struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
+void rfcomm_dlc_free(struct rfcomm_dlc *d);
+int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+ u8 channel);
+int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb);
+int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
+int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
+void rfcomm_dlc_accept(struct rfcomm_dlc *d);
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
+
+#define rfcomm_dlc_lock(d) mutex_lock(&d->lock)
+#define rfcomm_dlc_unlock(d) mutex_unlock(&d->lock)
+
+static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
+{
+ refcount_inc(&d->refcnt);
+}
+
+static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
+{
+ if (refcount_dec_and_test(&d->refcnt))
+ rfcomm_dlc_free(d);
+}
+
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+
+static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+{
+ if (!test_and_set_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_throttle(d);
+}
+
+static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+{
+ if (test_and_clear_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_unthrottle(d);
+}
+
+/* ---- RFCOMM sessions ---- */
+void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src,
+ bdaddr_t *dst);
+
+/* ---- RFCOMM sockets ---- */
+struct sockaddr_rc {
+ sa_family_t rc_family;
+ bdaddr_t rc_bdaddr;
+ u8 rc_channel;
+};
+
+#define RFCOMM_CONNINFO 0x02
+struct rfcomm_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define RFCOMM_LM 0x03
+#define RFCOMM_LM_MASTER 0x0001
+#define RFCOMM_LM_AUTH 0x0002
+#define RFCOMM_LM_ENCRYPT 0x0004
+#define RFCOMM_LM_TRUSTED 0x0008
+#define RFCOMM_LM_RELIABLE 0x0010
+#define RFCOMM_LM_SECURE 0x0020
+#define RFCOMM_LM_FIPS 0x0040
+
+#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
+
+struct rfcomm_pinfo {
+ struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
+ struct rfcomm_dlc *dlc;
+ u8 channel;
+ u8 sec_level;
+ u8 role_switch;
+};
+
+int rfcomm_init_sockets(void);
+void rfcomm_cleanup_sockets(void);
+
+int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
+ struct rfcomm_dlc **d);
+
+/* ---- RFCOMM TTY ---- */
+#define RFCOMM_MAX_DEV 256
+
+#define RFCOMMCREATEDEV _IOW('R', 200, int)
+#define RFCOMMRELEASEDEV _IOW('R', 201, int)
+#define RFCOMMGETDEVLIST _IOR('R', 210, int)
+#define RFCOMMGETDEVINFO _IOR('R', 211, int)
+#define RFCOMMSTEALDLC _IOW('R', 220, int)
+
+/* rfcomm_dev.flags bit definitions */
+#define RFCOMM_REUSE_DLC 0
+#define RFCOMM_RELEASE_ONHUP 1
+#define RFCOMM_HANGUP_NOW 2
+#define RFCOMM_TTY_ATTACHED 3
+#define RFCOMM_DEFUNCT_BIT4 4 /* don't reuse this bit - userspace visible */
+
+/* rfcomm_dev.status bit definitions */
+#define RFCOMM_DEV_RELEASED 0
+#define RFCOMM_TTY_OWNED 1
+
+struct rfcomm_dev_req {
+ s16 dev_id;
+ u32 flags;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
+
+struct rfcomm_dev_info {
+ s16 id;
+ u32 flags;
+ u16 state;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
+
+struct rfcomm_dev_list_req {
+ u16 dev_num;
+ struct rfcomm_dev_info dev_info[0];
+};
+
+int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+
+#ifdef CONFIG_BT_RFCOMM_TTY
+int rfcomm_init_ttys(void);
+void rfcomm_cleanup_ttys(void);
+#else
+static inline int rfcomm_init_ttys(void)
+{
+ return 0;
+}
+static inline void rfcomm_cleanup_ttys(void)
+{
+}
+#endif
+#endif /* __RFCOMM_H */
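
The RFCOMMCREATEDEV ioctl above is how userspace binds an established RFCOMM connection to a /dev/rfcommN TTY. The sketch below is illustrative only and assumes the BlueZ userspace copies of these definitions (<bluetooth/rfcomm.h>); the function name is made up:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/rfcomm.h>

	/* Returns the allocated /dev/rfcommN id, or -1 on error. */
	static int example_bind_rfcomm_tty(int sk, const bdaddr_t *src,
					   const bdaddr_t *dst, uint8_t channel)
	{
		struct rfcomm_dev_req req;

		memset(&req, 0, sizeof(req));
		req.dev_id  = -1;	/* let the kernel pick the id */
		req.flags   = (1 << RFCOMM_REUSE_DLC) |
			      (1 << RFCOMM_RELEASE_ONHUP);
		req.src     = *src;
		req.dst     = *dst;
		req.channel = channel;

		return ioctl(sk, RFCOMMCREATEDEV, &req);
	}
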
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
new file mode 100644
index 0000000..f40ddb4
--- /dev/null
+++ b/include/net/bluetooth/sco.h
@@ -0,0 +1,49 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __SCO_H
+#define __SCO_H
+
+/* SCO defaults */
+#define SCO_DEFAULT_MTU 500
+
+/* SCO socket address */
+struct sockaddr_sco {
+ sa_family_t sco_family;
+ bdaddr_t sco_bdaddr;
+};
+
+/* SCO socket options */
+#define SCO_OPTIONS 0x01
+struct sco_options {
+ __u16 mtu;
+};
+
+#define SCO_CONNINFO 0x02
+struct sco_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#endif /* __SCO_H */
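
sockaddr_sco and sco_options above form the userspace API for SCO audio links. A hedged userspace-style sketch, assuming the BlueZ header copies (<bluetooth/sco.h>) and an already established ACL link to the peer; the function name is hypothetical:

	#include <string.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/sco.h>

	/* Returns a connected SCO socket, or -1 on error. */
	static int example_sco_connect(const bdaddr_t *dst, uint16_t *mtu)
	{
		struct sockaddr_sco addr;
		struct sco_options opts;
		socklen_t optlen = sizeof(opts);
		int sk;

		sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
		if (sk < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.sco_family = AF_BLUETOOTH;
		bacpy(&addr.sco_bdaddr, dst);

		if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
		    getsockopt(sk, SOL_SCO, SCO_OPTIONS, &opts, &optlen) < 0) {
			close(sk);
			return -1;
		}

		*mtu = opts.mtu;	/* negotiated SCO MTU */
		return sk;
	}
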
diff --git a/local-symbols b/local-symbols
index 8ded69e..c072a46 100644
--- a/local-symbols
+++ b/local-symbols
@@ -534,3 +534,4 @@ R8188EU=
88EU_AP_MODE=
R8822BE=
RTLWIFI_DEBUG_ST=
+SERIAL_DEV_BUS=
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 08f5808..0f68a23 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -14,7 +14,8 @@ menuconfig BT
depends on CRYPTO_CMAC
depends on CRYPTO_ECB
depends on CRYPTO_SHA256
- depends on CRYPTO_ECDH
+ depends on CRYPTO_ECDH
+ depends on !KERNEL_4_7
help
Bluetooth is low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 51c2cf2..9346e75 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -239,7 +239,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
}
len -= sizeof(*cl);
- cl = skb_pull(skb, sizeof(*cl));
+ cl = (void *)skb_pull(skb, sizeof(*cl));
}
/* Fall back to L2CAP init sequence */
@@ -279,7 +279,7 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
while (skb->len >= sizeof(*cl)) {
BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
cl->status);
- cl = skb_pull(skb, sizeof(*cl));
+		cl = (void *)skb_pull(skb, sizeof(*cl));
}
/* TODO send A2MP_CHANGE_RSP */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3264e18..77a68c0 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <linux/sched/signal.h>
+#include <linux/refcount.h>
#include <asm/ioctls.h>
@@ -649,7 +650,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
sk,
- refcount_read(&sk->sk_refcnt),
+ refcount_read((const refcount_t *)&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
@@ -679,8 +680,9 @@ int bt_procfs_init(struct net *net, const char *name,
{
sk_list->custom_seq_show = seq_show;
- if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
- return -ENOMEM;
+	/* TODO: proc_create_seq_data() is not available on all supported kernels; skip the /proc entry for now */
+ //if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
+ // return -ENOMEM;
return 0;
}
diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c
index 2155ce8..9c91522 100644
--- a/net/bluetooth/ecdh_helper.c
+++ b/net/bluetooth/ecdh_helper.c
@@ -23,6 +23,9 @@
#include "ecdh_helper.h"
#include <linux/scatterlist.h>
+#if LINUX_VERSION_IS_LESS(4,8,0)
+#include <crypto/backport-kpp.h>
+#endif
#include <crypto/ecdh.h>
struct ecdh_completion {
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 3d3b23d..d1ed594 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1338,9 +1338,13 @@ done:
release_sock(sk);
return err;
}
-
+#if LINUX_VERSION_IS_LESS(4,17,0)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *sockaddr_len, int peer)
+#else
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
+#endif
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
struct sock *sk = sock->sk;
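
The guarded prototypes used here (and in the l2cap, rfcomm and sco socket changes below) keep one function body working across the getname() API change: newer kernels return the address length directly, while older ones report it through the *sockaddr_len out-parameter and return 0. An illustrative sketch of how a shared body can serve both conventions (example_getname is hypothetical, not the patch's actual implementation):

	#if LINUX_VERSION_IS_LESS(4,17,0)
	static int example_getname(struct socket *sock, struct sockaddr *addr,
				   int *sockaddr_len, int peer)
	#else
	static int example_getname(struct socket *sock, struct sockaddr *addr,
				   int peer)
	#endif
	{
		struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;

		memset(haddr, 0, sizeof(*haddr));
		haddr->hci_family = AF_BLUETOOTH;
		/* ... fill in the device index as the real handler does ... */

	#if LINUX_VERSION_IS_LESS(4,17,0)
		*sockaddr_len = sizeof(*haddr);
		return 0;
	#else
		return sizeof(*haddr);
	#endif
	}
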
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d17a473..3bfa43f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -33,7 +33,7 @@
#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/filter.h>
-
+#include <linux/uio.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
@@ -2119,6 +2119,42 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
}
}
+extern void __compiletime_error("copy source size is too small")
+__bad_copy_from(void);
+extern void __compiletime_error("copy destination size is too small")
+__bad_copy_to(void);
+
+static inline void copy_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline bool
+check_copy_size(const void *addr, size_t bytes, bool is_source)
+{
+ int sz = __compiletime_object_size(addr);
+ if (unlikely(sz >= 0 && sz < bytes)) {
+ if (!__builtin_constant_p(bytes))
+ copy_overflow(sz, bytes);
+ else if (is_source)
+ __bad_copy_from();
+ else
+ __bad_copy_to();
+ return false;
+ }
+ check_object_size(addr, bytes, is_source);
+ return true;
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return false;
+ else
+ return _copy_from_iter_full(addr, bytes, i);
+}
+
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
struct msghdr *msg, int len,
int count, struct sk_buff *skb)
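
The helpers added in this hunk mirror the upstream uio.h/thread_info.h versions so copy_from_iter_full() can be used unchanged on older kernels: an all-or-nothing copy with a compile-time destination-size check. A minimal usage sketch (the function name is made up; the surrounding l2cap_skbuff_fromiovec() is the real caller):

	static int example_fill_skb(struct sk_buff *skb, struct msghdr *msg,
				    int count)
	{
		/* skb_put() reserves 'count' bytes; copy_from_iter_full()
		 * either copies all of them from the caller's iovec or
		 * none at all.
		 */
		if (!copy_from_iter_full(skb_put(skb, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		return 0;
	}
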
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 95b7293..f1e8ed6 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -300,9 +300,13 @@ done:
release_sock(sk);
return err;
}
-
+#if LINUX_VERSION_IS_LESS(4,12,0)
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
+#else
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
int flags, bool kern)
+#endif
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
@@ -357,8 +361,13 @@ done:
return err;
}
+#if LINUX_VERSION_IS_LESS(4,17,0)
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *sockaddr_len, int peer)
+#else
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
+#endif
{
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
@@ -1027,7 +1036,11 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
goto done;
if (pi->rx_busy_skb) {
+#if LINUX_VERSION_IS_GEQ(4,7,0)
if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+#else
+ if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+#endif
pi->rx_busy_skb = NULL;
else
goto done;
@@ -1296,7 +1309,11 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
goto done;
}
+#if LINUX_VERSION_IS_GEQ(4,7,0)
err = __sock_queue_rcv_skb(sk, skb);
+#else
+ err = sock_queue_rcv_skb(sk, skb);
+#endif
/* For ERTM, handle one skb that doesn't fit into the recv
* buffer. This is important to do because the data frames
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 2289d6c..2e7e16c 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -351,7 +351,7 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
{
struct rfcomm_session *s = d->session;
- BT_DBG("dlc %p refcnt %d session %p", d, refcount_read(&d->refcnt), s);
+ BT_DBG("dlc %p refcnt %d session %p", d, refcount_read((const refcount_t *)&d->refcnt), s);
list_del(&d->list);
d->session = NULL;
@@ -872,7 +872,7 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
if (!skb)
return -ENOMEM;
- cmd = __skb_put(skb, sizeof(*cmd));
+ cmd = (void *)__skb_put(skb, sizeof(*cmd));
cmd->addr = d->addr;
cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
cmd->len = __len8(0);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 90e1c3b..5d0b0a5 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -205,7 +205,7 @@ static void rfcomm_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
+ BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read((const refcount_t *)&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
@@ -481,8 +481,12 @@ done:
return err;
}
+#if LINUX_VERSION_IS_LESS(4,12,0)
+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+#else
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
+#endif
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
@@ -542,7 +546,12 @@ done:
return err;
}
+#if LINUX_VERSION_IS_LESS(4,17,0)
+static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *sockaddr_len, int peer)
+#else
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer)
+#endif
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 2ece5af..2ee2bd5 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -623,9 +623,13 @@ done:
release_sock(sk);
return err;
}
-
+#if LINUX_VERSION_IS_LESS(4,12,0)
+static int sco_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
+#else
static int sco_sock_accept(struct socket *sock, struct socket *newsock,
int flags, bool kern)
+#endif
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
@@ -678,9 +682,13 @@ done:
release_sock(sk);
return err;
}
-
+#if LINUX_VERSION_IS_LESS(4,17,0)
+static int sco_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *sockaddr_len, int peer)
+#else
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
+#endif
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;