author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /include/net
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/act_api.h | 116
-rw-r--r--  include/net/addrconf.h | 240
-rw-r--r--  include/net/af_unix.h | 78
-rw-r--r--  include/net/ah.h | 35
-rw-r--r--  include/net/arp.h | 32
-rw-r--r--  include/net/atmclip.h | 62
-rw-r--r--  include/net/ax25.h | 385
-rw-r--r--  include/net/bluetooth/bluetooth.h | 181
-rw-r--r--  include/net/bluetooth/hci.h | 755
-rw-r--r--  include/net/bluetooth/hci_core.h | 626
-rw-r--r--  include/net/bluetooth/l2cap.h | 238
-rw-r--r--  include/net/bluetooth/rfcomm.h | 353
-rw-r--r--  include/net/bluetooth/sco.h | 79
-rw-r--r--  include/net/checksum.h | 87
-rw-r--r--  include/net/compat.h | 39
-rw-r--r--  include/net/datalink.h | 18
-rw-r--r--  include/net/dn.h | 236
-rw-r--r--  include/net/dn_dev.h | 194
-rw-r--r--  include/net/dn_fib.h | 204
-rw-r--r--  include/net/dn_neigh.h | 28
-rw-r--r--  include/net/dn_nsp.h | 209
-rw-r--r--  include/net/dn_route.h | 112
-rw-r--r--  include/net/dsfield.h | 54
-rw-r--r--  include/net/dst.h | 279
-rw-r--r--  include/net/esp.h | 59
-rw-r--r--  include/net/flow.h | 95
-rw-r--r--  include/net/gen_stats.h | 49
-rw-r--r--  include/net/icmp.h | 60
-rw-r--r--  include/net/if_inet6.h | 285
-rw-r--r--  include/net/inet_common.h | 44
-rw-r--r--  include/net/inet_ecn.h | 108
-rw-r--r--  include/net/inetpeer.h | 66
-rw-r--r--  include/net/ip.h | 353
-rw-r--r--  include/net/ip6_checksum.h | 94
-rw-r--r--  include/net/ip6_fib.h | 185
-rw-r--r--  include/net/ip6_route.h | 141
-rw-r--r--  include/net/ip6_tunnel.h | 40
-rw-r--r--  include/net/ip_fib.h | 284
-rw-r--r--  include/net/ip_mp_alg.h | 99
-rw-r--r--  include/net/ip_vs.h | 999
-rw-r--r--  include/net/ipcomp.h | 11
-rw-r--r--  include/net/ipconfig.h | 27
-rw-r--r--  include/net/ipip.h | 51
-rw-r--r--  include/net/ipv6.h | 472
-rw-r--r--  include/net/ipx.h | 161
-rw-r--r--  include/net/irda/af_irda.h | 87
-rw-r--r--  include/net/irda/crc.h | 29
-rw-r--r--  include/net/irda/discovery.h | 100
-rw-r--r--  include/net/irda/ircomm_core.h | 108
-rw-r--r--  include/net/irda/ircomm_event.h | 85
-rw-r--r--  include/net/irda/ircomm_lmp.h | 38
-rw-r--r--  include/net/irda/ircomm_param.h | 149
-rw-r--r--  include/net/irda/ircomm_ttp.h | 39
-rw-r--r--  include/net/irda/ircomm_tty.h | 139
-rw-r--r--  include/net/irda/ircomm_tty_attach.h | 94
-rw-r--r--  include/net/irda/irda.h | 117
-rw-r--r--  include/net/irda/irda_device.h | 301
-rw-r--r--  include/net/irda/iriap.h | 108
-rw-r--r--  include/net/irda/iriap_event.h | 85
-rw-r--r--  include/net/irda/irias_object.h | 108
-rw-r--r--  include/net/irda/irlan_client.h | 42
-rw-r--r--  include/net/irda/irlan_common.h | 222
-rw-r--r--  include/net/irda/irlan_eth.h | 33
-rw-r--r--  include/net/irda/irlan_event.h | 81
-rw-r--r--  include/net/irda/irlan_filter.h | 33
-rw-r--r--  include/net/irda/irlan_provider.h | 52
-rw-r--r--  include/net/irda/irlap.h | 290
-rw-r--r--  include/net/irda/irlap_event.h | 131
-rw-r--r--  include/net/irda/irlap_frame.h | 142
-rw-r--r--  include/net/irda/irlmp.h | 295
-rw-r--r--  include/net/irda/irlmp_event.h | 98
-rw-r--r--  include/net/irda/irlmp_frame.h | 63
-rw-r--r--  include/net/irda/irmod.h | 109
-rw-r--r--  include/net/irda/irqueue.h | 96
-rw-r--r--  include/net/irda/irttp.h | 210
-rw-r--r--  include/net/irda/parameters.h | 102
-rw-r--r--  include/net/irda/qos.h | 104
-rw-r--r--  include/net/irda/timer.h | 104
-rw-r--r--  include/net/irda/wrapper.h | 58
-rw-r--r--  include/net/iw_handler.h | 540
-rw-r--r--  include/net/lapb.h | 152
-rw-r--r--  include/net/llc.h | 99
-rw-r--r--  include/net/llc_c_ac.h | 202
-rw-r--r--  include/net/llc_c_ev.h | 269
-rw-r--r--  include/net/llc_c_st.h | 48
-rw-r--r--  include/net/llc_conn.h | 119
-rw-r--r--  include/net/llc_if.h | 101
-rw-r--r--  include/net/llc_pdu.h | 436
-rw-r--r--  include/net/llc_s_ac.h | 39
-rw-r--r--  include/net/llc_s_ev.h | 67
-rw-r--r--  include/net/llc_s_st.h | 32
-rw-r--r--  include/net/llc_sap.h | 29
-rw-r--r--  include/net/ndisc.h | 135
-rw-r--r--  include/net/neighbour.h | 370
-rw-r--r--  include/net/netrom.h | 242
-rw-r--r--  include/net/p8022.h | 10
-rw-r--r--  include/net/pkt_act.h | 275
-rw-r--r--  include/net/pkt_cls.h | 366
-rw-r--r--  include/net/pkt_sched.h | 249
-rw-r--r--  include/net/protocol.h | 99
-rw-r--r--  include/net/psnap.h | 7
-rw-r--r--  include/net/raw.h | 42
-rw-r--r--  include/net/rawv6.h | 27
-rw-r--r--  include/net/rose.h | 235
-rw-r--r--  include/net/route.h | 205
-rw-r--r--  include/net/sch_generic.h | 175
-rw-r--r--  include/net/scm.h | 71
-rw-r--r--  include/net/sctp/command.h | 211
-rw-r--r--  include/net/sctp/constants.h | 432
-rw-r--r--  include/net/sctp/sctp.h | 620
-rw-r--r--  include/net/sctp/sm.h | 442
-rw-r--r--  include/net/sctp/structs.h | 1752
-rw-r--r--  include/net/sctp/tsnmap.h | 207
-rw-r--r--  include/net/sctp/ulpevent.h | 162
-rw-r--r--  include/net/sctp/ulpqueue.h | 92
-rw-r--r--  include/net/sctp/user.h | 589
-rw-r--r--  include/net/slhc_vj.h | 188
-rw-r--r--  include/net/snmp.h | 145
-rw-r--r--  include/net/sock.h | 1297
-rw-r--r--  include/net/syncppp.h | 105
-rw-r--r--  include/net/tc_act/tc_gact.h | 17
-rw-r--r--  include/net/tc_act/tc_ipt.h | 16
-rw-r--r--  include/net/tc_act/tc_mirred.h | 15
-rw-r--r--  include/net/tc_act/tc_pedit.h | 14
-rw-r--r--  include/net/tcp.h | 2022
-rw-r--r--  include/net/tcp_ecn.h | 126
-rw-r--r--  include/net/transp_v6.h | 53
-rw-r--r--  include/net/udp.h | 99
-rw-r--r--  include/net/x25.h | 273
-rw-r--r--  include/net/x25device.h | 17
-rw-r--r--  include/net/xfrm.h | 905
131 files changed, 26615 insertions, 0 deletions
diff --git a/include/net/act_api.h b/include/net/act_api.h
new file mode 100644
index 000000000000..ed00a995f576
--- /dev/null
+++ b/include/net/act_api.h
@@ -0,0 +1,116 @@
+#ifndef __NET_ACT_API_H
+#define __NET_ACT_API_H
+
+/*
+ * Public police action API for classifiers/qdiscs
+ */
+
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+
+#define tca_gen(name) \
+struct tcf_##name *next; \
+ u32 index; \
+ int refcnt; \
+ int bindcnt; \
+ u32 capab; \
+ int action; \
+ struct tcf_t tm; \
+ struct gnet_stats_basic bstats; \
+ struct gnet_stats_queue qstats; \
+ struct gnet_stats_rate_est rate_est; \
+ spinlock_t *stats_lock; \
+ spinlock_t lock
+
+struct tcf_police
+{
+ tca_gen(police);
+ int result;
+ u32 ewma_rate;
+ u32 burst;
+ u32 mtu;
+ u32 toks;
+ u32 ptoks;
+ psched_time_t t_c;
+ struct qdisc_rate_table *R_tab;
+ struct qdisc_rate_table *P_tab;
+};
+
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+
+struct tcf_act_hdr
+{
+ tca_gen(act_hdr);
+};
+
+struct tc_action
+{
+ void *priv;
+ struct tc_action_ops *ops;
+ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
+ __u32 order;
+ struct tc_action *next;
+};
+
+#define TCA_CAP_NONE 0
+struct tc_action_ops
+{
+ struct tc_action_ops *next;
+ char kind[IFNAMSIZ];
+ __u32 type; /* TBD to match kind */
+ __u32 capab; /* capabilities includes 4 bit version */
+ struct module *owner;
+ int (*act)(struct sk_buff **, struct tc_action *);
+ int (*get_stats)(struct sk_buff *, struct tc_action *);
+ int (*dump)(struct sk_buff *, struct tc_action *,int , int);
+ int (*cleanup)(struct tc_action *, int bind);
+ int (*lookup)(struct tc_action *, u32 );
+ int (*init)(struct rtattr *,struct rtattr *,struct tc_action *, int , int );
+ int (*walk)(struct sk_buff *, struct netlink_callback *, int , struct tc_action *);
+};
+
+extern int tcf_register_action(struct tc_action_ops *a);
+extern int tcf_unregister_action(struct tc_action_ops *a);
+extern void tcf_action_destroy(struct tc_action *a, int bind);
+extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res);
+extern struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err);
+extern struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err);
+extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int);
+#endif /* CONFIG_NET_CLS_ACT */
+
+extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
+extern void tcf_police_destroy(struct tcf_police *p);
+extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
+extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
+extern int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p);
+
+static inline int
+tcf_police_release(struct tcf_police *p, int bind)
+{
+ int ret = 0;
+#ifdef CONFIG_NET_CLS_ACT
+ if (p) {
+ if (bind) {
+ p->bindcnt--;
+ }
+ p->refcnt--;
+ if (p->refcnt <= 0 && !p->bindcnt) {
+ tcf_police_destroy(p);
+ ret = 1;
+ }
+ }
+#else
+ if (p && --p->refcnt == 0)
+ tcf_police_destroy(p);
+
+#endif /* CONFIG_NET_CLS_ACT */
+ return ret;
+}
+
+#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
new file mode 100644
index 000000000000..7af9a13cb9be
--- /dev/null
+++ b/include/net/addrconf.h
@@ -0,0 +1,240 @@
+#ifndef _ADDRCONF_H
+#define _ADDRCONF_H
+
+#define RETRANS_TIMER HZ
+
+#define MAX_RTR_SOLICITATIONS 3
+#define RTR_SOLICITATION_INTERVAL (4*HZ)
+
+#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
+
+#define TEMP_VALID_LIFETIME (7*86400)
+#define TEMP_PREFERRED_LIFETIME (86400)
+#define REGEN_MAX_RETRY (5)
+#define MAX_DESYNC_FACTOR (600)
+
+#define ADDR_CHECK_FREQUENCY (120*HZ)
+
+#define IPV6_MAX_ADDRESSES 16
+
+struct prefix_info {
+ __u8 type;
+ __u8 length;
+ __u8 prefix_len;
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 onlink : 1,
+ autoconf : 1,
+ reserved : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved : 6,
+ autoconf : 1,
+ onlink : 1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u32 valid;
+ __u32 prefered;
+ __u32 reserved2;
+
+ struct in6_addr prefix;
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <net/if_inet6.h>
+
+#define IN6_ADDR_HSIZE 16
+
+extern int addrconf_init(void);
+extern void addrconf_cleanup(void);
+
+extern int addrconf_add_ifaddr(void __user *arg);
+extern int addrconf_del_ifaddr(void __user *arg);
+extern int addrconf_set_dstaddr(void __user *arg);
+
+extern int ipv6_chk_addr(struct in6_addr *addr,
+ struct net_device *dev,
+ int strict);
+extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr,
+ struct net_device *dev,
+ int strict);
+extern int ipv6_get_saddr(struct dst_entry *dst,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+extern int ipv6_dev_get_saddr(struct net_device *dev,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *);
+extern int ipv6_rcv_saddr_equal(const struct sock *sk,
+ const struct sock *sk2);
+extern void addrconf_join_solict(struct net_device *dev,
+ struct in6_addr *addr);
+extern void addrconf_leave_solict(struct inet6_dev *idev,
+ struct in6_addr *addr);
+
+/*
+ * multicast prototypes (mcast.c)
+ */
+extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ struct in6_addr *addr);
+extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ struct in6_addr *addr);
+extern void ipv6_sock_mc_close(struct sock *sk);
+extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
+ struct in6_addr *src_addr);
+
+extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr);
+extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
+extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr);
+extern void ipv6_mc_up(struct inet6_dev *idev);
+extern void ipv6_mc_down(struct inet6_dev *idev);
+extern void ipv6_mc_init_dev(struct inet6_dev *idev);
+extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
+
+extern int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group,
+ struct in6_addr *src_addr);
+extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
+
+extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len);
+
+extern int ipv6_get_hoplimit(struct net_device *dev);
+
+/*
+ * anycast prototypes (anycast.c)
+ */
+extern int ipv6_sock_ac_join(struct sock *sk,int ifindex,struct in6_addr *addr);
+extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex,struct in6_addr *addr);
+extern void ipv6_sock_ac_close(struct sock *sk);
+extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex);
+
+extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr);
+extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr);
+extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr);
+
+
+/* Device notifier */
+extern int register_inet6addr_notifier(struct notifier_block *nb);
+extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+
+static inline struct inet6_dev *
+__in6_dev_get(struct net_device *dev)
+{
+ return (struct inet6_dev *)dev->ip6_ptr;
+}
+
+extern rwlock_t addrconf_lock;
+
+static inline struct inet6_dev *
+in6_dev_get(struct net_device *dev)
+{
+ struct inet6_dev *idev = NULL;
+ read_lock(&addrconf_lock);
+ idev = dev->ip6_ptr;
+ if (idev)
+ atomic_inc(&idev->refcnt);
+ read_unlock(&addrconf_lock);
+ return idev;
+}
+
+extern void in6_dev_finish_destroy(struct inet6_dev *idev);
+
+static inline void
+in6_dev_put(struct inet6_dev *idev)
+{
+ if (atomic_dec_and_test(&idev->refcnt))
+ in6_dev_finish_destroy(idev);
+}
+
+#define __in6_dev_put(idev) atomic_dec(&(idev)->refcnt)
+#define in6_dev_hold(idev) atomic_inc(&(idev)->refcnt)
+
+
+extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+
+static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
+{
+ if (atomic_dec_and_test(&ifp->refcnt))
+ inet6_ifa_finish_destroy(ifp);
+}
+
+#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt)
+#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt)
+
+
+extern void addrconf_forwarding_on(void);
+/*
+ * Hash function taken from net_alias.c
+ */
+
+static __inline__ u8 ipv6_addr_hash(const struct in6_addr *addr)
+{
+ __u32 word;
+
+ /*
+ * We perform the hash function over the last 64 bits of the address
+ * This will include the IEEE address token on links that support it.
+ */
+
+ word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
+ word ^= (word >> 16);
+ word ^= (word >> 8);
+
+ return ((word ^ (word >> 4)) & 0x0f);
+}
+
+/*
+ * compute link-local solicited-node multicast address
+ */
+
+static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
+ struct in6_addr *solicited)
+{
+ ipv6_addr_set(solicited,
+ __constant_htonl(0xFF020000), 0,
+ __constant_htonl(0x1),
+ __constant_htonl(0xFF000000) | addr->s6_addr32[3]);
+}
+
+
+static inline void ipv6_addr_all_nodes(struct in6_addr *addr)
+{
+ ipv6_addr_set(addr,
+ __constant_htonl(0xFF020000), 0, 0,
+ __constant_htonl(0x1));
+}
+
+static inline void ipv6_addr_all_routers(struct in6_addr *addr)
+{
+ ipv6_addr_set(addr,
+ __constant_htonl(0xFF020000), 0, 0,
+ __constant_htonl(0x2));
+}
+
+static inline int ipv6_addr_is_multicast(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[0] & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000);
+}
+
+static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[0] == htonl(0xff020000) &&
+ addr->s6_addr32[1] == 0 &&
+ addr->s6_addr32[2] == 0 &&
+ addr->s6_addr32[3] == htonl(0x00000001));
+}
+
+static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[0] == htonl(0xff020000) &&
+ addr->s6_addr32[1] == 0 &&
+ addr->s6_addr32[2] == 0 &&
+ addr->s6_addr32[3] == htonl(0x00000002));
+}
+
+#endif
+#endif
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
new file mode 100644
index 000000000000..b60b3846b9d1
--- /dev/null
+++ b/include/net/af_unix.h
@@ -0,0 +1,78 @@
+#ifndef __LINUX_NET_AFUNIX_H
+#define __LINUX_NET_AFUNIX_H
+extern void unix_inflight(struct file *fp);
+extern void unix_notinflight(struct file *fp);
+extern void unix_gc(void);
+
+#define UNIX_HASH_SIZE 256
+
+extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+extern rwlock_t unix_table_lock;
+
+extern atomic_t unix_tot_inflight;
+
+static inline struct sock *first_unix_socket(int *i)
+{
+ for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
+ if (!hlist_empty(&unix_socket_table[*i]))
+ return __sk_head(&unix_socket_table[*i]);
+ }
+ return NULL;
+}
+
+static inline struct sock *next_unix_socket(int *i, struct sock *s)
+{
+ struct sock *next = sk_next(s);
+ /* More in this chain? */
+ if (next)
+ return next;
+ /* Look for next non-empty chain. */
+ for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
+ if (!hlist_empty(&unix_socket_table[*i]))
+ return __sk_head(&unix_socket_table[*i]);
+ }
+ return NULL;
+}
+
+#define forall_unix_sockets(i, s) \
+ for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
+
+struct unix_address {
+ atomic_t refcnt;
+ int len;
+ unsigned hash;
+ struct sockaddr_un name[0];
+};
+
+struct unix_skb_parms {
+ struct ucred creds; /* Skb credentials */
+ struct scm_fp_list *fp; /* Passed files */
+};
+
+#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb))
+#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
+
+#define unix_state_rlock(s) read_lock(&unix_sk(s)->lock)
+#define unix_state_runlock(s) read_unlock(&unix_sk(s)->lock)
+#define unix_state_wlock(s) write_lock(&unix_sk(s)->lock)
+#define unix_state_wunlock(s) write_unlock(&unix_sk(s)->lock)
+
+#ifdef __KERNEL__
+/* The AF_UNIX socket */
+struct unix_sock {
+ /* WARNING: sk has to be the first member */
+ struct sock sk;
+ struct unix_address *addr;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
+ struct semaphore readsem;
+ struct sock *peer;
+ struct sock *other;
+ struct sock *gc_tree;
+ atomic_t inflight;
+ rwlock_t lock;
+ wait_queue_head_t peer_wait;
+};
+#define unix_sk(__sk) ((struct unix_sock *)__sk)
+#endif
+#endif
diff --git a/include/net/ah.h b/include/net/ah.h
new file mode 100644
index 000000000000..ceff00afae09
--- /dev/null
+++ b/include/net/ah.h
@@ -0,0 +1,35 @@
+#ifndef _NET_AH_H
+#define _NET_AH_H
+
+#include <net/xfrm.h>
+
+/* This is the maximum truncated ICV length that we know of. */
+#define MAX_AH_AUTH_LEN 12
+
+struct ah_data
+{
+ u8 *key;
+ int key_len;
+ u8 *work_icv;
+ int icv_full_len;
+ int icv_trunc_len;
+
+ void (*icv)(struct ah_data*,
+ struct sk_buff *skb, u8 *icv);
+
+ struct crypto_tfm *tfm;
+};
+
+static inline void
+ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
+{
+ struct crypto_tfm *tfm = ahp->tfm;
+
+ memset(auth_data, 0, ahp->icv_trunc_len);
+ crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
+ skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
+ crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
+ memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
+}
+
+#endif
diff --git a/include/net/arp.h b/include/net/arp.h
new file mode 100644
index 000000000000..a1f09fad6a52
--- /dev/null
+++ b/include/net/arp.h
@@ -0,0 +1,32 @@
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+#include <linux/if_arp.h>
+#include <net/neighbour.h>
+
+#define HAVE_ARP_CREATE
+
+extern struct neigh_table arp_tbl;
+
+extern void arp_init(void);
+extern int arp_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt);
+extern int arp_find(unsigned char *haddr, struct sk_buff *skb);
+extern int arp_ioctl(unsigned int cmd, void __user *arg);
+extern void arp_send(int type, int ptype, u32 dest_ip,
+ struct net_device *dev, u32 src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th);
+extern int arp_bind_neighbour(struct dst_entry *dst);
+extern int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir);
+extern void arp_ifdown(struct net_device *dev);
+
+extern struct sk_buff *arp_create(int type, int ptype, u32 dest_ip,
+ struct net_device *dev, u32 src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw,
+ unsigned char *target_hw);
+extern void arp_xmit(struct sk_buff *skb);
+
+extern struct neigh_ops arp_broken_ops;
+
+#endif /* _ARP_H */
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
new file mode 100644
index 000000000000..47048b1d179a
--- /dev/null
+++ b/include/net/atmclip.h
@@ -0,0 +1,62 @@
+/* net/atm/atmarp.h - RFC1577 ATM ARP */
+
+/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+
+
+#ifndef _ATMCLIP_H
+#define _ATMCLIP_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/atmarp.h>
+#include <linux/spinlock.h>
+#include <net/neighbour.h>
+
+
+#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
+#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
+
+
+struct clip_vcc {
+ struct atm_vcc *vcc; /* VCC descriptor */
+ struct atmarp_entry *entry; /* ATMARP table entry, NULL if IP addr.
+ isn't known yet */
+ int xoff; /* 1 if send buffer is full */
+ unsigned char encap; /* 0: NULL, 1: LLC/SNAP */
+ unsigned long last_use; /* last send or receive operation */
+ unsigned long idle_timeout; /* keep open idle for so many jiffies*/
+ void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
+ /* keep old push fn for chaining */
+ void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb);
+ /* keep old pop fn for chaining */
+ struct clip_vcc *next; /* next VCC */
+};
+
+
+struct atmarp_entry {
+ u32 ip; /* IP address */
+ struct clip_vcc *vccs; /* active VCCs; NULL if resolution is
+ pending */
+ unsigned long expires; /* entry expiration time */
+ struct neighbour *neigh; /* neighbour back-pointer */
+};
+
+
+#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
+
+
+struct clip_priv {
+ int number; /* for convenience ... */
+ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
+ struct net_device_stats stats;
+ struct net_device *next; /* next CLIP interface */
+};
+
+
+#ifdef __KERNEL__
+extern struct neigh_table *clip_tbl_hook;
+#endif
+
+#endif
diff --git a/include/net/ax25.h b/include/net/ax25.h
new file mode 100644
index 000000000000..fb95ecb6fe03
--- /dev/null
+++ b/include/net/ax25.h
@@ -0,0 +1,385 @@
+/*
+ * Declarations of AX.25 type objects.
+ *
+ * Alan Cox (GW4PTS) 10/11/93
+ */
+#ifndef _AX25_H
+#define _AX25_H
+
+#include <linux/config.h>
+#include <linux/ax25.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+#define AX25_T1CLAMPLO 1
+#define AX25_T1CLAMPHI (30 * HZ)
+
+#define AX25_BPQ_HEADER_LEN 16
+#define AX25_KISS_HEADER_LEN 1
+
+#define AX25_HEADER_LEN 17
+#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
+
+/* AX.25 Protocol IDs */
+#define AX25_P_ROSE 0x01
+#define AX25_P_IP 0xCC
+#define AX25_P_ARP 0xCD
+#define AX25_P_TEXT 0xF0
+#define AX25_P_NETROM 0xCF
+#define AX25_P_SEGMENT 0x08
+
+/* AX.25 Segment control values */
+#define AX25_SEG_REM 0x7F
+#define AX25_SEG_FIRST 0x80
+
+#define AX25_CBIT 0x80 /* Command/Response bit */
+#define AX25_EBIT 0x01 /* HDLC Address Extension bit */
+#define AX25_HBIT 0x80 /* Has been repeated bit */
+
+#define AX25_SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */
+#define AX25_ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */
+#define AX25_DAMA_FLAG 0x20 /* Well, it is *NOT* unused! (dl1bke 951121) */
+
+#define AX25_COND_ACK_PENDING 0x01
+#define AX25_COND_REJECT 0x02
+#define AX25_COND_PEER_RX_BUSY 0x04
+#define AX25_COND_OWN_RX_BUSY 0x08
+#define AX25_COND_DAMA_MODE 0x10
+
+#ifndef _LINUX_NETDEVICE_H
+#include <linux/netdevice.h>
+#endif
+
+/* Upper sub-layer (LAPB) definitions */
+
+/* Control field templates */
+#define AX25_I 0x00 /* Information frames */
+#define AX25_S 0x01 /* Supervisory frames */
+#define AX25_RR 0x01 /* Receiver ready */
+#define AX25_RNR 0x05 /* Receiver not ready */
+#define AX25_REJ 0x09 /* Reject */
+#define AX25_U 0x03 /* Unnumbered frames */
+#define AX25_SABM 0x2f /* Set Asynchronous Balanced Mode */
+#define AX25_SABME 0x6f /* Set Asynchronous Balanced Mode Extended */
+#define AX25_DISC 0x43 /* Disconnect */
+#define AX25_DM 0x0f /* Disconnected mode */
+#define AX25_UA 0x63 /* Unnumbered acknowledge */
+#define AX25_FRMR 0x87 /* Frame reject */
+#define AX25_UI 0x03 /* Unnumbered information */
+#define AX25_XID 0xaf /* Exchange information */
+#define AX25_TEST 0xe3 /* Test */
+
+#define AX25_PF 0x10 /* Poll/final bit for standard AX.25 */
+#define AX25_EPF 0x01 /* Poll/final bit for extended AX.25 */
+
+#define AX25_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define AX25_POLLOFF 0
+#define AX25_POLLON 1
+
+/* AX25 L2 C-bit */
+#define AX25_COMMAND 1
+#define AX25_RESPONSE 2
+
+/* Define Link State constants. */
+
+enum {
+ AX25_STATE_0,
+ AX25_STATE_1,
+ AX25_STATE_2,
+ AX25_STATE_3,
+ AX25_STATE_4
+};
+
+#define AX25_MODULUS 8 /* Standard AX.25 modulus */
+#define AX25_EMODULUS 128 /* Extended AX.25 modulus */
+
+enum {
+ AX25_PROTO_STD_SIMPLEX,
+ AX25_PROTO_STD_DUPLEX,
+ AX25_PROTO_DAMA_SLAVE,
+ AX25_PROTO_DAMA_MASTER
+};
+
+enum {
+ AX25_VALUES_IPDEFMODE, /* 0=DG 1=VC */
+ AX25_VALUES_AXDEFMODE, /* 0=Normal 1=Extended Seq Nos */
+ AX25_VALUES_BACKOFF, /* 0=None 1=Linear 2=Exponential */
+ AX25_VALUES_CONMODE, /* Allow connected modes - 0=No 1=no "PID text" 2=all PIDs */
+ AX25_VALUES_WINDOW, /* Default window size for standard AX.25 */
+ AX25_VALUES_EWINDOW, /* Default window size for extended AX.25 */
+ AX25_VALUES_T1, /* Default T1 timeout value */
+ AX25_VALUES_T2, /* Default T2 timeout value */
+ AX25_VALUES_T3, /* Default T3 timeout value */
+ AX25_VALUES_IDLE, /* Connected mode idle timer */
+ AX25_VALUES_N2, /* Default N2 value */
+ AX25_VALUES_PACLEN, /* AX.25 MTU */
+ AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */
+ AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */
+ AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
+};
+
+#define AX25_DEF_IPDEFMODE 0 /* Datagram */
+#define AX25_DEF_AXDEFMODE 0 /* Normal */
+#define AX25_DEF_BACKOFF 1 /* Linear backoff */
+#define AX25_DEF_CONMODE 2 /* Connected mode allowed */
+#define AX25_DEF_WINDOW 2 /* Window=2 */
+#define AX25_DEF_EWINDOW 32 /* Modulo-128 Window=32 */
+#define AX25_DEF_T1 (10 * HZ) /* T1=10s */
+#define AX25_DEF_T2 (3 * HZ) /* T2=3s */
+#define AX25_DEF_T3 (300 * HZ) /* T3=300s */
+#define AX25_DEF_N2 10 /* N2=10 */
+#define AX25_DEF_IDLE (0 * 60 * HZ) /* Idle=None */
+#define AX25_DEF_PACLEN 256 /* Paclen=256 */
+#define AX25_DEF_PROTOCOL AX25_PROTO_STD_SIMPLEX /* Standard AX.25 */
+#define AX25_DEF_DS_TIMEOUT (3 * 60 * HZ) /* DAMA timeout 3 minutes */
+
+typedef struct ax25_uid_assoc {
+ struct ax25_uid_assoc *next;
+ uid_t uid;
+ ax25_address call;
+} ax25_uid_assoc;
+
+typedef struct {
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
+ unsigned char ndigi;
+ char lastrepeat;
+} ax25_digi;
+
+typedef struct ax25_route {
+ struct ax25_route *next;
+ atomic_t ref;
+ ax25_address callsign;
+ struct net_device *dev;
+ ax25_digi *digipeat;
+ char ip_mode;
+ struct timer_list timer;
+} ax25_route;
+
+typedef struct {
+ char slave; /* slave_mode? */
+ struct timer_list slave_timer; /* timeout timer */
+ unsigned short slave_timeout; /* when? */
+} ax25_dama_info;
+
+struct ctl_table;
+
+typedef struct ax25_dev {
+ struct ax25_dev *next;
+ struct net_device *dev;
+ struct net_device *forward;
+ struct ctl_table *systable;
+ int values[AX25_MAX_VALUES];
+#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
+ ax25_dama_info dama;
+#endif
+} ax25_dev;
+
+typedef struct ax25_cb {
+ struct hlist_node ax25_node;
+ ax25_address source_addr, dest_addr;
+ ax25_digi *digipeat;
+ ax25_dev *ax25_dev;
+ unsigned char iamdigi;
+ unsigned char state, modulus, pidincl;
+ unsigned short vs, vr, va;
+ unsigned char condition, backoff;
+ unsigned char n2, n2count;
+ struct timer_list t1timer, t2timer, t3timer, idletimer;
+ unsigned long t1, t2, t3, idle, rtt;
+ unsigned short paclen, fragno, fraglen;
+ struct sk_buff_head write_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head frag_queue;
+ unsigned char window;
+ struct timer_list timer, dtimer;
+ struct sock *sk; /* Backlink to socket */
+ atomic_t refcount;
+} ax25_cb;
+
+#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
+
+#define ax25_for_each(__ax25, node, list) \
+ hlist_for_each_entry(__ax25, node, list, ax25_node)
+
+#define ax25_cb_hold(__ax25) \
+ atomic_inc(&((__ax25)->refcount))
+
+static __inline__ void ax25_cb_put(ax25_cb *ax25)
+{
+ if (atomic_dec_and_test(&ax25->refcount)) {
+ if (ax25->digipeat)
+ kfree(ax25->digipeat);
+ kfree(ax25);
+ }
+}
+
+/* af_ax25.c */
+extern struct hlist_head ax25_list;
+extern spinlock_t ax25_list_lock;
+extern void ax25_cb_add(ax25_cb *);
+struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
+struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
+extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
+extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+extern void ax25_destroy_socket(ax25_cb *);
+extern ax25_cb *ax25_create_cb(void);
+extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+
+/* ax25_addr.c */
+extern ax25_address null_ax25_address;
+extern char *ax2asc(ax25_address *);
+extern ax25_address *asc2ax(char *);
+extern int ax25cmp(ax25_address *, ax25_address *);
+extern int ax25digicmp(ax25_digi *, ax25_digi *);
+extern unsigned char *ax25_addr_parse(unsigned char *, int, ax25_address *, ax25_address *, ax25_digi *, int *, int *);
+extern int ax25_addr_build(unsigned char *, ax25_address *, ax25_address *, ax25_digi *, int, int);
+extern int ax25_addr_size(ax25_digi *);
+extern void ax25_digi_invert(ax25_digi *, ax25_digi *);
+
+/* ax25_dev.c */
+extern ax25_dev *ax25_dev_list;
+extern spinlock_t ax25_dev_lock;
+
+static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
+{
+ return dev->ax25_ptr;
+}
+
+extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
+extern void ax25_dev_device_up(struct net_device *);
+extern void ax25_dev_device_down(struct net_device *);
+extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+extern struct net_device *ax25_fwd_dev(struct net_device *);
+extern void ax25_dev_free(void);
+
+/* ax25_ds_in.c */
+extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+
+/* ax25_ds_subr.c */
+extern void ax25_ds_nr_error_recovery(ax25_cb *);
+extern void ax25_ds_enquiry_response(ax25_cb *);
+extern void ax25_ds_establish_data_link(ax25_cb *);
+extern void ax25_dev_dama_off(ax25_dev *);
+extern void ax25_dama_on(ax25_cb *);
+extern void ax25_dama_off(ax25_cb *);
+
+/* ax25_ds_timer.c */
+extern void ax25_ds_set_timer(ax25_dev *);
+extern void ax25_ds_del_timer(ax25_dev *);
+extern void ax25_ds_timer(ax25_cb *);
+extern void ax25_ds_t1_timeout(ax25_cb *);
+extern void ax25_ds_heartbeat_expiry(ax25_cb *);
+extern void ax25_ds_t3timer_expiry(ax25_cb *);
+extern void ax25_ds_idletimer_expiry(ax25_cb *);
+
+/* ax25_iface.c */
+extern int ax25_protocol_register(unsigned int, int (*)(struct sk_buff *, ax25_cb *));
+extern void ax25_protocol_release(unsigned int);
+extern int ax25_linkfail_register(void (*)(ax25_cb *, int));
+extern void ax25_linkfail_release(void (*)(ax25_cb *, int));
+extern int ax25_listen_register(ax25_address *, struct net_device *);
+extern void ax25_listen_release(ax25_address *, struct net_device *);
+extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+extern int ax25_listen_mine(ax25_address *, struct net_device *);
+extern void ax25_link_failed(ax25_cb *, int);
+extern int ax25_protocol_is_registered(unsigned int);
+
+/* ax25_in.c */
+extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *);
+
+/* ax25_ip.c */
+extern int ax25_encapsulate(struct sk_buff *, struct net_device *, unsigned short, void *, void *, unsigned int);
+extern int ax25_rebuild_header(struct sk_buff *);
+
+/* ax25_out.c */
+extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
+extern void ax25_output(ax25_cb *, int, struct sk_buff *);
+extern void ax25_kick(ax25_cb *);
+extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+extern void ax25_queue_xmit(struct sk_buff *);
+extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
+
+/* ax25_route.c */
+extern void ax25_rt_device_down(struct net_device *);
+extern int ax25_rt_ioctl(unsigned int, void __user *);
+extern struct file_operations ax25_route_fops;
+extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
+extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *,
+ struct net_device *);
+extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_rt_free(void);
+
+static inline void ax25_put_route(ax25_route *ax25_rt)
+{
+ atomic_dec(&ax25_rt->ref);
+}
+
+/* ax25_std_in.c */
+extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+
+/* ax25_std_subr.c */
+extern void ax25_std_nr_error_recovery(ax25_cb *);
+extern void ax25_std_establish_data_link(ax25_cb *);
+extern void ax25_std_transmit_enquiry(ax25_cb *);
+extern void ax25_std_enquiry_response(ax25_cb *);
+extern void ax25_std_timeout_response(ax25_cb *);
+
+/* ax25_std_timer.c */
+extern void ax25_std_heartbeat_expiry(ax25_cb *);
+extern void ax25_std_t1timer_expiry(ax25_cb *);
+extern void ax25_std_t2timer_expiry(ax25_cb *);
+extern void ax25_std_t3timer_expiry(ax25_cb *);
+extern void ax25_std_idletimer_expiry(ax25_cb *);
+
+/* ax25_subr.c */
+extern void ax25_clear_queues(ax25_cb *);
+extern void ax25_frames_acked(ax25_cb *, unsigned short);
+extern void ax25_requeue_frames(ax25_cb *);
+extern int ax25_validate_nr(ax25_cb *, unsigned short);
+extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+extern void ax25_send_control(ax25_cb *, int, int, int);
+extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_calculate_t1(ax25_cb *);
+extern void ax25_calculate_rtt(ax25_cb *);
+extern void ax25_disconnect(ax25_cb *, int);
+
+/* ax25_timer.c */
+extern void ax25_start_heartbeat(ax25_cb *);
+extern void ax25_start_t1timer(ax25_cb *);
+extern void ax25_start_t2timer(ax25_cb *);
+extern void ax25_start_t3timer(ax25_cb *);
+extern void ax25_start_idletimer(ax25_cb *);
+extern void ax25_stop_heartbeat(ax25_cb *);
+extern void ax25_stop_t1timer(ax25_cb *);
+extern void ax25_stop_t2timer(ax25_cb *);
+extern void ax25_stop_t3timer(ax25_cb *);
+extern void ax25_stop_idletimer(ax25_cb *);
+extern int ax25_t1timer_running(ax25_cb *);
+extern unsigned long ax25_display_timer(struct timer_list *);
+
+/* ax25_uid.c */
+extern int ax25_uid_policy;
+extern ax25_address *ax25_findbyuid(uid_t);
+extern int ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+extern struct file_operations ax25_uid_fops;
+extern void ax25_uid_free(void);
+
+/* sysctl_net_ax25.c */
+#ifdef CONFIG_SYSCTL
+extern void ax25_register_sysctl(void);
+extern void ax25_unregister_sysctl(void);
+#else
+static inline void ax25_register_sysctl(void) {};
+static inline void ax25_unregister_sysctl(void) {};
+#endif /* CONFIG_SYSCTL */
+
+#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
new file mode 100644
index 000000000000..42a84c53678b
--- /dev/null
+++ b/include/net/bluetooth/bluetooth.h
@@ -0,0 +1,181 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __BLUETOOTH_H
+#define __BLUETOOTH_H
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <net/sock.h>
+
+#ifndef AF_BLUETOOTH
+#define AF_BLUETOOTH 31
+#define PF_BLUETOOTH AF_BLUETOOTH
+#endif
+
+/* Reserve for core and driver use */
+#define BT_SKB_RESERVE 8
+
+#define BTPROTO_L2CAP 0
+#define BTPROTO_HCI 1
+#define BTPROTO_SCO 2
+#define BTPROTO_RFCOMM 3
+#define BTPROTO_BNEP 4
+#define BTPROTO_CMTP 5
+#define BTPROTO_HIDP 6
+#define BTPROTO_AVDTP 7
+
+#define SOL_HCI 0
+#define SOL_L2CAP 6
+#define SOL_SCO 17
+#define SOL_RFCOMM 18
+
+#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
+#define BT_DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __FUNCTION__ , ## arg)
+#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __FUNCTION__ , ## arg)
+
+#ifdef HCI_DATA_DUMP
+#define BT_DMP(buf, len) bt_dump(__FUNCTION__, buf, len)
+#else
+#define BT_DMP(D...)
+#endif
+
+extern struct proc_dir_entry *proc_bt;
+
+/* Connection and socket states */
+enum {
+ BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
+ BT_OPEN,
+ BT_BOUND,
+ BT_LISTEN,
+ BT_CONNECT,
+ BT_CONNECT2,
+ BT_CONFIG,
+ BT_DISCONN,
+ BT_CLOSED
+};
+
+/* Endianness conversions */
+#define htobs(a) __cpu_to_le16(a)
+#define htobl(a) __cpu_to_le32(a)
+#define btohs(a) __le16_to_cpu(a)
+#define btohl(a) __le32_to_cpu(a)
+
+/* BD Address */
+typedef struct {
+ __u8 b[6];
+} __attribute__((packed)) bdaddr_t;
+
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
+
+/* Copy, swap, convert BD Address */
+static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
+{
+ return memcmp(ba1, ba2, sizeof(bdaddr_t));
+}
+static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
+{
+ memcpy(dst, src, sizeof(bdaddr_t));
+}
+
+void baswap(bdaddr_t *dst, bdaddr_t *src);
+char *batostr(bdaddr_t *ba);
+bdaddr_t *strtoba(char *str);
+
+/* Common socket structures and functions */
+
+#define bt_sk(__sk) ((struct bt_sock *) __sk)
+
+struct bt_sock {
+ struct sock sk;
+ bdaddr_t src;
+ bdaddr_t dst;
+ struct list_head accept_q;
+ struct sock *parent;
+};
+
+struct bt_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+};
+
+int bt_sock_register(int proto, struct net_proto_family *ops);
+int bt_sock_unregister(int proto);
+void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags);
+uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
+int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+
+void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_unlink(struct sock *sk);
+struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+
+/* Skb helpers */
+struct bt_skb_cb {
+ int incoming;
+};
+#define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb))
+
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, int how)
+{
+ struct sk_buff *skb;
+
+ if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) {
+ skb_reserve(skb, BT_SKB_RESERVE);
+ bt_cb(skb)->incoming = 0;
+ }
+ return skb;
+}
+
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len,
+ int nb, int *err)
+{
+ struct sk_buff *skb;
+
+ if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
+ skb_reserve(skb, BT_SKB_RESERVE);
+ bt_cb(skb)->incoming = 0;
+ }
+
+ return skb;
+}
+
+static inline int skb_frags_no(struct sk_buff *skb)
+{
+ register struct sk_buff *frag = skb_shinfo(skb)->frag_list;
+ register int n = 1;
+
+ for (; frag; frag=frag->next, n++);
+ return n;
+}
+
+void bt_dump(char *pref, __u8 *buf, int count);
+
+int bt_err(__u16 code);
+
+#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
new file mode 100644
index 000000000000..6f0706f4af68
--- /dev/null
+++ b/include/net/bluetooth/hci.h
@@ -0,0 +1,755 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_H
+#define __HCI_H
+
+#define HCI_MAX_ACL_SIZE 1024
+#define HCI_MAX_SCO_SIZE 255
+#define HCI_MAX_EVENT_SIZE 260
+#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+
+/* HCI dev events */
+#define HCI_DEV_REG 1
+#define HCI_DEV_UNREG 2
+#define HCI_DEV_UP 3
+#define HCI_DEV_DOWN 4
+#define HCI_DEV_SUSPEND 5
+#define HCI_DEV_RESUME 6
+
+/* HCI notify events */
+#define HCI_NOTIFY_CONN_ADD 1
+#define HCI_NOTIFY_CONN_DEL 2
+#define HCI_NOTIFY_VOICE_SETTING 3
+
+/* HCI device types */
+#define HCI_VHCI 0
+#define HCI_USB 1
+#define HCI_PCCARD 2
+#define HCI_UART 3
+#define HCI_RS232 4
+#define HCI_PCI 5
+
+/* HCI device quirks */
+enum {
+ HCI_QUIRK_RESET_ON_INIT,
+ HCI_QUIRK_RAW_DEVICE
+};
+
+/* HCI device flags */
+enum {
+ HCI_UP,
+ HCI_INIT,
+ HCI_RUNNING,
+
+ HCI_PSCAN,
+ HCI_ISCAN,
+ HCI_AUTH,
+ HCI_ENCRYPT,
+ HCI_INQUIRY,
+
+ HCI_RAW,
+
+ HCI_SECMGR
+};
+
+/* HCI ioctl defines */
+#define HCIDEVUP _IOW('H', 201, int)
+#define HCIDEVDOWN _IOW('H', 202, int)
+#define HCIDEVRESET _IOW('H', 203, int)
+#define HCIDEVRESTAT _IOW('H', 204, int)
+
+#define HCIGETDEVLIST _IOR('H', 210, int)
+#define HCIGETDEVINFO _IOR('H', 211, int)
+#define HCIGETCONNLIST _IOR('H', 212, int)
+#define HCIGETCONNINFO _IOR('H', 213, int)
+
+#define HCISETRAW _IOW('H', 220, int)
+#define HCISETSCAN _IOW('H', 221, int)
+#define HCISETAUTH _IOW('H', 222, int)
+#define HCISETENCRYPT _IOW('H', 223, int)
+#define HCISETPTYPE _IOW('H', 224, int)
+#define HCISETLINKPOL _IOW('H', 225, int)
+#define HCISETLINKMODE _IOW('H', 226, int)
+#define HCISETACLMTU _IOW('H', 227, int)
+#define HCISETSCOMTU _IOW('H', 228, int)
+
+#define HCISETSECMGR _IOW('H', 230, int)
+
+#define HCIINQUIRY _IOR('H', 240, int)
+
+/* HCI timeouts */
+#define HCI_CONN_TIMEOUT (HZ * 40)
+#define HCI_DISCONN_TIMEOUT (HZ * 2)
+#define HCI_CONN_IDLE_TIMEOUT (HZ * 60)
+
+/* HCI Packet types */
+#define HCI_COMMAND_PKT 0x01
+#define HCI_ACLDATA_PKT 0x02
+#define HCI_SCODATA_PKT 0x03
+#define HCI_EVENT_PKT 0x04
+#define HCI_VENDOR_PKT 0xff
+
+/* HCI Packet types */
+#define HCI_DM1 0x0008
+#define HCI_DM3 0x0400
+#define HCI_DM5 0x4000
+#define HCI_DH1 0x0010
+#define HCI_DH3 0x0800
+#define HCI_DH5 0x8000
+
+#define HCI_HV1 0x0020
+#define HCI_HV2 0x0040
+#define HCI_HV3 0x0080
+
+#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
+#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
+
+/* ACL flags */
+#define ACL_CONT 0x01
+#define ACL_START 0x02
+#define ACL_ACTIVE_BCAST 0x04
+#define ACL_PICO_BCAST 0x08
+
+/* Baseband links */
+#define SCO_LINK 0x00
+#define ACL_LINK 0x01
+
+/* LMP features */
+#define LMP_3SLOT 0x01
+#define LMP_5SLOT 0x02
+#define LMP_ENCRYPT 0x04
+#define LMP_SOFFSET 0x08
+#define LMP_TACCURACY 0x10
+#define LMP_RSWITCH 0x20
+#define LMP_HOLD 0x40
+#define LMP_SNIF 0x80
+
+#define LMP_PARK 0x01
+#define LMP_RSSI 0x02
+#define LMP_QUALITY 0x04
+#define LMP_SCO 0x08
+#define LMP_HV2 0x10
+#define LMP_HV3 0x20
+#define LMP_ULAW 0x40
+#define LMP_ALAW 0x80
+
+#define LMP_CVSD 0x01
+#define LMP_PSCHEME 0x02
+#define LMP_PCONTROL 0x04
+
+/* Link policies */
+#define HCI_LP_RSWITCH 0x0001
+#define HCI_LP_HOLD 0x0002
+#define HCI_LP_SNIFF 0x0004
+#define HCI_LP_PARK 0x0008
+
+/* Link mode */
+#define HCI_LM_ACCEPT 0x8000
+#define HCI_LM_MASTER 0x0001
+#define HCI_LM_AUTH 0x0002
+#define HCI_LM_ENCRYPT 0x0004
+#define HCI_LM_TRUSTED 0x0008
+#define HCI_LM_RELIABLE 0x0010
+#define HCI_LM_SECURE 0x0020
+
+/* ----- HCI Commands ---- */
+/* OGF & OCF values */
+
+/* Informational Parameters */
+#define OGF_INFO_PARAM 0x04
+
+#define OCF_READ_LOCAL_VERSION 0x0001
+struct hci_rp_read_loc_version {
+ __u8 status;
+ __u8 hci_ver;
+ __u16 hci_rev;
+ __u8 lmp_ver;
+ __u16 manufacturer;
+ __u16 lmp_subver;
+} __attribute__ ((packed));
+
+#define OCF_READ_LOCAL_FEATURES 0x0003
+struct hci_rp_read_loc_features {
+ __u8 status;
+ __u8 features[8];
+} __attribute__ ((packed));
+
+#define OCF_READ_BUFFER_SIZE 0x0005
+struct hci_rp_read_buffer_size {
+ __u8 status;
+ __u16 acl_mtu;
+ __u8 sco_mtu;
+ __u16 acl_max_pkt;
+ __u16 sco_max_pkt;
+} __attribute__ ((packed));
+
+#define OCF_READ_BD_ADDR 0x0009
+struct hci_rp_read_bd_addr {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __attribute__ ((packed));
+
+/* Host Controller and Baseband */
+#define OGF_HOST_CTL 0x03
+#define OCF_RESET 0x0003
+#define OCF_READ_AUTH_ENABLE 0x001F
+#define OCF_WRITE_AUTH_ENABLE 0x0020
+ #define AUTH_DISABLED 0x00
+ #define AUTH_ENABLED 0x01
+
+#define OCF_READ_ENCRYPT_MODE 0x0021
+#define OCF_WRITE_ENCRYPT_MODE 0x0022
+ #define ENCRYPT_DISABLED 0x00
+ #define ENCRYPT_P2P 0x01
+ #define ENCRYPT_BOTH 0x02
+
+#define OCF_WRITE_CA_TIMEOUT 0x0016
+#define OCF_WRITE_PG_TIMEOUT 0x0018
+
+#define OCF_WRITE_SCAN_ENABLE 0x001A
+ #define SCAN_DISABLED 0x00
+ #define SCAN_INQUIRY 0x01
+ #define SCAN_PAGE 0x02
+
+#define OCF_SET_EVENT_FLT 0x0005
+struct hci_cp_set_event_flt {
+ __u8 flt_type;
+ __u8 cond_type;
+ __u8 condition[0];
+} __attribute__ ((packed));
+
+/* Filter types */
+#define HCI_FLT_CLEAR_ALL 0x00
+#define HCI_FLT_INQ_RESULT 0x01
+#define HCI_FLT_CONN_SETUP 0x02
+
+/* CONN_SETUP Condition types */
+#define HCI_CONN_SETUP_ALLOW_ALL 0x00
+#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
+#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
+
+/* CONN_SETUP Conditions */
+#define HCI_CONN_SETUP_AUTO_OFF 0x01
+#define HCI_CONN_SETUP_AUTO_ON 0x02
+
+#define OCF_READ_CLASS_OF_DEV 0x0023
+struct hci_rp_read_dev_class {
+ __u8 status;
+ __u8 dev_class[3];
+} __attribute__ ((packed));
+
+#define OCF_WRITE_CLASS_OF_DEV 0x0024
+struct hci_cp_write_dev_class {
+ __u8 dev_class[3];
+} __attribute__ ((packed));
+
+#define OCF_READ_VOICE_SETTING 0x0025
+struct hci_rp_read_voice_setting {
+ __u8 status;
+ __u16 voice_setting;
+} __attribute__ ((packed));
+
+#define OCF_WRITE_VOICE_SETTING 0x0026
+struct hci_cp_write_voice_setting {
+ __u16 voice_setting;
+} __attribute__ ((packed));
+
+#define OCF_HOST_BUFFER_SIZE 0x0033
+struct hci_cp_host_buffer_size {
+ __u16 acl_mtu;
+ __u8 sco_mtu;
+ __u16 acl_max_pkt;
+ __u16 sco_max_pkt;
+} __attribute__ ((packed));
+
+/* Link Control */
+#define OGF_LINK_CTL 0x01
+#define OCF_CREATE_CONN 0x0005
+struct hci_cp_create_conn {
+ bdaddr_t bdaddr;
+ __u16 pkt_type;
+ __u8 pscan_rep_mode;
+ __u8 pscan_mode;
+ __u16 clock_offset;
+ __u8 role_switch;
+} __attribute__ ((packed));
+
+#define OCF_ACCEPT_CONN_REQ 0x0009
+struct hci_cp_accept_conn_req {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __attribute__ ((packed));
+
+#define OCF_REJECT_CONN_REQ 0x000a
+struct hci_cp_reject_conn_req {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __attribute__ ((packed));
+
+#define OCF_DISCONNECT 0x0006
+struct hci_cp_disconnect {
+ __u16 handle;
+ __u8 reason;
+} __attribute__ ((packed));
+
+#define OCF_ADD_SCO 0x0007
+struct hci_cp_add_sco {
+ __u16 handle;
+ __u16 pkt_type;
+} __attribute__ ((packed));
+
+#define OCF_INQUIRY 0x0001
+struct hci_cp_inquiry {
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+} __attribute__ ((packed));
+
+#define OCF_INQUIRY_CANCEL 0x0002
+
+#define OCF_LINK_KEY_REPLY 0x000B
+struct hci_cp_link_key_reply {
+ bdaddr_t bdaddr;
+ __u8 link_key[16];
+} __attribute__ ((packed));
+
+#define OCF_LINK_KEY_NEG_REPLY 0x000C
+struct hci_cp_link_key_neg_reply {
+ bdaddr_t bdaddr;
+} __attribute__ ((packed));
+
+#define OCF_PIN_CODE_REPLY 0x000D
+struct hci_cp_pin_code_reply {
+ bdaddr_t bdaddr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __attribute__ ((packed));
+
+#define OCF_PIN_CODE_NEG_REPLY 0x000E
+struct hci_cp_pin_code_neg_reply {
+ bdaddr_t bdaddr;
+} __attribute__ ((packed));
+
+#define OCF_CHANGE_CONN_PTYPE 0x000F
+struct hci_cp_change_conn_ptype {
+ __u16 handle;
+ __u16 pkt_type;
+} __attribute__ ((packed));
+
+#define OCF_AUTH_REQUESTED 0x0011
+struct hci_cp_auth_requested {
+ __u16 handle;
+} __attribute__ ((packed));
+
+#define OCF_SET_CONN_ENCRYPT 0x0013
+struct hci_cp_set_conn_encrypt {
+ __u16 handle;
+ __u8 encrypt;
+} __attribute__ ((packed));
+
+#define OCF_CHANGE_CONN_LINK_KEY 0x0015
+struct hci_cp_change_conn_link_key {
+ __u16 handle;
+} __attribute__ ((packed));
+
+#define OCF_READ_REMOTE_FEATURES 0x001B
+struct hci_cp_read_rmt_features {
+ __u16 handle;
+} __attribute__ ((packed));
+
+#define OCF_READ_REMOTE_VERSION 0x001D
+struct hci_cp_read_rmt_version {
+ __u16 handle;
+} __attribute__ ((packed));
+
+/* Link Policy */
+#define OGF_LINK_POLICY 0x02
+#define OCF_ROLE_DISCOVERY 0x0009
+struct hci_cp_role_discovery {
+ __u16 handle;
+} __attribute__ ((packed));
+struct hci_rp_role_discovery {
+ __u8 status;
+ __u16 handle;
+ __u8 role;
+} __attribute__ ((packed));
+
+#define OCF_READ_LINK_POLICY 0x000C
+struct hci_cp_read_link_policy {
+ __u16 handle;
+} __attribute__ ((packed));
+struct hci_rp_read_link_policy {
+ __u8 status;
+ __u16 handle;
+ __u16 policy;
+} __attribute__ ((packed));
+
+#define OCF_SWITCH_ROLE 0x000B
+struct hci_cp_switch_role {
+ bdaddr_t bdaddr;
+ __u8 role;
+} __attribute__ ((packed));
+
+#define OCF_WRITE_LINK_POLICY 0x000D
+struct hci_cp_write_link_policy {
+ __u16 handle;
+ __u16 policy;
+} __attribute__ ((packed));
+struct hci_rp_write_link_policy {
+ __u8 status;
+ __u16 handle;
+} __attribute__ ((packed));
+
+/* Status params */
+#define OGF_STATUS_PARAM 0x05
+
+/* Testing commands */
+#define OGF_TESTING_CMD 0x3E
+
+/* Vendor specific commands */
+#define OGF_VENDOR_CMD 0x3F
+
+/* ---- HCI Events ---- */
+#define HCI_EV_INQUIRY_COMPLETE 0x01
+
+#define HCI_EV_INQUIRY_RESULT 0x02
+struct inquiry_info {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __u16 clock_offset;
+} __attribute__ ((packed));
+
+#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
+struct inquiry_info_with_rssi {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 dev_class[3];
+ __u16 clock_offset;
+ __s8 rssi;
+} __attribute__ ((packed));
+
+#define HCI_EV_CONN_COMPLETE 0x03
+struct hci_ev_conn_complete {
+ __u8 status;
+ __u16 handle;
+ bdaddr_t bdaddr;
+ __u8 link_type;
+ __u8 encr_mode;
+} __attribute__ ((packed));
+
+#define HCI_EV_CONN_REQUEST 0x04
+struct hci_ev_conn_request {
+ bdaddr_t bdaddr;
+ __u8 dev_class[3];
+ __u8 link_type;
+} __attribute__ ((packed));
+
+#define HCI_EV_DISCONN_COMPLETE 0x05
+struct hci_ev_disconn_complete {
+ __u8 status;
+ __u16 handle;
+ __u8 reason;
+} __attribute__ ((packed));
+
+#define HCI_EV_AUTH_COMPLETE 0x06
+struct hci_ev_auth_complete {
+ __u8 status;
+ __u16 handle;
+} __attribute__ ((packed));
+
+#define HCI_EV_ENCRYPT_CHANGE 0x08
+struct hci_ev_encrypt_change {
+ __u8 status;
+ __u16 handle;
+ __u8 encrypt;
+} __attribute__ ((packed));
+
+#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE 0x09
+struct hci_ev_change_conn_link_key_complete {
+ __u8 status;
+ __u16 handle;
+} __attribute__ ((packed));
+
+#define HCI_EV_QOS_SETUP_COMPLETE 0x0D
+struct hci_qos {
+ __u8 service_type;
+ __u32 token_rate;
+ __u32 peak_bandwidth;
+ __u32 latency;
+ __u32 delay_variation;
+} __attribute__ ((packed));
+struct hci_ev_qos_setup_complete {
+ __u8 status;
+ __u16 handle;
+ struct hci_qos qos;
+} __attribute__ ((packed));
+
+#define HCI_EV_CMD_COMPLETE 0x0E
+struct hci_ev_cmd_complete {
+ __u8 ncmd;
+ __u16 opcode;
+} __attribute__ ((packed));
+
+#define HCI_EV_CMD_STATUS 0x0F
+struct hci_ev_cmd_status {
+ __u8 status;
+ __u8 ncmd;
+ __u16 opcode;
+} __attribute__ ((packed));
+
+#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_ev_num_comp_pkts {
+ __u8 num_hndl;
+ /* variable length part */
+} __attribute__ ((packed));
+
+#define HCI_EV_ROLE_CHANGE 0x12
+struct hci_ev_role_change {
+ __u8 status;
+ bdaddr_t bdaddr;
+ __u8 role;
+} __attribute__ ((packed));
+
+#define HCI_EV_MODE_CHANGE 0x14
+struct hci_ev_mode_change {
+ __u8 status;
+ __u16 handle;
+ __u8 mode;
+ __u16 interval;
+} __attribute__ ((packed));
+
+#define HCI_EV_PIN_CODE_REQ 0x16
+struct hci_ev_pin_code_req {
+ bdaddr_t bdaddr;
+} __attribute__ ((packed));
+
+#define HCI_EV_LINK_KEY_REQ 0x17
+struct hci_ev_link_key_req {
+ bdaddr_t bdaddr;
+} __attribute__ ((packed));
+
+#define HCI_EV_LINK_KEY_NOTIFY 0x18
+struct hci_ev_link_key_notify {
+ bdaddr_t bdaddr;
+ __u8 link_key[16];
+ __u8 key_type;
+} __attribute__ ((packed));
+
+#define HCI_EV_RMT_FEATURES 0x0B
+struct hci_ev_rmt_features {
+ __u8 status;
+ __u16 handle;
+ __u8 features[8];
+} __attribute__ ((packed));
+
+#define HCI_EV_RMT_VERSION 0x0C
+struct hci_ev_rmt_version {
+ __u8 status;
+ __u16 handle;
+ __u8 lmp_ver;
+ __u16 manufacturer;
+ __u16 lmp_subver;
+} __attribute__ ((packed));
+
+#define HCI_EV_CLOCK_OFFSET 0x01C
+struct hci_ev_clock_offset {
+ __u8 status;
+ __u16 handle;
+ __u16 clock_offset;
+} __attribute__ ((packed));
+
+/* Internal events generated by Bluetooth stack */
+#define HCI_EV_STACK_INTERNAL 0xFD
+struct hci_ev_stack_internal {
+ __u16 type;
+ __u8 data[0];
+} __attribute__ ((packed));
+
+#define HCI_EV_SI_DEVICE 0x01
+struct hci_ev_si_device {
+ __u16 event;
+ __u16 dev_id;
+} __attribute__ ((packed));
+
+#define HCI_EV_SI_SECURITY 0x02
+struct hci_ev_si_security {
+ __u16 event;
+ __u16 proto;
+ __u16 subproto;
+ __u8 incoming;
+} __attribute__ ((packed));
+
+/* ---- HCI Packet structures ---- */
+#define HCI_COMMAND_HDR_SIZE 3
+#define HCI_EVENT_HDR_SIZE 2
+#define HCI_ACL_HDR_SIZE 4
+#define HCI_SCO_HDR_SIZE 3
+
+struct hci_command_hdr {
+ __u16 opcode; /* OCF & OGF */
+ __u8 plen;
+} __attribute__ ((packed));
+
+struct hci_event_hdr {
+ __u8 evt;
+ __u8 plen;
+} __attribute__ ((packed));
+
+struct hci_acl_hdr {
+ __u16 handle; /* Handle & Flags(PB, BC) */
+ __u16 dlen;
+} __attribute__ ((packed));
+
+struct hci_sco_hdr {
+ __u16 handle;
+ __u8 dlen;
+} __attribute__ ((packed));
+
+/* Command opcode pack/unpack */
+#define hci_opcode_pack(ogf, ocf) (__u16)((ocf & 0x03ff)|(ogf << 10))
+#define hci_opcode_ogf(op) (op >> 10)
+#define hci_opcode_ocf(op) (op & 0x03ff)
+
+/* ACL handle and flags pack/unpack */
+#define hci_handle_pack(h, f) (__u16)((h & 0x0fff)|(f << 12))
+#define hci_handle(h) (h & 0x0fff)
+#define hci_flags(h) (h >> 12)
+
+/* ---- HCI Sockets ---- */
+
+/* Socket options */
+#define HCI_DATA_DIR 1
+#define HCI_FILTER 2
+#define HCI_TIME_STAMP 3
+
+/* CMSG flags */
+#define HCI_CMSG_DIR 0x0001
+#define HCI_CMSG_TSTAMP 0x0002
+
+struct sockaddr_hci {
+ sa_family_t hci_family;
+ unsigned short hci_dev;
+};
+#define HCI_DEV_NONE 0xffff
+
+struct hci_filter {
+ unsigned long type_mask;
+ unsigned long event_mask[2];
+ __u16 opcode;
+};
+
+struct hci_ufilter {
+ __u32 type_mask;
+ __u32 event_mask[2];
+ __u16 opcode;
+};
+
+#define HCI_FLT_TYPE_BITS 31
+#define HCI_FLT_EVENT_BITS 63
+#define HCI_FLT_OGF_BITS 63
+#define HCI_FLT_OCF_BITS 127
+
+/* ---- HCI Ioctl requests structures ---- */
+struct hci_dev_stats {
+ __u32 err_rx;
+ __u32 err_tx;
+ __u32 cmd_tx;
+ __u32 evt_rx;
+ __u32 acl_tx;
+ __u32 acl_rx;
+ __u32 sco_tx;
+ __u32 sco_rx;
+ __u32 byte_rx;
+ __u32 byte_tx;
+};
+
+struct hci_dev_info {
+ __u16 dev_id;
+ char name[8];
+
+ bdaddr_t bdaddr;
+
+ __u32 flags;
+ __u8 type;
+
+ __u8 features[8];
+
+ __u32 pkt_type;
+ __u32 link_policy;
+ __u32 link_mode;
+
+ __u16 acl_mtu;
+ __u16 acl_pkts;
+ __u16 sco_mtu;
+ __u16 sco_pkts;
+
+ struct hci_dev_stats stat;
+};
+
+struct hci_conn_info {
+ __u16 handle;
+ bdaddr_t bdaddr;
+ __u8 type;
+ __u8 out;
+ __u16 state;
+ __u32 link_mode;
+};
+
+struct hci_dev_req {
+ __u16 dev_id;
+ __u32 dev_opt;
+};
+
+struct hci_dev_list_req {
+ __u16 dev_num;
+ struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
+};
+
+struct hci_conn_list_req {
+ __u16 dev_id;
+ __u16 conn_num;
+ struct hci_conn_info conn_info[0];
+};
+
+struct hci_conn_info_req {
+ bdaddr_t bdaddr;
+ __u8 type;
+ struct hci_conn_info conn_info[0];
+};
+
+struct hci_inquiry_req {
+ __u16 dev_id;
+ __u16 flags;
+ __u8 lap[3];
+ __u8 length;
+ __u8 num_rsp;
+};
+#define IREQ_CACHE_FLUSH 0x0001
+
+#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
new file mode 100644
index 000000000000..6d63a47c731b
--- /dev/null
+++ b/include/net/bluetooth/hci_core.h
@@ -0,0 +1,626 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_CORE_H
+#define __HCI_CORE_H
+
+#include <linux/proc_fs.h>
+#include <net/bluetooth/hci.h>
+
+/* HCI upper protocols */
+#define HCI_PROTO_L2CAP 0
+#define HCI_PROTO_SCO 1
+
+#define HCI_INIT_TIMEOUT (HZ * 10)
+
+extern struct proc_dir_entry *proc_bt_hci;
+
+/* HCI Core structures */
+
+struct inquiry_data {
+ bdaddr_t bdaddr;
+ __u8 pscan_rep_mode;
+ __u8 pscan_period_mode;
+ __u8 pscan_mode;
+ __u8 dev_class[3];
+ __u16 clock_offset;
+ __s8 rssi;
+};
+
+struct inquiry_entry {
+ struct inquiry_entry *next;
+ __u32 timestamp;
+ struct inquiry_data data;
+};
+
+struct inquiry_cache {
+ spinlock_t lock;
+ __u32 timestamp;
+ struct inquiry_entry *list;
+};
+
+struct hci_conn_hash {
+ struct list_head list;
+ spinlock_t lock;
+ unsigned int acl_num;
+ unsigned int sco_num;
+};
+
+struct hci_dev {
+ struct list_head list;
+ spinlock_t lock;
+ atomic_t refcnt;
+
+ char name[8];
+ unsigned long flags;
+ __u16 id;
+ __u8 type;
+ bdaddr_t bdaddr;
+ __u8 features[8];
+ __u16 voice_setting;
+
+ __u16 pkt_type;
+ __u16 link_policy;
+ __u16 link_mode;
+
+ unsigned long quirks;
+
+ atomic_t cmd_cnt;
+ unsigned int acl_cnt;
+ unsigned int sco_cnt;
+
+ unsigned int acl_mtu;
+ unsigned int sco_mtu;
+ unsigned int acl_pkts;
+ unsigned int sco_pkts;
+
+ unsigned long cmd_last_tx;
+ unsigned long acl_last_tx;
+ unsigned long sco_last_tx;
+
+ struct tasklet_struct cmd_task;
+ struct tasklet_struct rx_task;
+ struct tasklet_struct tx_task;
+
+ struct sk_buff_head rx_q;
+ struct sk_buff_head raw_q;
+ struct sk_buff_head cmd_q;
+
+ struct sk_buff *sent_cmd;
+
+ struct semaphore req_lock;
+ wait_queue_head_t req_wait_q;
+ __u32 req_status;
+ __u32 req_result;
+
+ struct inquiry_cache inq_cache;
+ struct hci_conn_hash conn_hash;
+
+ struct hci_dev_stats stat;
+
+ struct sk_buff_head driver_init;
+
+ void *driver_data;
+ void *core_data;
+
+ atomic_t promisc;
+
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc;
+#endif
+
+ struct class_device class_dev;
+
+ struct module *owner;
+
+ int (*open)(struct hci_dev *hdev);
+ int (*close)(struct hci_dev *hdev);
+ int (*flush)(struct hci_dev *hdev);
+ int (*send)(struct sk_buff *skb);
+ void (*destruct)(struct hci_dev *hdev);
+ void (*notify)(struct hci_dev *hdev, unsigned int evt);
+ int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
+};
+
+struct hci_conn {
+ struct list_head list;
+
+ atomic_t refcnt;
+ spinlock_t lock;
+
+ bdaddr_t dst;
+ __u16 handle;
+ __u16 state;
+ __u8 type;
+ __u8 out;
+ __u8 dev_class[3];
+ __u32 link_mode;
+ unsigned long pend;
+
+ unsigned int sent;
+
+ struct sk_buff_head data_q;
+
+ struct timer_list timer;
+
+ struct hci_dev *hdev;
+ void *l2cap_data;
+ void *sco_data;
+ void *priv;
+
+ struct hci_conn *link;
+};
+
+extern struct hci_proto *hci_proto[];
+extern struct list_head hci_dev_list;
+extern struct list_head hci_cb_list;
+extern rwlock_t hci_dev_list_lock;
+extern rwlock_t hci_cb_list_lock;
+
+/* ----- Inquiry cache ----- */
+#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
+#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
+
+#define inquiry_cache_lock(c) spin_lock(&c->lock)
+#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
+#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
+#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
+
+static inline void inquiry_cache_init(struct hci_dev *hdev)
+{
+ struct inquiry_cache *c = &hdev->inq_cache;
+ spin_lock_init(&c->lock);
+ c->list = NULL;
+}
+
+static inline int inquiry_cache_empty(struct hci_dev *hdev)
+{
+ struct inquiry_cache *c = &hdev->inq_cache;
+ return (c->list == NULL);
+}
+
+static inline long inquiry_cache_age(struct hci_dev *hdev)
+{
+ struct inquiry_cache *c = &hdev->inq_cache;
+ return jiffies - c->timestamp;
+}
+
+static inline long inquiry_entry_age(struct inquiry_entry *e)
+{
+ return jiffies - e->timestamp;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
+
+/* ----- HCI Connections ----- */
+enum {
+ HCI_CONN_AUTH_PEND,
+ HCI_CONN_ENCRYPT_PEND,
+ HCI_CONN_RSWITCH_PEND
+};
+
+static inline void hci_conn_hash_init(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ INIT_LIST_HEAD(&h->list);
+ spin_lock_init(&h->lock);
+ h->acl_num = 0;
+ h->sco_num = 0;
+}
+
+static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ list_add(&c->list, &h->list);
+ if (c->type == ACL_LINK)
+ h->acl_num++;
+ else
+ h->sco_num++;
+}
+
+static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ list_del(&c->list);
+ if (c->type == ACL_LINK)
+ h->acl_num--;
+ else
+ h->sco_num--;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
+ __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct list_head *p;
+ struct hci_conn *c;
+
+ list_for_each(p, &h->list) {
+ c = list_entry(p, struct hci_conn, list);
+ if (c->handle == handle)
+ return c;
+ }
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
+ __u8 type, bdaddr_t *ba)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct list_head *p;
+ struct hci_conn *c;
+
+ list_for_each(p, &h->list) {
+ c = list_entry(p, struct hci_conn, list);
+ if (c->type == type && !bacmp(&c->dst, ba))
+ return c;
+ }
+ return NULL;
+}
+
+void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
+void hci_add_sco(struct hci_conn *conn, __u16 handle);
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+int hci_conn_del(struct hci_conn *conn);
+void hci_conn_hash_flush(struct hci_dev *hdev);
+
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
+int hci_conn_auth(struct hci_conn *conn);
+int hci_conn_encrypt(struct hci_conn *conn);
+int hci_conn_change_link_key(struct hci_conn *conn);
+int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
+
+static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout)
+{
+ mod_timer(&conn->timer, jiffies + timeout);
+}
+
+static inline void hci_conn_del_timer(struct hci_conn *conn)
+{
+ del_timer(&conn->timer);
+}
+
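+/*
+ * Connection reference counting: hci_conn_hold() takes a reference and
+ * cancels any pending connection timer; hci_conn_put() drops it and, once
+ * the count reaches zero, (re)arms the timer so the link can be cleaned up
+ * after a grace period instead of immediately.
+ */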
+static inline void hci_conn_hold(struct hci_conn *conn)
+{
+ atomic_inc(&conn->refcnt);
+ hci_conn_del_timer(conn);
+}
+
+static inline void hci_conn_put(struct hci_conn *conn)
+{
+ if (atomic_dec_and_test(&conn->refcnt)) {
+ if (conn->type == ACL_LINK) {
+ unsigned long timeo = (conn->out) ?
+ HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2;
+ hci_conn_set_timer(conn, timeo);
+ } else
+ hci_conn_set_timer(conn, HZ / 100);
+ }
+}
+
+/* ----- HCI tasks ----- */
+static inline void hci_sched_cmd(struct hci_dev *hdev)
+{
+ tasklet_schedule(&hdev->cmd_task);
+}
+
+static inline void hci_sched_rx(struct hci_dev *hdev)
+{
+ tasklet_schedule(&hdev->rx_task);
+}
+
+static inline void hci_sched_tx(struct hci_dev *hdev)
+{
+ tasklet_schedule(&hdev->tx_task);
+}
+
+/* ----- HCI Devices ----- */
+static inline void __hci_dev_put(struct hci_dev *d)
+{
+ if (atomic_dec_and_test(&d->refcnt))
+ d->destruct(d);
+}
+
+static inline void hci_dev_put(struct hci_dev *d)
+{
+ __hci_dev_put(d);
+ module_put(d->owner);
+}
+
+static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
+{
+ atomic_inc(&d->refcnt);
+ return d;
+}
+
+static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
+{
+ if (try_module_get(d->owner))
+ return __hci_dev_hold(d);
+ return NULL;
+}
+
+#define hci_dev_lock(d) spin_lock(&d->lock)
+#define hci_dev_unlock(d) spin_unlock(&d->lock)
+#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
+#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
+
+struct hci_dev *hci_dev_get(int index);
+struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
+
+struct hci_dev *hci_alloc_dev(void);
+void hci_free_dev(struct hci_dev *hdev);
+int hci_register_dev(struct hci_dev *hdev);
+int hci_unregister_dev(struct hci_dev *hdev);
+int hci_suspend_dev(struct hci_dev *hdev);
+int hci_resume_dev(struct hci_dev *hdev);
+int hci_dev_open(__u16 dev);
+int hci_dev_close(__u16 dev);
+int hci_dev_reset(__u16 dev);
+int hci_dev_reset_stat(__u16 dev);
+int hci_dev_cmd(unsigned int cmd, void __user *arg);
+int hci_get_dev_list(void __user *arg);
+int hci_get_dev_info(void __user *arg);
+int hci_get_conn_list(void __user *arg);
+int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
+int hci_inquiry(void __user *arg);
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
+
+/* Receive frame from HCI drivers */
+static inline int hci_recv_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev = (struct hci_dev *) skb->dev;
+ if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
+ && !test_bit(HCI_INIT, &hdev->flags))) {
+ kfree_skb(skb);
+ return -ENXIO;
+ }
+
+ /* Incoming skb */
+ bt_cb(skb)->incoming = 1;
+
+ /* Time stamp */
+ do_gettimeofday(&skb->stamp);
+
+ /* Queue frame for rx task */
+ skb_queue_tail(&hdev->rx_q, skb);
+ hci_sched_rx(hdev);
+ return 0;
+}
+
+int hci_register_sysfs(struct hci_dev *hdev);
+void hci_unregister_sysfs(struct hci_dev *hdev);
+
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))
+
+/* ----- LMP capabilities ----- */
+#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
+#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)
+
+/* ----- HCI protocols ----- */
+struct hci_proto {
+ char *name;
+ unsigned int id;
+ unsigned long flags;
+
+ void *priv;
+
+ int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
+ int (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
+ int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+ int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
+ int (*auth_cfm) (struct hci_conn *conn, __u8 status);
+ int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
+};
+
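+/*
+ * The helpers below fan each event out to the registered L2CAP and SCO
+ * protocol handlers; connect_ind replies are OR-ed together into a mask.
+ */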
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+{
+ register struct hci_proto *hp;
+ int mask = 0;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->connect_ind)
+ mask |= hp->connect_ind(hdev, bdaddr, type);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->connect_ind)
+ mask |= hp->connect_ind(hdev, bdaddr, type);
+
+ return mask;
+}
+
+static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
+{
+ register struct hci_proto *hp;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->connect_cfm)
+ hp->connect_cfm(conn, status);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->connect_cfm)
+ hp->connect_cfm(conn, status);
+}
+
+static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
+{
+ register struct hci_proto *hp;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->disconn_ind)
+ hp->disconn_ind(conn, reason);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->disconn_ind)
+ hp->disconn_ind(conn, reason);
+}
+
+static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+{
+ register struct hci_proto *hp;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->auth_cfm)
+ hp->auth_cfm(conn, status);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->auth_cfm)
+ hp->auth_cfm(conn, status);
+}
+
+static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
+{
+ register struct hci_proto *hp;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->encrypt_cfm)
+ hp->encrypt_cfm(conn, status);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->encrypt_cfm)
+ hp->encrypt_cfm(conn, status);
+}
+
+int hci_register_proto(struct hci_proto *hproto);
+int hci_unregister_proto(struct hci_proto *hproto);
+
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
+
+ char *name;
+
+ void (*auth_cfm) (struct hci_conn *conn, __u8 status);
+ void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
+
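+/*
+ * Confirmation helpers: notify the protocol layers where applicable, then
+ * walk hci_cb_list under the read lock and call any registered callbacks.
+ */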
+static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct list_head *p;
+
+ hci_proto_auth_cfm(conn, status);
+
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ if (cb->auth_cfm)
+ cb->auth_cfm(conn, status);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+}
+
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+{
+ struct list_head *p;
+
+ hci_proto_encrypt_cfm(conn, status);
+
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ if (cb->encrypt_cfm)
+ cb->encrypt_cfm(conn, status, encrypt);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+}
+
+static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+{
+ struct list_head *p;
+
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ if (cb->key_change_cfm)
+ cb->key_change_cfm(conn, status);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+}
+
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
+{
+ struct list_head *p;
+
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ if (cb->role_switch_cfm)
+ cb->role_switch_cfm(conn, status, role);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+}
+
+int hci_register_cb(struct hci_cb *hcb);
+int hci_unregister_cb(struct hci_cb *hcb);
+
+int hci_register_notifier(struct notifier_block *nb);
+int hci_unregister_notifier(struct notifier_block *nb);
+
+int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
+int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
+
+void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
+
+/* ----- HCI Sockets ----- */
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+
+/* HCI info for socket */
+#define hci_pi(sk) ((struct hci_pinfo *) sk)
+
+struct hci_pinfo {
+ struct bt_sock bt;
+ struct hci_dev *hdev;
+ struct hci_filter filter;
+ __u32 cmsg_mask;
+};
+
+/* HCI security filter */
+#define HCI_SFLT_MAX_OGF 5
+
+struct hci_sec_filter {
+ __u32 type_mask;
+ __u32 event_mask[2];
+ __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
+};
+
+/* ----- HCI requests ----- */
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
+#define hci_req_lock(d) down(&d->req_lock)
+#define hci_req_unlock(d) up(&d->req_lock)
+
+void hci_req_complete(struct hci_dev *hdev, int result);
+
+#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
new file mode 100644
index 000000000000..8242a0ee1f58
--- /dev/null
+++ b/include/net/bluetooth/l2cap.h
@@ -0,0 +1,238 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __L2CAP_H
+#define __L2CAP_H
+
+/* L2CAP defaults */
+#define L2CAP_DEFAULT_MTU 672
+#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
+
+#define L2CAP_CONN_TIMEOUT (HZ * 40)
+
+/* L2CAP socket address */
+struct sockaddr_l2 {
+ sa_family_t l2_family;
+ unsigned short l2_psm;
+ bdaddr_t l2_bdaddr;
+};
+
+/* L2CAP socket options */
+#define L2CAP_OPTIONS 0x01
+struct l2cap_options {
+ __u16 omtu;
+ __u16 imtu;
+ __u16 flush_to;
+ __u8 mode;
+};
+
+#define L2CAP_CONNINFO 0x02
+struct l2cap_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define L2CAP_LM 0x03
+#define L2CAP_LM_MASTER 0x0001
+#define L2CAP_LM_AUTH 0x0002
+#define L2CAP_LM_ENCRYPT 0x0004
+#define L2CAP_LM_TRUSTED 0x0008
+#define L2CAP_LM_RELIABLE 0x0010
+#define L2CAP_LM_SECURE 0x0020
+
+/* L2CAP command codes */
+#define L2CAP_COMMAND_REJ 0x01
+#define L2CAP_CONN_REQ 0x02
+#define L2CAP_CONN_RSP 0x03
+#define L2CAP_CONF_REQ 0x04
+#define L2CAP_CONF_RSP 0x05
+#define L2CAP_DISCONN_REQ 0x06
+#define L2CAP_DISCONN_RSP 0x07
+#define L2CAP_ECHO_REQ 0x08
+#define L2CAP_ECHO_RSP 0x09
+#define L2CAP_INFO_REQ 0x0a
+#define L2CAP_INFO_RSP 0x0b
+
+/* L2CAP structures */
+struct l2cap_hdr {
+ __u16 len;
+ __u16 cid;
+} __attribute__ ((packed));
+#define L2CAP_HDR_SIZE 4
+
+struct l2cap_cmd_hdr {
+ __u8 code;
+ __u8 ident;
+ __u16 len;
+} __attribute__ ((packed));
+#define L2CAP_CMD_HDR_SIZE 4
+
+struct l2cap_cmd_rej {
+ __u16 reason;
+} __attribute__ ((packed));
+
+struct l2cap_conn_req {
+ __u16 psm;
+ __u16 scid;
+} __attribute__ ((packed));
+
+struct l2cap_conn_rsp {
+ __u16 dcid;
+ __u16 scid;
+ __u16 result;
+ __u16 status;
+} __attribute__ ((packed));
+
+/* connect result */
+#define L2CAP_CR_SUCCESS 0x0000
+#define L2CAP_CR_PEND 0x0001
+#define L2CAP_CR_BAD_PSM 0x0002
+#define L2CAP_CR_SEC_BLOCK 0x0003
+#define L2CAP_CR_NO_MEM 0x0004
+
+/* connect status */
+#define L2CAP_CS_NO_INFO 0x0000
+#define L2CAP_CS_AUTHEN_PEND 0x0001
+#define L2CAP_CS_AUTHOR_PEND 0x0002
+
+struct l2cap_conf_req {
+ __u16 dcid;
+ __u16 flags;
+ __u8 data[0];
+} __attribute__ ((packed));
+
+struct l2cap_conf_rsp {
+ __u16 scid;
+ __u16 flags;
+ __u16 result;
+ __u8 data[0];
+} __attribute__ ((packed));
+
+#define L2CAP_CONF_SUCCESS 0x00
+#define L2CAP_CONF_UNACCEPT 0x01
+
+struct l2cap_conf_opt {
+ __u8 type;
+ __u8 len;
+ __u8 val[0];
+} __attribute__ ((packed));
+#define L2CAP_CONF_OPT_SIZE 2
+
+#define L2CAP_CONF_MTU 0x01
+#define L2CAP_CONF_FLUSH_TO 0x02
+#define L2CAP_CONF_QOS 0x03
+#define L2CAP_CONF_RFC 0x04
+
+#define L2CAP_CONF_MAX_SIZE 22
+
+struct l2cap_disconn_req {
+ __u16 dcid;
+ __u16 scid;
+} __attribute__ ((packed));
+
+struct l2cap_disconn_rsp {
+ __u16 dcid;
+ __u16 scid;
+} __attribute__ ((packed));
+
+struct l2cap_info_req {
+ __u16 type;
+ __u8 data[0];
+} __attribute__ ((packed));
+
+struct l2cap_info_rsp {
+ __u16 type;
+ __u16 result;
+ __u8 data[0];
+} __attribute__ ((packed));
+
+/* info type */
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+
+/* info result */
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
+
+/* ----- L2CAP connections ----- */
+struct l2cap_chan_list {
+ struct sock *head;
+ rwlock_t lock;
+ long num;
+};
+
+struct l2cap_conn {
+ struct hci_conn *hcon;
+
+ bdaddr_t *dst;
+ bdaddr_t *src;
+
+ unsigned int mtu;
+
+ spinlock_t lock;
+
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 rx_ident;
+ __u8 tx_ident;
+
+ struct l2cap_chan_list chan_list;
+};
+
+/* ----- L2CAP channel and socket info ----- */
+#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
+struct l2cap_pinfo {
+ struct bt_sock bt;
+ __u16 psm;
+ __u16 dcid;
+ __u16 scid;
+
+ __u16 imtu;
+ __u16 omtu;
+ __u16 flush_to;
+
+ __u32 link_mode;
+
+ __u8 conf_state;
+ __u8 conf_retry;
+ __u16 conf_mtu;
+
+ __u8 ident;
+
+ __u16 sport;
+
+ struct l2cap_conn *conn;
+ struct sock *next_c;
+ struct sock *prev_c;
+};
+
+#define L2CAP_CONF_REQ_SENT 0x01
+#define L2CAP_CONF_INPUT_DONE 0x02
+#define L2CAP_CONF_OUTPUT_DONE 0x04
+#define L2CAP_CONF_MAX_RETRIES 2
+
+void l2cap_load(void);
+
+#endif /* __L2CAP_H */
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
new file mode 100644
index 000000000000..13669bad00b3
--- /dev/null
+++ b/include/net/bluetooth/rfcomm.h
@@ -0,0 +1,353 @@
+/*
+ RFCOMM implementation for Linux Bluetooth stack (BlueZ).
+ Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
+ Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __RFCOMM_H
+#define __RFCOMM_H
+
+#define RFCOMM_PSM 3
+
+#define RFCOMM_CONN_TIMEOUT (HZ * 30)
+#define RFCOMM_DISC_TIMEOUT (HZ * 20)
+#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
+
+#define RFCOMM_DEFAULT_MTU 127
+#define RFCOMM_DEFAULT_CREDITS 7
+
+#define RFCOMM_MAX_L2CAP_MTU 1024
+#define RFCOMM_MAX_CREDITS 40
+
+#define RFCOMM_SKB_HEAD_RESERVE 8
+#define RFCOMM_SKB_TAIL_RESERVE 2
+#define RFCOMM_SKB_RESERVE (RFCOMM_SKB_HEAD_RESERVE + RFCOMM_SKB_TAIL_RESERVE)
+
+#define RFCOMM_SABM 0x2f
+#define RFCOMM_DISC 0x43
+#define RFCOMM_UA 0x63
+#define RFCOMM_DM 0x0f
+#define RFCOMM_UIH 0xef
+
+#define RFCOMM_TEST 0x08
+#define RFCOMM_FCON 0x28
+#define RFCOMM_FCOFF 0x18
+#define RFCOMM_MSC 0x38
+#define RFCOMM_RPN 0x24
+#define RFCOMM_RLS 0x14
+#define RFCOMM_PN 0x20
+#define RFCOMM_NSC 0x04
+
+#define RFCOMM_V24_FC 0x02
+#define RFCOMM_V24_RTC 0x04
+#define RFCOMM_V24_RTR 0x08
+#define RFCOMM_V24_IC 0x40
+#define RFCOMM_V24_DV 0x80
+
+#define RFCOMM_RPN_BR_2400 0x0
+#define RFCOMM_RPN_BR_4800 0x1
+#define RFCOMM_RPN_BR_7200 0x2
+#define RFCOMM_RPN_BR_9600 0x3
+#define RFCOMM_RPN_BR_19200 0x4
+#define RFCOMM_RPN_BR_38400 0x5
+#define RFCOMM_RPN_BR_57600 0x6
+#define RFCOMM_RPN_BR_115200 0x7
+#define RFCOMM_RPN_BR_230400 0x8
+
+#define RFCOMM_RPN_DATA_5 0x0
+#define RFCOMM_RPN_DATA_6 0x1
+#define RFCOMM_RPN_DATA_7 0x2
+#define RFCOMM_RPN_DATA_8 0x3
+
+#define RFCOMM_RPN_STOP_1 0
+#define RFCOMM_RPN_STOP_15 1
+
+#define RFCOMM_RPN_PARITY_NONE 0x0
+#define RFCOMM_RPN_PARITY_ODD 0x4
+#define RFCOMM_RPN_PARITY_EVEN 0x5
+#define RFCOMM_RPN_PARITY_MARK 0x6
+#define RFCOMM_RPN_PARITY_SPACE 0x7
+
+#define RFCOMM_RPN_FLOW_NONE 0x00
+
+#define RFCOMM_RPN_XON_CHAR 0x11
+#define RFCOMM_RPN_XOFF_CHAR 0x13
+
+#define RFCOMM_RPN_PM_BITRATE 0x0001
+#define RFCOMM_RPN_PM_DATA 0x0002
+#define RFCOMM_RPN_PM_STOP 0x0004
+#define RFCOMM_RPN_PM_PARITY 0x0008
+#define RFCOMM_RPN_PM_PARITY_TYPE 0x0010
+#define RFCOMM_RPN_PM_XON 0x0020
+#define RFCOMM_RPN_PM_XOFF 0x0040
+#define RFCOMM_RPN_PM_FLOW 0x3F00
+
+#define RFCOMM_RPN_PM_ALL 0x3F7F
+
+struct rfcomm_hdr {
+ u8 addr;
+ u8 ctrl;
+ u8 len; // Actual size can be 2 bytes
+} __attribute__ ((packed));
+
+struct rfcomm_cmd {
+ u8 addr;
+ u8 ctrl;
+ u8 len;
+ u8 fcs;
+} __attribute__ ((packed));
+
+struct rfcomm_mcc {
+ u8 type;
+ u8 len;
+} __attribute__ ((packed));
+
+struct rfcomm_pn {
+ u8 dlci;
+ u8 flow_ctrl;
+ u8 priority;
+ u8 ack_timer;
+ u16 mtu;
+ u8 max_retrans;
+ u8 credits;
+} __attribute__ ((packed));
+
+struct rfcomm_rpn {
+ u8 dlci;
+ u8 bit_rate;
+ u8 line_settings;
+ u8 flow_ctrl;
+ u8 xon_char;
+ u8 xoff_char;
+ u16 param_mask;
+} __attribute__ ((packed));
+
+struct rfcomm_rls {
+ u8 dlci;
+ u8 status;
+} __attribute__ ((packed));
+
+struct rfcomm_msc {
+ u8 dlci;
+ u8 v24_sig;
+} __attribute__ ((packed));
+
+/* ---- Core structures, flags etc ---- */
+
+struct rfcomm_session {
+ struct list_head list;
+ struct socket *sock;
+ unsigned long state;
+ unsigned long flags;
+ atomic_t refcnt;
+ int initiator;
+
+ /* Default DLC parameters */
+ int cfc;
+ uint mtu;
+
+ struct list_head dlcs;
+};
+
+struct rfcomm_dlc {
+ struct list_head list;
+ struct rfcomm_session *session;
+ struct sk_buff_head tx_queue;
+ struct timer_list timer;
+
+ spinlock_t lock;
+ unsigned long state;
+ unsigned long flags;
+ atomic_t refcnt;
+ u8 dlci;
+ u8 addr;
+ u8 priority;
+ u8 v24_sig;
+ u8 mscex;
+
+ u32 link_mode;
+
+ uint mtu;
+ uint cfc;
+ uint rx_credits;
+ uint tx_credits;
+
+ void *owner;
+
+ void (*data_ready)(struct rfcomm_dlc *d, struct sk_buff *skb);
+ void (*state_change)(struct rfcomm_dlc *d, int err);
+ void (*modem_status)(struct rfcomm_dlc *d, u8 v24_sig);
+};
+
+/* DLC and session flags */
+#define RFCOMM_RX_THROTTLED 0
+#define RFCOMM_TX_THROTTLED 1
+#define RFCOMM_TIMED_OUT 2
+#define RFCOMM_MSC_PENDING 3
+#define RFCOMM_AUTH_PENDING 4
+#define RFCOMM_AUTH_ACCEPT 5
+#define RFCOMM_AUTH_REJECT 6
+
+/* Scheduling flags and events */
+#define RFCOMM_SCHED_STATE 0
+#define RFCOMM_SCHED_RX 1
+#define RFCOMM_SCHED_TX 2
+#define RFCOMM_SCHED_TIMEO 3
+#define RFCOMM_SCHED_AUTH 4
+#define RFCOMM_SCHED_WAKEUP 31
+
+/* MSC exchange flags */
+#define RFCOMM_MSCEX_TX 1
+#define RFCOMM_MSCEX_RX 2
+#define RFCOMM_MSCEX_OK (RFCOMM_MSCEX_TX + RFCOMM_MSCEX_RX)
+
+/* CFC states */
+#define RFCOMM_CFC_UNKNOWN -1
+#define RFCOMM_CFC_DISABLED 0
+#define RFCOMM_CFC_ENABLED RFCOMM_MAX_CREDITS
+
+/* ---- RFCOMM DLCs (channels) ---- */
+struct rfcomm_dlc *rfcomm_dlc_alloc(int prio);
+void rfcomm_dlc_free(struct rfcomm_dlc *d);
+int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
+int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
+int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
+
+#define rfcomm_dlc_lock(d) spin_lock(&d->lock)
+#define rfcomm_dlc_unlock(d) spin_unlock(&d->lock)
+
+static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
+{
+ atomic_inc(&d->refcnt);
+}
+
+static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
+{
+ if (atomic_dec_and_test(&d->refcnt))
+ rfcomm_dlc_free(d);
+}
+
+extern void FASTCALL(__rfcomm_dlc_throttle(struct rfcomm_dlc *d));
+extern void FASTCALL(__rfcomm_dlc_unthrottle(struct rfcomm_dlc *d));
+
+static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+{
+ if (!test_and_set_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_throttle(d);
+}
+
+static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+{
+ if (test_and_clear_bit(RFCOMM_RX_THROTTLED, &d->flags))
+ __rfcomm_dlc_unthrottle(d);
+}
+
+/* ---- RFCOMM sessions ---- */
+void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst);
+
+static inline void rfcomm_session_hold(struct rfcomm_session *s)
+{
+ atomic_inc(&s->refcnt);
+}
+
+/* ---- RFCOMM checksum ---- */
+extern u8 rfcomm_crc_table[];
+
+/* ---- RFCOMM sockets ---- */
+struct sockaddr_rc {
+ sa_family_t rc_family;
+ bdaddr_t rc_bdaddr;
+ u8 rc_channel;
+};
+
+#define RFCOMM_CONNINFO 0x02
+struct rfcomm_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+#define RFCOMM_LM 0x03
+#define RFCOMM_LM_MASTER 0x0001
+#define RFCOMM_LM_AUTH 0x0002
+#define RFCOMM_LM_ENCRYPT 0x0004
+#define RFCOMM_LM_TRUSTED 0x0008
+#define RFCOMM_LM_RELIABLE 0x0010
+#define RFCOMM_LM_SECURE 0x0020
+
+#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
+
+struct rfcomm_pinfo {
+ struct bt_sock bt;
+ struct rfcomm_dlc *dlc;
+ u8 channel;
+ u32 link_mode;
+};
+
+int rfcomm_init_sockets(void);
+void rfcomm_cleanup_sockets(void);
+
+int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d);
+
+/* ---- RFCOMM TTY ---- */
+#define RFCOMM_MAX_DEV 256
+
+#define RFCOMMCREATEDEV _IOW('R', 200, int)
+#define RFCOMMRELEASEDEV _IOW('R', 201, int)
+#define RFCOMMGETDEVLIST _IOR('R', 210, int)
+#define RFCOMMGETDEVINFO _IOR('R', 211, int)
+#define RFCOMMSTEALDLC _IOW('R', 220, int)
+
+#define RFCOMM_REUSE_DLC 0
+#define RFCOMM_RELEASE_ONHUP 1
+#define RFCOMM_HANGUP_NOW 2
+#define RFCOMM_TTY_ATTACHED 3
+
+struct rfcomm_dev_req {
+ s16 dev_id;
+ u32 flags;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
+
+struct rfcomm_dev_info {
+ s16 id;
+ u32 flags;
+ u16 state;
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+};
+
+struct rfcomm_dev_list_req {
+ u16 dev_num;
+ struct rfcomm_dev_info dev_info[0];
+};
+
+int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+int rfcomm_init_ttys(void);
+void rfcomm_cleanup_ttys(void);
+
+extern struct proc_dir_entry *proc_bt_rfcomm;
+
+#endif /* __RFCOMM_H */
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
new file mode 100644
index 000000000000..e28a2a771471
--- /dev/null
+++ b/include/net/bluetooth/sco.h
@@ -0,0 +1,79 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __SCO_H
+#define __SCO_H
+
+/* SCO defaults */
+#define SCO_DEFAULT_MTU 500
+#define SCO_DEFAULT_FLUSH_TO 0xFFFF
+
+#define SCO_CONN_TIMEOUT (HZ * 40)
+#define SCO_DISCONN_TIMEOUT (HZ * 2)
+#define SCO_CONN_IDLE_TIMEOUT (HZ * 60)
+
+/* SCO socket address */
+struct sockaddr_sco {
+ sa_family_t sco_family;
+ bdaddr_t sco_bdaddr;
+};
+
+/* SCO socket options */
+#define SCO_OPTIONS 0x01
+struct sco_options {
+ __u16 mtu;
+};
+
+#define SCO_CONNINFO 0x02
+struct sco_conninfo {
+ __u16 hci_handle;
+ __u8 dev_class[3];
+};
+
+/* ---- SCO connections ---- */
+struct sco_conn {
+ struct hci_conn *hcon;
+
+ bdaddr_t *dst;
+ bdaddr_t *src;
+
+ spinlock_t lock;
+ struct sock *sk;
+
+ unsigned int mtu;
+};
+
+#define sco_conn_lock(c) spin_lock(&c->lock)
+#define sco_conn_unlock(c) spin_unlock(&c->lock)
+
+/* ----- SCO socket info ----- */
+#define sco_pi(sk) ((struct sco_pinfo *) sk)
+
+struct sco_pinfo {
+ struct bt_sock bt;
+ __u32 flags;
+ struct sco_conn *conn;
+};
+
+#endif /* __SCO_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
new file mode 100644
index 000000000000..e3ea7cc2c728
--- /dev/null
+++ b/include/net/checksum.h
@@ -0,0 +1,87 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IP, TCP, UDP and so on
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CHECKSUM_H
+#define _CHECKSUM_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+
+#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+unsigned int csum_and_copy_from_user (const unsigned char __user *src, unsigned char *dst,
+ int len, int sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_READ, src, len))
+ return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return sum;
+}
+#endif
+
+#ifndef HAVE_CSUM_COPY_USER
+static __inline__ unsigned int csum_and_copy_to_user
+(const unsigned char *src, unsigned char __user *dst, int len, unsigned int sum, int *err_ptr)
+{
+ sum = csum_partial(src, len, sum);
+
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (copy_to_user(dst, src, len) == 0)
+ return sum;
+ }
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return -1; /* invalid checksum */
+}
+#endif
+
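+/*
+ * Fold-on-overflow addition used by the Internet checksum: if the 32-bit
+ * add wraps, (csum < addend) is 1 and the carry is added back in.
+ */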
+static inline unsigned int csum_add(unsigned int csum, unsigned int addend)
+{
+ csum += addend;
+ return csum + (csum < addend);
+}
+
+static inline unsigned int csum_sub(unsigned int csum, unsigned int addend)
+{
+ return csum_add(csum, ~addend);
+}
+
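+/*
+ * Combine a sub-block checksum computed at a given byte offset: checksums
+ * are over 16-bit words, so a block that starts at an odd offset has its
+ * bytes swapped relative to the running sum; the rotate below realigns it.
+ */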
+static inline unsigned int
+csum_block_add(unsigned int csum, unsigned int csum2, int offset)
+{
+ if (offset&1)
+ csum2 = ((csum2&0xFF00FF)<<8)+((csum2>>8)&0xFF00FF);
+ return csum_add(csum, csum2);
+}
+
+static inline unsigned int
+csum_block_sub(unsigned int csum, unsigned int csum2, int offset)
+{
+ if (offset&1)
+ csum2 = ((csum2&0xFF00FF)<<8)+((csum2>>8)&0xFF00FF);
+ return csum_sub(csum, csum2);
+}
+
+#endif
diff --git a/include/net/compat.h b/include/net/compat.h
new file mode 100644
index 000000000000..9983fd857804
--- /dev/null
+++ b/include/net/compat.h
@@ -0,0 +1,39 @@
+#ifndef NET_COMPAT_H
+#define NET_COMPAT_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_COMPAT)
+
+#include <linux/compat.h>
+
+struct compat_msghdr {
+ compat_uptr_t msg_name; /* void * */
+ compat_int_t msg_namelen;
+ compat_uptr_t msg_iov; /* struct compat_iovec * */
+ compat_size_t msg_iovlen;
+ compat_uptr_t msg_control; /* void * */
+ compat_size_t msg_controllen;
+ compat_uint_t msg_flags;
+};
+
+struct compat_cmsghdr {
+ compat_size_t cmsg_len;
+ compat_int_t cmsg_level;
+ compat_int_t cmsg_type;
+};
+
+#else /* defined(CONFIG_COMPAT) */
+#define compat_msghdr msghdr /* to avoid compiler warnings */
+#endif /* defined(CONFIG_COMPAT) */
+
+extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+extern int verify_compat_iovec(struct msghdr *, struct iovec *, char *, int);
+extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
+extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
+extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
+extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, unsigned char *,
+ int);
+
+#endif /* NET_COMPAT_H */
diff --git a/include/net/datalink.h b/include/net/datalink.h
new file mode 100644
index 000000000000..5797ba3d2eb5
--- /dev/null
+++ b/include/net/datalink.h
@@ -0,0 +1,18 @@
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
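+/*
+ * Describes one datalink framing (e.g. Ethernet II or 802.2 LLC/SNAP):
+ * type[] holds the protocol identifier bytes, header_length the size of
+ * the header that request() prepends before transmission, and rcvfunc()
+ * is the receive hook for that framing.
+ */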
+struct datalink_proto {
+ unsigned char type[8];
+
+ struct llc_sap *sap;
+
+ unsigned short header_length;
+
+ int (*rcvfunc)(struct sk_buff *, struct net_device *,
+ struct packet_type *);
+ int (*request)(struct datalink_proto *, struct sk_buff *,
+ unsigned char *);
+ struct list_head node;
+};
+
+#endif
diff --git a/include/net/dn.h b/include/net/dn.h
new file mode 100644
index 000000000000..5551c46db397
--- /dev/null
+++ b/include/net/dn.h
@@ -0,0 +1,236 @@
+#ifndef _NET_DN_H
+#define _NET_DN_H
+
+#include <linux/dn.h>
+#include <net/sock.h>
+#include <asm/byteorder.h>
+
+typedef unsigned short dn_address;
+
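+/*
+ * DECnet node addresses are 16 bits, little-endian on the wire, hence the
+ * le16 conversion helpers below.
+ */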
+#define dn_ntohs(x) le16_to_cpu((unsigned short)(x))
+#define dn_htons(x) cpu_to_le16((unsigned short)(x))
+
+struct dn_scp /* Session Control Port */
+{
+ unsigned char state;
+#define DN_O 1 /* Open */
+#define DN_CR 2 /* Connect Receive */
+#define DN_DR 3 /* Disconnect Reject */
+#define DN_DRC 4 /* Discon. Rej. Complete*/
+#define DN_CC 5 /* Connect Confirm */
+#define DN_CI 6 /* Connect Initiate */
+#define DN_NR 7 /* No resources */
+#define DN_NC 8 /* No communication */
+#define DN_CD 9 /* Connect Delivery */
+#define DN_RJ 10 /* Rejected */
+#define DN_RUN 11 /* Running */
+#define DN_DI 12 /* Disconnect Initiate */
+#define DN_DIC 13 /* Disconnect Complete */
+#define DN_DN 14 /* Disconnect Notification */
+#define DN_CL 15 /* Closed */
+#define DN_CN 16 /* Closed Notification */
+
+ unsigned short addrloc;
+ unsigned short addrrem;
+ unsigned short numdat;
+ unsigned short numoth;
+ unsigned short numoth_rcv;
+ unsigned short numdat_rcv;
+ unsigned short ackxmt_dat;
+ unsigned short ackxmt_oth;
+ unsigned short ackrcv_dat;
+ unsigned short ackrcv_oth;
+ unsigned char flowrem_sw;
+ unsigned char flowloc_sw;
+#define DN_SEND 2
+#define DN_DONTSEND 1
+#define DN_NOCHANGE 0
+ unsigned short flowrem_dat;
+ unsigned short flowrem_oth;
+ unsigned short flowloc_dat;
+ unsigned short flowloc_oth;
+ unsigned char services_rem;
+ unsigned char services_loc;
+ unsigned char info_rem;
+ unsigned char info_loc;
+
+ unsigned short segsize_rem;
+ unsigned short segsize_loc;
+
+ unsigned char nonagle;
+ unsigned char multi_ireq;
+ unsigned char accept_mode;
+ unsigned long seg_total; /* Running total of current segment */
+
+ struct optdata_dn conndata_in;
+ struct optdata_dn conndata_out;
+ struct optdata_dn discdata_in;
+ struct optdata_dn discdata_out;
+ struct accessdata_dn accessdata;
+
+ struct sockaddr_dn addr; /* Local address */
+ struct sockaddr_dn peer; /* Remote address */
+
+ /*
+ * In this case the RTT estimation is not specified in the
+ * docs, nor is any backoff algorithm. Here we follow well-known
+ * TCP algorithms with a few small variations.
+ *
+ * snd_window: Max number of packets we send before we wait for
+ * an ack to come back. This will become part of a
+ * more complicated scheme when we support flow
+ * control.
+ *
+ * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
+ * average.
+ * nsp_rttvar: Round-Trip-Time-Variance (x4) in jiffies. This is the
+ * variance of the smoothed average (but calculated in
+ * a simpler way than for normal statistical variance
+ * calculations).
+ *
+ * nsp_rxtshift: Backoff counter. Normally zero; each time
+ * a packet is lost it increases by one until an ack
+ * is received. It's used to index an array of backoff
+ * multipliers.
+ */
+#define NSP_MIN_WINDOW 1
+#define NSP_MAX_WINDOW (0x07fe)
+ unsigned long max_window;
+ unsigned long snd_window;
+#define NSP_INITIAL_SRTT (HZ)
+ unsigned long nsp_srtt;
+#define NSP_INITIAL_RTTVAR (HZ*3)
+ unsigned long nsp_rttvar;
+#define NSP_MAXRXTSHIFT 12
+ unsigned long nsp_rxtshift;
+
+ /*
+ * Output queues, one for data, one for otherdata/linkservice
+ */
+ struct sk_buff_head data_xmit_queue;
+ struct sk_buff_head other_xmit_queue;
+
+ /*
+ * Input queue for other data
+ */
+ struct sk_buff_head other_receive_queue;
+ int other_report;
+
+ /*
+ * Stuff to do with the slow timer
+ */
+ unsigned long stamp; /* time of last transmit */
+ unsigned long persist;
+ int (*persist_fxn)(struct sock *sk);
+ unsigned long keepalive;
+ void (*keepalive_fxn)(struct sock *sk);
+
+ /*
+ * This stuff is for the fast timer for delayed acks
+ */
+ struct timer_list delack_timer;
+ int delack_pending;
+ void (*delack_fxn)(struct sock *sk);
+
+};
+
+static inline struct dn_scp *DN_SK(struct sock *sk)
+{
+ return (struct dn_scp *)(sk + 1);
+}
+
+/*
+ * src,dst : Source and Destination DECnet addresses
+ * hops : Number of hops through the network
+ * dst_port, src_port : NSP port numbers
+ * services, info : Useful data extracted from conninit messages
+ * rt_flags : Routing flags byte
+ * nsp_flags : NSP layer flags byte
+ * segsize : Size of segment
+ * segnum : Number, for data, otherdata and linkservice
+ * xmit_count : Number of times we've transmitted this skb
+ * stamp : Time stamp of most recent transmission, used in RTT calculations
+ * iif: Input interface number
+ *
+ * As a general policy, this structure keeps all addresses in network
+ * byte order, and all else in host byte order. Thus dst, src, dst_port
+ * and src_port are in network order. All else is in host order.
+ *
+ */
+#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
+struct dn_skb_cb {
+ unsigned short dst;
+ unsigned short src;
+ unsigned short hops;
+ unsigned short dst_port;
+ unsigned short src_port;
+ unsigned char services;
+ unsigned char info;
+ unsigned char rt_flags;
+ unsigned char nsp_flags;
+ unsigned short segsize;
+ unsigned short segnum;
+ unsigned short xmit_count;
+ unsigned long stamp;
+ int iif;
+};
+
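+/*
+ * DECnet nodes use Ethernet addresses with the fixed AA:00:04:00 prefix;
+ * the 16-bit node address lives in the last two bytes, low byte first
+ * (see dn_dn2eth() below for the reverse mapping).
+ */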
+static inline dn_address dn_eth2dn(unsigned char *ethaddr)
+{
+ return ethaddr[4] | (ethaddr[5] << 8);
+}
+
+static inline dn_address dn_saddr2dn(struct sockaddr_dn *saddr)
+{
+ return *(dn_address *)saddr->sdn_nodeaddr;
+}
+
+static inline void dn_dn2eth(unsigned char *ethaddr, dn_address addr)
+{
+ ethaddr[0] = 0xAA;
+ ethaddr[1] = 0x00;
+ ethaddr[2] = 0x04;
+ ethaddr[3] = 0x00;
+ ethaddr[4] = (unsigned char)(addr & 0xff);
+ ethaddr[5] = (unsigned char)(addr >> 8);
+}
+
+static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
+{
+ fl->uli_u.dnports.sport = scp->addrloc;
+ fl->uli_u.dnports.dport = scp->addrrem;
+ fl->uli_u.dnports.objnum = scp->addr.sdn_objnum;
+ if (fl->uli_u.dnports.objnum == 0) {
+ fl->uli_u.dnports.objnamel = scp->addr.sdn_objnamel;
+ memcpy(fl->uli_u.dnports.objname, scp->addr.sdn_objname, 16);
+ }
+}
+
+extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
+
+#define DN_MENUVER_ACC 0x01
+#define DN_MENUVER_USR 0x02
+#define DN_MENUVER_PRX 0x04
+#define DN_MENUVER_UIC 0x08
+
+extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+#define DN_ASCBUF_LEN 9
+extern char *dn_addr2asc(dn_address, char *);
+extern int dn_destroy_timer(struct sock *sk);
+
+extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
+extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+
+extern void dn_start_slow_timer(struct sock *sk);
+extern void dn_stop_slow_timer(struct sock *sk);
+
+extern dn_address decnet_address;
+extern int decnet_debug_level;
+extern int decnet_time_wait;
+extern int decnet_dn_count;
+extern int decnet_di_count;
+extern int decnet_dr_count;
+extern int decnet_no_fc_max_cwnd;
+
+#endif /* _NET_DN_H */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
new file mode 100644
index 000000000000..86e8e86e624a
--- /dev/null
+++ b/include/net/dn_dev.h
@@ -0,0 +1,194 @@
+#ifndef _NET_DN_DEV_H
+#define _NET_DN_DEV_H
+
+
+struct dn_dev;
+
+struct dn_ifaddr {
+ struct dn_ifaddr *ifa_next;
+ struct dn_dev *ifa_dev;
+ dn_address ifa_local;
+ dn_address ifa_address;
+ unsigned char ifa_flags;
+ unsigned char ifa_scope;
+ char ifa_label[IFNAMSIZ];
+};
+
+#define DN_DEV_S_RU 0 /* Run - working normally */
+#define DN_DEV_S_CR 1 /* Circuit Rejected */
+#define DN_DEV_S_DS 2 /* Data Link Start */
+#define DN_DEV_S_RI 3 /* Routing Layer Initialize */
+#define DN_DEV_S_RV 4 /* Routing Layer Verify */
+#define DN_DEV_S_RC 5 /* Routing Layer Complete */
+#define DN_DEV_S_OF 6 /* Off */
+#define DN_DEV_S_HA 7 /* Halt */
+
+
+/*
+ * The dn_dev_parms structure contains the set of parameters
+ * for each device (hence inclusion in the dn_dev structure)
+ * and an array is used to store the default types of supported
+ * device (in dn_dev.c).
+ *
+ * The type field matches the ARPHRD_ constants and is used in
+ * searching the list for supported devices when new devices
+ * come up.
+ *
+ * The mode field is used to find out if a device is broadcast,
+ * multipoint, or pointopoint. Please note that DECnet thinks
+ * about devices differently from the rest of the kernel,
+ * so the normal IFF_xxx flags are invalid here. For devices
+ * which can be any combination of the previously mentioned
+ * attributes, you can set this on a per device basis by
+ * installing an up() routine.
+ *
+ * The device state field, defines the initial state in which the
+ * device will come up. In the dn_dev structure, it is the actual
+ * state.
+ *
+ * Things have changed here. I've killed timer1 since it's a user space
+ * issue for a user space routing daemon to sort out. The kernel does
+ * not need to be bothered with it.
+ *
+ * Timers:
+ * t2 - Rate limit timer, min time between routing and hello messages
+ * t3 - Hello timer, send hello messages when it expires
+ *
+ * Callbacks:
+ * up() - Called to initialize device, return value can veto use of
+ * device with DECnet.
+ * down() - Called to turn device off when it goes down
+ * timer3() - Called once for each ifaddr when timer 3 goes off
+ *
+ * sysctl - Hook for sysctl things
+ *
+ */
+struct dn_dev_parms {
+ int type; /* ARPHRD_xxx */
+ int mode; /* Broadcast, Unicast, Multipoint */
+#define DN_DEV_BCAST 1
+#define DN_DEV_UCAST 2
+#define DN_DEV_MPOINT 4
+ int state; /* Initial state */
+ int forwarding; /* 0=EndNode, 1=L1Router, 2=L2Router */
+ unsigned long t2; /* Default value of t2 */
+ unsigned long t3; /* Default value of t3 */
+ int priority; /* Priority to be a router */
+ char *name; /* Name for sysctl */
+ int ctl_name; /* Index for sysctl */
+ int (*up)(struct net_device *);
+ void (*down)(struct net_device *);
+ void (*timer3)(struct net_device *, struct dn_ifaddr *ifa);
+ void *sysctl;
+};
+
+
+struct dn_dev {
+ struct dn_ifaddr *ifa_list;
+ struct net_device *dev;
+ struct dn_dev_parms parms;
+ char use_long;
+ struct timer_list timer;
+ unsigned long t3;
+ struct neigh_parms *neigh_parms;
+ unsigned char addr[ETH_ALEN];
+ struct neighbour *router; /* Default router on circuit */
+ struct neighbour *peer; /* Peer on pointopoint links */
+ unsigned long uptime; /* Time device went up in jiffies */
+};
+
+struct dn_short_packet
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned short dstnode __attribute__((packed));
+ unsigned short srcnode __attribute__((packed));
+ unsigned char forward __attribute__((packed));
+};
+
+struct dn_long_packet
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned char d_area __attribute__((packed));
+ unsigned char d_subarea __attribute__((packed));
+ unsigned char d_id[6] __attribute__((packed));
+ unsigned char s_area __attribute__((packed));
+ unsigned char s_subarea __attribute__((packed));
+ unsigned char s_id[6] __attribute__((packed));
+ unsigned char nl2 __attribute__((packed));
+ unsigned char visit_ct __attribute__((packed));
+ unsigned char s_class __attribute__((packed));
+ unsigned char pt __attribute__((packed));
+};
+
+/*------------------------- DRP - Routing messages ---------------------*/
+
+struct endnode_hello_message
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned char tiver[3] __attribute__((packed));
+ unsigned char id[6] __attribute__((packed));
+ unsigned char iinfo __attribute__((packed));
+ unsigned short blksize __attribute__((packed));
+ unsigned char area __attribute__((packed));
+ unsigned char seed[8] __attribute__((packed));
+ unsigned char neighbor[6] __attribute__((packed));
+ unsigned short timer __attribute__((packed));
+ unsigned char mpd __attribute__((packed));
+ unsigned char datalen __attribute__((packed));
+ unsigned char data[2] __attribute__((packed));
+};
+
+struct rtnode_hello_message
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned char tiver[3] __attribute__((packed));
+ unsigned char id[6] __attribute__((packed));
+ unsigned char iinfo __attribute__((packed));
+ unsigned short blksize __attribute__((packed));
+ unsigned char priority __attribute__((packed));
+ unsigned char area __attribute__((packed));
+ unsigned short timer __attribute__((packed));
+ unsigned char mpd __attribute__((packed));
+};
+
+
+extern void dn_dev_init(void);
+extern void dn_dev_cleanup(void);
+
+extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+
+extern void dn_dev_devices_off(void);
+extern void dn_dev_devices_on(void);
+
+extern void dn_dev_init_pkt(struct sk_buff *skb);
+extern void dn_dev_veri_pkt(struct sk_buff *skb);
+extern void dn_dev_hello(struct sk_buff *skb);
+
+extern void dn_dev_up(struct net_device *);
+extern void dn_dev_down(struct net_device *);
+
+extern int dn_dev_set_default(struct net_device *dev, int force);
+extern struct net_device *dn_dev_get_default(void);
+extern int dn_dev_bind_default(dn_address *addr);
+
+extern int register_dnaddr_notifier(struct notifier_block *nb);
+extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+
+static inline int dn_dev_islocal(struct net_device *dev, dn_address addr)
+{
+ struct dn_dev *dn_db = dev->dn_ptr;
+ struct dn_ifaddr *ifa;
+
+ if (dn_db == NULL) {
+ printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
+ return 0;
+ }
+
+ for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next)
+ if ((addr ^ ifa->ifa_local) == 0)
+ return 1;
+
+ return 0;
+}
+
+#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
new file mode 100644
index 000000000000..cd3c96d9601b
--- /dev/null
+++ b/include/net/dn_fib.h
@@ -0,0 +1,204 @@
+#ifndef _NET_DN_FIB_H
+#define _NET_DN_FIB_H
+
+/* WARNING: The ordering of these elements must match ordering
+ * of RTA_* rtnetlink attribute numbers.
+ */
+struct dn_kern_rta
+{
+ void *rta_dst;
+ void *rta_src;
+ int *rta_iif;
+ int *rta_oif;
+ void *rta_gw;
+ u32 *rta_priority;
+ void *rta_prefsrc;
+ struct rtattr *rta_mx;
+ struct rtattr *rta_mp;
+ unsigned char *rta_protoinfo;
+ u32 *rta_flow;
+ struct rta_cacheinfo *rta_ci;
+ struct rta_session *rta_sess;
+};
+
+struct dn_fib_res {
+ struct dn_fib_rule *r;
+ struct dn_fib_info *fi;
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+};
+
+struct dn_fib_nh {
+ struct net_device *nh_dev;
+ unsigned nh_flags;
+ unsigned char nh_scope;
+ int nh_weight;
+ int nh_power;
+ int nh_oif;
+ u32 nh_gw;
+};
+
+struct dn_fib_info {
+ struct dn_fib_info *fib_next;
+ struct dn_fib_info *fib_prev;
+ int fib_treeref;
+ atomic_t fib_clntref;
+ int fib_dead;
+ unsigned fib_flags;
+ int fib_protocol;
+ dn_address fib_prefsrc;
+ __u32 fib_priority;
+ __u32 fib_metrics[RTAX_MAX];
+#define dn_fib_mtu fib_metrics[RTAX_MTU-1]
+#define dn_fib_window fib_metrics[RTAX_WINDOW-1]
+#define dn_fib_rtt fib_metrics[RTAX_RTT-1]
+#define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1]
+ int fib_nhs;
+ int fib_power;
+ struct dn_fib_nh fib_nh[0];
+#define dn_fib_dev fib_nh[0].nh_dev
+};
+
+
+#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
+#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+
+#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
+#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
+#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
+#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
+
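+/*
+ * Keys, hash bucket numbers and indices are all 16-bit values; wrapping
+ * them in distinct struct types keeps them from being mixed up implicitly.
+ */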
+typedef struct {
+ u16 datum;
+} dn_fib_key_t;
+
+typedef struct {
+ u16 datum;
+} dn_fib_hash_t;
+
+typedef struct {
+ u16 datum;
+} dn_fib_idx_t;
+
+struct dn_fib_node {
+ struct dn_fib_node *fn_next;
+ struct dn_fib_info *fn_info;
+#define DN_FIB_INFO(f) ((f)->fn_info)
+ dn_fib_key_t fn_key;
+ u8 fn_type;
+ u8 fn_scope;
+ u8 fn_state;
+};
+
+
+struct dn_fib_table {
+ int n;
+
+ int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
+ struct dn_kern_rta *rta, struct nlmsghdr *n,
+ struct netlink_skb_parms *req);
+ int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
+ struct dn_kern_rta *rta, struct nlmsghdr *n,
+ struct netlink_skb_parms *req);
+ int (*lookup)(struct dn_fib_table *t, const struct flowi *fl,
+ struct dn_fib_res *res);
+ int (*flush)(struct dn_fib_table *t);
+ int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
+
+ unsigned char data[0];
+};
+
+#ifdef CONFIG_DECNET_ROUTER
+/*
+ * dn_fib.c
+ */
+extern void dn_fib_init(void);
+extern void dn_fib_cleanup(void);
+
+extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+ struct dn_kern_rta *rta,
+ const struct nlmsghdr *nlh, int *errp);
+extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+ const struct flowi *fl,
+ struct dn_fib_res *res);
+extern void dn_fib_release_info(struct dn_fib_info *fi);
+extern u16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
+extern void dn_fib_flush(void);
+extern void dn_fib_select_multipath(const struct flowi *fl,
+ struct dn_fib_res *res);
+extern int dn_fib_sync_down(dn_address local, struct net_device *dev,
+ int force);
+extern int dn_fib_sync_up(struct net_device *dev);
+
+/*
+ * dn_tables.c
+ */
+extern struct dn_fib_table *dn_fib_get_table(int n, int creat);
+extern struct dn_fib_table *dn_fib_empty_table(void);
+extern void dn_fib_table_init(void);
+extern void dn_fib_table_cleanup(void);
+
+/*
+ * dn_rules.c
+ */
+extern void dn_fib_rules_init(void);
+extern void dn_fib_rules_cleanup(void);
+extern void dn_fib_rule_put(struct dn_fib_rule *);
+extern __u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags);
+extern unsigned dnet_addr_type(__u16 addr);
+extern int dn_fib_lookup(const struct flowi *fl, struct dn_fib_res *res);
+
+/*
+ * rtnetlink interface
+ */
+extern int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+
+extern int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb);
+
+extern void dn_fib_free_info(struct dn_fib_info *fi);
+
+static inline void dn_fib_info_put(struct dn_fib_info *fi)
+{
+ if (atomic_dec_and_test(&fi->fib_clntref))
+ dn_fib_free_info(fi);
+}
+
+static inline void dn_fib_res_put(struct dn_fib_res *res)
+{
+ if (res->fi)
+ dn_fib_info_put(res->fi);
+ if (res->r)
+ dn_fib_rule_put(res->r);
+}
+
+extern struct dn_fib_table *dn_fib_tables[];
+
+#else /* Endnode */
+
+#define dn_fib_init() do { } while(0)
+#define dn_fib_cleanup() do { } while(0)
+
+#define dn_fib_lookup(fl, res) (-ESRCH)
+#define dn_fib_info_put(fi) do { } while(0)
+#define dn_fib_select_multipath(fl, res) do { } while(0)
+#define dn_fib_rules_policy(saddr,res,flags) (0)
+#define dn_fib_res_put(res) do { } while(0)
+
+#endif /* CONFIG_DECNET_ROUTER */
+
+static inline u16 dnet_make_mask(int n)
+{
+ if (n)
+ return htons(~((1<<(16-n))-1));
+ return 0;
+}
+
+#endif /* _NET_DN_FIB_H */
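
As an aside on dnet_make_mask() above: it turns a DECnet prefix length into a 16-bit netmask in network byte order. A minimal user-space sketch of the same arithmetic (standard C99 with libc htons/ntohs; the make_mask name exists only in this example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Same arithmetic as dnet_make_mask(): top n of 16 bits set,
 * returned in network byte order. */
static uint16_t make_mask(int n)
{
    if (n)
        return htons(~((1 << (16 - n)) - 1));
    return 0;
}

int main(void)
{
    for (int n = 0; n <= 16; n += 4)
        printf("/%-2d -> net 0x%04x host 0x%04x\n", n,
               (unsigned)make_mask(n), (unsigned)ntohs(make_mask(n)));
    return 0;
}
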
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
new file mode 100644
index 000000000000..4b1eb038d637
--- /dev/null
+++ b/include/net/dn_neigh.h
@@ -0,0 +1,28 @@
+#ifndef _NET_DN_NEIGH_H
+#define _NET_DN_NEIGH_H
+
+/*
+ * The positions of the first two fields of
+ * this structure are critical - SJW
+ */
+struct dn_neigh {
+ struct neighbour n;
+ dn_address addr;
+ unsigned long flags;
+#define DN_NDFLAG_R1 0x0001 /* Router L1 */
+#define DN_NDFLAG_R2 0x0002 /* Router L2 */
+#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
+ unsigned long blksize;
+ unsigned char priority;
+};
+
+extern void dn_neigh_init(void);
+extern void dn_neigh_cleanup(void);
+extern int dn_neigh_router_hello(struct sk_buff *skb);
+extern int dn_neigh_endnode_hello(struct sk_buff *skb);
+extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+
+extern struct neigh_table dn_neigh_table;
+
+#endif /* _NET_DN_NEIGH_H */
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
new file mode 100644
index 000000000000..6bbeafa73e8b
--- /dev/null
+++ b/include/net/dn_nsp.h
@@ -0,0 +1,209 @@
+#ifndef _NET_DN_NSP_H
+#define _NET_DN_NSP_H
+/******************************************************************************
+ (c) 1995-1998 E.M. Serrat emserrat@geocities.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*******************************************************************************/
+/* dn_nsp.c function prototypes */
+
+extern void dn_nsp_send_data_ack(struct sock *sk);
+extern void dn_nsp_send_oth_ack(struct sock *sk);
+extern void dn_nsp_delayed_ack(struct sock *sk);
+extern void dn_send_conn_ack(struct sock *sk);
+extern void dn_send_conn_conf(struct sock *sk, int gfp);
+extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+ unsigned short reason, int gfp);
+extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+ unsigned short reason);
+extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+
+extern void dn_nsp_output(struct sock *sk);
+extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
+extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob);
+extern unsigned long dn_nsp_persist(struct sock *sk);
+extern int dn_nsp_xmit_timeout(struct sock *sk);
+
+extern int dn_nsp_rx(struct sk_buff *);
+extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri);
+extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+
+#define NSP_REASON_OK 0 /* No error */
+#define NSP_REASON_NR 1 /* No resources */
+#define NSP_REASON_UN 2 /* Unrecognised node name */
+#define NSP_REASON_SD 3 /* Node shutting down */
+#define NSP_REASON_ID 4 /* Invalid destination end user */
+#define NSP_REASON_ER 5 /* End user lacks resources */
+#define NSP_REASON_OB 6 /* Object too busy */
+#define NSP_REASON_US 7 /* Unspecified error */
+#define NSP_REASON_TP 8 /* Third-Party abort */
+#define NSP_REASON_EA 9 /* End user has aborted the link */
+#define NSP_REASON_IF 10 /* Invalid node name format */
+#define NSP_REASON_LS 11 /* Local node shutdown */
+#define NSP_REASON_LL 32 /* Node lacks logical-link resources */
+#define NSP_REASON_LE 33 /* End user lacks logical-link resources */
+#define NSP_REASON_UR 34 /* Unacceptable RQSTRID or PASSWORD field */
+#define NSP_REASON_UA 36 /* Unacceptable ACCOUNT field */
+#define NSP_REASON_TM 38 /* End user timed out logical link */
+#define NSP_REASON_NU 39 /* Node unreachable */
+#define NSP_REASON_NL 41 /* No-link message */
+#define NSP_REASON_DC 42 /* Disconnect confirm */
+#define NSP_REASON_IO 43 /* Image data field overflow */
+
+#define NSP_DISCINIT 0x38
+#define NSP_DISCCONF 0x48
+
+/*------------------------- NSP - messages ------------------------------*/
+/* Data Messages */
+/*---------------*/
+
+/* Data Messages (data segment/interrupt/link service) */
+
+struct nsp_data_seg_msg
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned short dstaddr __attribute__((packed));
+ unsigned short srcaddr __attribute__((packed));
+};
+
+struct nsp_data_opt_msg
+{
+ unsigned short acknum __attribute__((packed));
+ unsigned short segnum __attribute__((packed));
+ unsigned short lsflgs __attribute__((packed));
+};
+
+struct nsp_data_opt_msg1
+{
+ unsigned short acknum __attribute__((packed));
+ unsigned short segnum __attribute__((packed));
+};
+
+
+/* Acknowledgment Message (data/other data) */
+struct nsp_data_ack_msg
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned short dstaddr __attribute__((packed));
+ unsigned short srcaddr __attribute__((packed));
+ unsigned short acknum __attribute__((packed));
+};
+
+/* Connect Acknowledgment Message */
+struct nsp_conn_ack_msg
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned short dstaddr __attribute__((packed));
+};
+
+
+/* Connect Initiate/Retransmit Initiate/Connect Confirm */
+struct nsp_conn_init_msg
+{
+ unsigned char msgflg __attribute__((packed));
+#define NSP_CI 0x18 /* Connect Initiate */
+#define NSP_RCI 0x68 /* Retrans. Conn Init */
+ unsigned short dstaddr __attribute__((packed));
+ unsigned short srcaddr __attribute__((packed));
+ unsigned char services __attribute__((packed));
+#define NSP_FC_NONE 0x00 /* Flow Control None */
+#define NSP_FC_SRC 0x04 /* Seg Req. Count */
+#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
+#define NSP_FC_MASK 0x0c /* FC type mask */
+ unsigned char info __attribute__((packed));
+ unsigned short segsize __attribute__((packed));
+};
+
+/* Disconnect Initiate/Disconnect Confirm */
+struct nsp_disconn_init_msg
+{
+ unsigned char msgflg __attribute__((packed));
+ unsigned short dstaddr __attribute__((packed));
+ unsigned short srcaddr __attribute__((packed));
+ unsigned short reason __attribute__((packed));
+};
+
+
+
+struct srcobj_fmt
+{
+ char format __attribute__((packed));
+ unsigned char task __attribute__((packed));
+ unsigned short grpcode __attribute__((packed));
+ unsigned short usrcode __attribute__((packed));
+ char dlen __attribute__((packed));
+};
+
+/*
+ * A collection of functions for manipulating the sequence
+ * numbers used in NSP. Similar in operation to the functions
+ * of the same name in TCP.
+ */
+static __inline__ int dn_before(unsigned short seq1, unsigned short seq2)
+{
+ seq1 &= 0x0fff;
+ seq2 &= 0x0fff;
+
+ return (int)((seq1 - seq2) & 0x0fff) > 2048;
+}
+
+
+static __inline__ int dn_after(unsigned short seq1, unsigned short seq2)
+{
+ seq1 &= 0x0fff;
+ seq2 &= 0x0fff;
+
+ return (int)((seq2 - seq1) & 0x0fff) > 2048;
+}
+
+static __inline__ int dn_equal(unsigned short seq1, unsigned short seq2)
+{
+ return ((seq1 ^ seq2) & 0x0fff) == 0;
+}
+
+static __inline__ int dn_before_or_equal(unsigned short seq1, unsigned short seq2)
+{
+ return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
+}
+
+static __inline__ void seq_add(unsigned short *seq, unsigned short off)
+{
+ (*seq) += off;
+ (*seq) &= 0x0fff;
+}
+
+static __inline__ int seq_next(unsigned short seq1, unsigned short seq2)
+{
+ return dn_equal(seq1 + 1, seq2);
+}
+
+/*
+ * Can we delay the ack?
+ */
+static __inline__ int sendack(unsigned short seq)
+{
+ return (int)((seq & 0x1000) ? 0 : 1);
+}
+
+/*
+ * Is the socket congested?
+ */
+static __inline__ int dn_congested(struct sock *sk)
+{
+ return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
+}
+
+#define DN_MAX_NSP_DATA_HEADER (11)
+
+#endif /* _NET_DN_NSP_H */
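
The dn_before()/dn_after() helpers above order 12-bit NSP sequence numbers modulo 4096 with a half-window of 2048, much as TCP's before()/after() do for 32-bit numbers. A small user-space sketch of the same comparison (the before() name is only for this example):

#include <stdio.h>

/* Mirror of dn_before(): NSP sequence numbers are 12 bits wide, so
 * ordering is decided modulo 4096 with a half-window of 2048. */
static int before(unsigned short s1, unsigned short s2)
{
    s1 &= 0x0fff;
    s2 &= 0x0fff;
    return (int)((s1 - s2) & 0x0fff) > 2048;
}

int main(void)
{
    printf("before(10, 20)  = %d\n", before(10, 20));   /* 1 */
    printf("before(4090, 5) = %d\n", before(4090, 5));  /* 1: wraps around */
    printf("before(5, 4090) = %d\n", before(5, 4090));  /* 0 */
    return 0;
}
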
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
new file mode 100644
index 000000000000..d084721db198
--- /dev/null
+++ b/include/net/dn_route.h
@@ -0,0 +1,112 @@
+#ifndef _NET_DN_ROUTE_H
+#define _NET_DN_ROUTE_H
+
+/******************************************************************************
+ (c) 1995-1998 E.M. Serrat emserrat@geocities.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*******************************************************************************/
+
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri);
+extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
+extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern void dn_rt_cache_flush(int delay);
+
+/* Masks for flags field */
+#define DN_RT_F_PID 0x07 /* Mask for packet type */
+#define DN_RT_F_PF 0x80 /* Padding Follows */
+#define DN_RT_F_VER 0x40 /* Version: must be 0, discard packet if ==1 */
+#define DN_RT_F_IE 0x20 /* Intra Ethernet, Reserved in short pkt */
+#define DN_RT_F_RTS 0x10 /* Packet is being returned to sender */
+#define DN_RT_F_RQR 0x08 /* Return packet to sender upon non-delivery */
+
+/* Mask for types of routing packets */
+#define DN_RT_PKT_MSK 0x06
+/* Types of routing packets */
+#define DN_RT_PKT_SHORT 0x02 /* Short routing packet */
+#define DN_RT_PKT_LONG 0x06 /* Long routing packet */
+
+/* Mask for control/routing selection */
+#define DN_RT_PKT_CNTL 0x01 /* Set to 1 if a control packet */
+/* Types of control packets */
+#define DN_RT_CNTL_MSK 0x0f /* Mask for control packets */
+#define DN_RT_PKT_INIT 0x01 /* Initialisation packet */
+#define DN_RT_PKT_VERI 0x03 /* Verification Message */
+#define DN_RT_PKT_HELO 0x05 /* Hello and Test Message */
+#define DN_RT_PKT_L1RT 0x07 /* Level 1 Routing Message */
+#define DN_RT_PKT_L2RT 0x09 /* Level 2 Routing Message */
+#define DN_RT_PKT_ERTH 0x0b /* Ethernet Router Hello */
+#define DN_RT_PKT_EEDH 0x0d /* Ethernet EndNode Hello */
+
+/* Values for info field in hello message */
+#define DN_RT_INFO_TYPE 0x03 /* Type mask */
+#define DN_RT_INFO_L1RT 0x02 /* L1 Router */
+#define DN_RT_INFO_L2RT 0x01 /* L2 Router */
+#define DN_RT_INFO_ENDN 0x03 /* EndNode */
+#define DN_RT_INFO_VERI 0x04 /* Verification Reqd. */
+#define DN_RT_INFO_RJCT 0x08 /* Reject Flag, Reserved */
+#define DN_RT_INFO_VFLD 0x10 /* Verification Failed, Reserved */
+#define DN_RT_INFO_NOML 0x20 /* No Multicast traffic accepted */
+#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
+
+/*
+ * The fl structure is the key we used to look up the route.
+ * The rt_saddr & rt_daddr entries are the same as fl.fld_src & fl.fld_dst,
+ * except for local input routes, where rt_saddr = fl.fld_dst and
+ * rt_daddr = fl.fld_src to allow the route to be used for returning
+ * packets to the originating host.
+ */
+struct dn_route {
+ union {
+ struct dst_entry dst;
+ struct dn_route *rt_next;
+ } u;
+
+ __u16 rt_saddr;
+ __u16 rt_daddr;
+ __u16 rt_gateway;
+ __u16 rt_local_src; /* Source used for forwarding packets */
+ __u16 rt_src_map;
+ __u16 rt_dst_map;
+
+ unsigned rt_flags;
+ unsigned rt_type;
+
+ struct flowi fl;
+};
+
+extern void dn_route_init(void);
+extern void dn_route_cleanup(void);
+
+#include <net/sock.h>
+#include <linux/if_arp.h>
+
+static inline void dn_rt_send(struct sk_buff *skb)
+{
+ dev_queue_xmit(skb);
+}
+
+static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
+{
+ struct net_device *dev = skb->dev;
+
+ if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
+ dst = NULL;
+
+ if (!dev->hard_header || (dev->hard_header(skb, dev, ETH_P_DNA_RT,
+ dst, src, skb->len) >= 0))
+ dn_rt_send(skb);
+ else
+ kfree_skb(skb);
+}
+
+#endif /* _NET_DN_ROUTE_H */
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
new file mode 100644
index 000000000000..a79c9e075f7f
--- /dev/null
+++ b/include/net/dsfield.h
@@ -0,0 +1,54 @@
+/* include/net/dsfield.h - Manipulation of the Differentiated Services field */
+
+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
+
+
+#ifndef __NET_DSFIELD_H
+#define __NET_DSFIELD_H
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <asm/byteorder.h>
+
+
+static inline __u8 ipv4_get_dsfield(struct iphdr *iph)
+{
+ return iph->tos;
+}
+
+
+static inline __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h)
+{
+ return ntohs(*(__u16 *) ipv6h) >> 4;
+}
+
+
+static inline void ipv4_change_dsfield(struct iphdr *iph,__u8 mask,
+ __u8 value)
+{
+ __u32 check = ntohs(iph->check);
+ __u8 dsfield;
+
+ dsfield = (iph->tos & mask) | value;
+ check += iph->tos;
+ if ((check+1) >> 16) check = (check+1) & 0xffff;
+ check -= dsfield;
+ check += check >> 16; /* adjust carry */
+ iph->check = htons(check);
+ iph->tos = dsfield;
+}
+
+
+static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
+ __u8 value)
+{
+ __u16 tmp;
+
+ tmp = ntohs(*(__u16 *) ipv6h);
+ tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
+ *(__u16 *) ipv6h = htons(tmp);
+}
+
+
+#endif
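
ipv4_change_dsfield() above adjusts the IPv4 header checksum incrementally instead of recomputing it. A user-space sketch that repeats the same arithmetic on a hand-built 20-byte header and then verifies it against a full recomputation (the header bytes and the csum() helper are invented for the example):

#include <stdio.h>
#include <stdint.h>

/* Full ones-complement Internet checksum over big-endian 16-bit words,
 * used here only to verify the incremental update below. */
static uint16_t csum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* 20-byte IPv4 header, TOS 0x00, checksum field filled in below. */
    uint8_t h[20] = {
        0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
        0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
        0xc0, 0xa8, 0x00, 0xc7,
    };
    uint16_t c = csum(h, sizeof(h));
    h[10] = c >> 8;
    h[11] = c & 0xff;

    /* Incremental update, same arithmetic as ipv4_change_dsfield():
     * rewrite the DSCP bits while preserving ECN (mask 0x03). */
    uint8_t mask = 0x03, value = 0xb8;          /* DSCP EF (46 << 2) */
    uint32_t check = (h[10] << 8) | h[11];      /* ntohs(iph->check) */
    uint8_t tos = h[1];
    uint8_t dsfield = (tos & mask) | value;

    check += tos;
    if ((check + 1) >> 16)
        check = (check + 1) & 0xffff;
    check -= dsfield;
    check += check >> 16;                       /* fold the borrow back */

    h[1] = dsfield;
    h[10] = (check >> 8) & 0xff;
    h[11] = check & 0xff;

    /* A fresh checksum over the updated header must come out as 0. */
    printf("verify: 0x%04x (0 means still valid)\n", csum(h, sizeof(h)));
    return 0;
}
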
diff --git a/include/net/dst.h b/include/net/dst.h
new file mode 100644
index 000000000000..50adc9136530
--- /dev/null
+++ b/include/net/dst.h
@@ -0,0 +1,279 @@
+/*
+ * net/dst.h Protocol independent destination cache definitions.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ */
+
+#ifndef _NET_DST_H
+#define _NET_DST_H
+
+#include <linux/config.h>
+#include <linux/rtnetlink.h>
+#include <linux/rcupdate.h>
+#include <linux/jiffies.h>
+#include <net/neighbour.h>
+#include <asm/processor.h>
+
+/*
+ * 0 - no debugging messages
+ * 1 - rare events and bugs (default)
+ * 2 - trace mode.
+ */
+#define RT_CACHE_DEBUG 0
+
+#define DST_GC_MIN (HZ/10)
+#define DST_GC_INC (HZ/2)
+#define DST_GC_MAX (120*HZ)
+
+/* Each dst_entry has a reference count and sits in some parent list(s).
+ * When it is removed from its parent list, it is "freed" (dst_free).
+ * After this it enters a dead state (dst->obsolete > 0) and if its refcnt
+ * is zero, it can be destroyed immediately; otherwise it is added
+ * to the gc list and the garbage collector periodically checks the refcnt.
+ */
+
+struct sk_buff;
+
+struct dst_entry
+{
+ struct dst_entry *next;
+ atomic_t __refcnt; /* client references */
+ int __use;
+ struct dst_entry *child;
+ struct net_device *dev;
+ int obsolete;
+ int flags;
+#define DST_HOST 1
+#define DST_NOXFRM 2
+#define DST_NOPOLICY 4
+#define DST_NOHASH 8
+#define DST_BALANCED 0x10
+ unsigned long lastuse;
+ unsigned long expires;
+
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+
+ u32 metrics[RTAX_MAX];
+ struct dst_entry *path;
+
+ unsigned long rate_last; /* rate limiting for ICMP */
+ unsigned long rate_tokens;
+
+ int error;
+
+ struct neighbour *neighbour;
+ struct hh_cache *hh;
+ struct xfrm_state *xfrm;
+
+ int (*input)(struct sk_buff*);
+ int (*output)(struct sk_buff*);
+
+#ifdef CONFIG_NET_CLS_ROUTE
+ __u32 tclassid;
+#endif
+
+ struct dst_ops *ops;
+ struct rcu_head rcu_head;
+
+ char info[0];
+};
+
+
+struct dst_ops
+{
+ unsigned short family;
+ unsigned short protocol;
+ unsigned gc_thresh;
+
+ int (*gc)(void);
+ struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
+ void (*destroy)(struct dst_entry *);
+ void (*ifdown)(struct dst_entry *,
+ struct net_device *dev, int how);
+ struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*link_failure)(struct sk_buff *);
+ void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
+ int (*get_mss)(struct dst_entry *dst, u32 mtu);
+ int entry_size;
+
+ atomic_t entries;
+ kmem_cache_t *kmem_cachep;
+};
+
+#ifdef __KERNEL__
+
+static inline u32
+dst_metric(const struct dst_entry *dst, int metric)
+{
+ return dst->metrics[metric-1];
+}
+
+static inline u32 dst_mtu(const struct dst_entry *dst)
+{
+ u32 mtu = dst_metric(dst, RTAX_MTU);
+ /*
+ * Alexey put it here, so ask him about it :)
+ */
+ barrier();
+ return mtu;
+}
+
+static inline u32
+dst_allfrag(const struct dst_entry *dst)
+{
+ int ret = dst_metric(dst, RTAX_FEATURES) & RTAX_FEATURE_ALLFRAG;
+ /* Yes, _exactly_. This is paranoia. */
+ barrier();
+ return ret;
+}
+
+static inline int
+dst_metric_locked(struct dst_entry *dst, int metric)
+{
+ return dst_metric(dst, RTAX_LOCK) & (1<<metric);
+}
+
+static inline void dst_hold(struct dst_entry * dst)
+{
+ atomic_inc(&dst->__refcnt);
+}
+
+static inline
+struct dst_entry * dst_clone(struct dst_entry * dst)
+{
+ if (dst)
+ atomic_inc(&dst->__refcnt);
+ return dst;
+}
+
+static inline
+void dst_release(struct dst_entry * dst)
+{
+ if (dst) {
+ WARN_ON(atomic_read(&dst->__refcnt) < 1);
+ smp_mb__before_atomic_dec();
+ atomic_dec(&dst->__refcnt);
+ }
+}
+
+/* Children define the path of the packet through the
+ * Linux networking. Thus, destinations are stackable.
+ */
+
+static inline struct dst_entry *dst_pop(struct dst_entry *dst)
+{
+ struct dst_entry *child = dst_clone(dst->child);
+
+ dst_release(dst);
+ return child;
+}
+
+extern void * dst_alloc(struct dst_ops * ops);
+extern void __dst_free(struct dst_entry * dst);
+extern struct dst_entry *dst_destroy(struct dst_entry * dst);
+
+static inline void dst_free(struct dst_entry * dst)
+{
+ if (dst->obsolete > 1)
+ return;
+ if (!atomic_read(&dst->__refcnt)) {
+ dst = dst_destroy(dst);
+ if (!dst)
+ return;
+ }
+ __dst_free(dst);
+}
+
+static inline void dst_rcu_free(struct rcu_head *head)
+{
+ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+ dst_free(dst);
+}
+
+static inline void dst_confirm(struct dst_entry *dst)
+{
+ if (dst)
+ neigh_confirm(dst->neighbour);
+}
+
+static inline void dst_negative_advice(struct dst_entry **dst_p)
+{
+ struct dst_entry * dst = *dst_p;
+ if (dst && dst->ops->negative_advice)
+ *dst_p = dst->ops->negative_advice(dst);
+}
+
+static inline void dst_link_failure(struct sk_buff *skb)
+{
+ struct dst_entry * dst = skb->dst;
+ if (dst && dst->ops && dst->ops->link_failure)
+ dst->ops->link_failure(skb);
+}
+
+static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+{
+ unsigned long expires = jiffies + timeout;
+
+ if (expires == 0)
+ expires = 1;
+
+ if (dst->expires == 0 || time_before(expires, dst->expires))
+ dst->expires = expires;
+}
+
+/* Output packet to network from transport. */
+static inline int dst_output(struct sk_buff *skb)
+{
+ int err;
+
+ for (;;) {
+ err = skb->dst->output(skb);
+
+ if (likely(err == 0))
+ return err;
+ if (unlikely(err != NET_XMIT_BYPASS))
+ return err;
+ }
+}
+
+/* Input packet from network to transport. */
+static inline int dst_input(struct sk_buff *skb)
+{
+ int err;
+
+ for (;;) {
+ err = skb->dst->input(skb);
+
+ if (likely(err == 0))
+ return err;
+ /* Oh, Jamal... Seems, I will not forgive you this mess. :-) */
+ if (unlikely(err != NET_XMIT_BYPASS))
+ return err;
+ }
+}
+
+static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
+{
+ if (dst->obsolete)
+ dst = dst->ops->check(dst, cookie);
+ return dst;
+}
+
+extern void dst_init(void);
+
+struct flowi;
+#ifndef CONFIG_XFRM
+static inline int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
+ struct sock *sk, int flags)
+{
+ return 0;
+}
+#else
+extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
+ struct sock *sk, int flags);
+#endif
+#endif
+
+#endif /* _NET_DST_H */
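
On the refcounting scheme described above: clients take a reference with dst_hold()/dst_clone() and drop it with dst_release(), while destruction waits until the entry is both unlinked and unreferenced. A toy C11 sketch of that discipline (names are hypothetical, and unlike the kernel, which leaves reaping to the garbage collector, this sketch frees eagerly on the last release):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy cache entry following the dst_entry refcount discipline:
 * entry_hold() plays the role of dst_hold()/dst_clone(), entry_release()
 * the role of dst_release(); destruction happens only once the entry is
 * both unlinked ("obsolete") and unreferenced. */
struct entry {
    atomic_int refcnt;
    int obsolete;               /* set when removed from its parent list */
};

static struct entry *entry_hold(struct entry *e)
{
    if (e)
        atomic_fetch_add(&e->refcnt, 1);
    return e;
}

static void entry_release(struct entry *e)
{
    /* Unlike the kernel, which defers this to a GC, free eagerly here. */
    if (e && atomic_fetch_sub(&e->refcnt, 1) == 1 && e->obsolete)
        free(e);
}

int main(void)
{
    struct entry *e = calloc(1, sizeof(*e));

    atomic_init(&e->refcnt, 1);             /* ref held by the "parent list" */
    struct entry *user = entry_hold(e);     /* a client takes a reference    */
    e->obsolete = 1;                        /* unlink from the parent list   */
    entry_release(e);                       /* drop the list's reference     */
    entry_release(user);                    /* last reference frees it       */
    printf("entry destroyed\n");
    return 0;
}
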
diff --git a/include/net/esp.h b/include/net/esp.h
new file mode 100644
index 000000000000..90cd94fad7d9
--- /dev/null
+++ b/include/net/esp.h
@@ -0,0 +1,59 @@
+#ifndef _NET_ESP_H
+#define _NET_ESP_H
+
+#include <net/xfrm.h>
+#include <asm/scatterlist.h>
+
+#define ESP_NUM_FAST_SG 4
+
+struct esp_data
+{
+ struct scatterlist sgbuf[ESP_NUM_FAST_SG];
+
+ /* Confidentiality */
+ struct {
+ u8 *key; /* Key */
+ int key_len; /* Key length */
+ u8 *ivec; /* ivec buffer */
+ /* ivlen is the offset from enc_data at which the encrypted data starts.
+ * It is logically different from crypto_tfm_alg_ivsize(tfm).
+ * We assume that it is either zero (no ivec), or
+ * >= crypto_tfm_alg_ivsize(tfm). */
+ int ivlen;
+ int padlen; /* 0..255 */
+ struct crypto_tfm *tfm; /* crypto handle */
+ } conf;
+
+ /* Integrity. It is active when icv_full_len != 0 */
+ struct {
+ u8 *key; /* Key */
+ int key_len; /* Length of the key */
+ u8 *work_icv;
+ int icv_full_len;
+ int icv_trunc_len;
+ void (*icv)(struct esp_data*,
+ struct sk_buff *skb,
+ int offset, int len, u8 *icv);
+ struct crypto_tfm *tfm;
+ } auth;
+};
+
+extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len);
+extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+
+static inline void
+esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
+ int len, u8 *auth_data)
+{
+ struct crypto_tfm *tfm = esp->auth.tfm;
+ char *icv = esp->auth.work_icv;
+
+ memset(auth_data, 0, esp->auth.icv_trunc_len);
+ crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
+ skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
+ crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
+ memcpy(auth_data, icv, esp->auth.icv_trunc_len);
+}
+
+#endif
diff --git a/include/net/flow.h b/include/net/flow.h
new file mode 100644
index 000000000000..9a5c94b1a0ec
--- /dev/null
+++ b/include/net/flow.h
@@ -0,0 +1,95 @@
+/*
+ *
+ * Generic internet FLOW.
+ *
+ */
+
+#ifndef _NET_FLOW_H
+#define _NET_FLOW_H
+
+#include <linux/in6.h>
+#include <asm/atomic.h>
+
+struct flowi {
+ int oif;
+ int iif;
+
+ union {
+ struct {
+ __u32 daddr;
+ __u32 saddr;
+ __u32 fwmark;
+ __u8 tos;
+ __u8 scope;
+ } ip4_u;
+
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ __u32 flowlabel;
+ } ip6_u;
+
+ struct {
+ __u16 daddr;
+ __u16 saddr;
+ __u32 fwmark;
+ __u8 scope;
+ } dn_u;
+ } nl_u;
+#define fld_dst nl_u.dn_u.daddr
+#define fld_src nl_u.dn_u.saddr
+#define fld_fwmark nl_u.dn_u.fwmark
+#define fld_scope nl_u.dn_u.scope
+#define fl6_dst nl_u.ip6_u.daddr
+#define fl6_src nl_u.ip6_u.saddr
+#define fl6_flowlabel nl_u.ip6_u.flowlabel
+#define fl4_dst nl_u.ip4_u.daddr
+#define fl4_src nl_u.ip4_u.saddr
+#define fl4_fwmark nl_u.ip4_u.fwmark
+#define fl4_tos nl_u.ip4_u.tos
+#define fl4_scope nl_u.ip4_u.scope
+
+ __u8 proto;
+ __u8 flags;
+#define FLOWI_FLAG_MULTIPATHOLDROUTE 0x01
+ union {
+ struct {
+ __u16 sport;
+ __u16 dport;
+ } ports;
+
+ struct {
+ __u8 type;
+ __u8 code;
+ } icmpt;
+
+ struct {
+ __u16 sport;
+ __u16 dport;
+ __u8 objnum;
+ __u8 objnamel; /* Not 16 bits since max val is 16 */
+ __u8 objname[16]; /* Not zero terminated */
+ } dnports;
+
+ __u32 spi;
+ } uli_u;
+#define fl_ip_sport uli_u.ports.sport
+#define fl_ip_dport uli_u.ports.dport
+#define fl_icmp_type uli_u.icmpt.type
+#define fl_icmp_code uli_u.icmpt.code
+#define fl_ipsec_spi uli_u.spi
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+#define FLOW_DIR_IN 0
+#define FLOW_DIR_OUT 1
+#define FLOW_DIR_FWD 2
+
+typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
+ void **objp, atomic_t **obj_refp);
+
+extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
+ flow_resolve_t resolver);
+extern void flow_cache_flush(void);
+extern atomic_t flow_cache_genid;
+
+#endif
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
new file mode 100644
index 000000000000..0b95cf031d6e
--- /dev/null
+++ b/include/net/gen_stats.h
@@ -0,0 +1,49 @@
+#ifndef __NET_GEN_STATS_H
+#define __NET_GEN_STATS_H
+
+#include <linux/gen_stats.h>
+#include <linux/socket.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+
+struct gnet_dump
+{
+ spinlock_t * lock;
+ struct sk_buff * skb;
+ struct rtattr * tail;
+
+ /* Backward compatibility */
+ int compat_tc_stats;
+ int compat_xstats;
+ void * xstats;
+ int xstats_len;
+ struct tc_stats tc_stats;
+};
+
+extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+ spinlock_t *lock, struct gnet_dump *d);
+
+extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ int tc_stats_type,int xstats_type,
+ spinlock_t *lock, struct gnet_dump *d);
+
+extern int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic *b);
+extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
+ struct gnet_stats_rate_est *r);
+extern int gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue *q);
+extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+extern int gnet_stats_finish_copy(struct gnet_dump *d);
+
+extern int gen_new_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt);
+extern void gen_kill_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est);
+extern int gen_replace_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt);
+
+#endif
diff --git a/include/net/icmp.h b/include/net/icmp.h
new file mode 100644
index 000000000000..3fc192478aa2
--- /dev/null
+++ b/include/net/icmp.h
@@ -0,0 +1,60 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/config.h>
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+#include <net/snmp.h>
+#include <linux/ip.h>
+
+struct icmp_err {
+ int errno;
+ unsigned fatal:1;
+};
+
+extern struct icmp_err icmp_err_convert[];
+DECLARE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
+#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
+#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
+
+extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
+extern int icmp_rcv(struct sk_buff *skb);
+extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern void icmp_init(struct net_proto_family *ops);
+
+/* Move into dst.h? */
+extern int xrlim_allow(struct dst_entry *dst, int timeout);
+
+struct raw_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+ struct icmp_filter filter;
+};
+
+static inline struct raw_sock *raw_sk(const struct sock *sk)
+{
+ return (struct raw_sock *)sk;
+}
+
+#endif /* _ICMP_H */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
new file mode 100644
index 000000000000..e97a9accb71d
--- /dev/null
+++ b/include/net/if_inet6.h
@@ -0,0 +1,285 @@
+/*
+ * inet6 interface/address list definitions
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_IF_INET6_H
+#define _NET_IF_INET6_H
+
+#include <net/snmp.h>
+#include <linux/ipv6.h>
+
+/* inet6_dev.if_flags */
+
+#define IF_RA_OTHERCONF 0x80
+#define IF_RA_MANAGED 0x40
+#define IF_RA_RCVD 0x20
+#define IF_RS_SENT 0x10
+
+/* prefix flags */
+#define IF_PREFIX_ONLINK 0x01
+#define IF_PREFIX_AUTOCONF 0x02
+
+#ifdef __KERNEL__
+
+struct inet6_ifaddr
+{
+ struct in6_addr addr;
+ __u32 prefix_len;
+
+ __u32 valid_lft;
+ __u32 prefered_lft;
+ unsigned long cstamp; /* created timestamp */
+ unsigned long tstamp; /* updated timestamp */
+ atomic_t refcnt;
+ spinlock_t lock;
+
+ __u8 probes;
+ __u8 flags;
+
+ __u16 scope;
+
+ struct timer_list timer;
+
+ struct inet6_dev *idev;
+ struct rt6_info *rt;
+
+ struct inet6_ifaddr *lst_next; /* next addr in addr_lst */
+ struct inet6_ifaddr *if_next; /* next addr in inet6_dev */
+
+#ifdef CONFIG_IPV6_PRIVACY
+ struct inet6_ifaddr *tmp_next; /* next addr in tempaddr_lst */
+ struct inet6_ifaddr *ifpub;
+ int regen_count;
+#endif
+
+ int dead;
+};
+
+struct ip6_sf_socklist
+{
+ unsigned int sl_max;
+ unsigned int sl_count;
+ struct in6_addr sl_addr[0];
+};
+
+#define IP6_SFLSIZE(count) (sizeof(struct ip6_sf_socklist) + \
+ (count) * sizeof(struct in6_addr))
+
+#define IP6_SFBLOCK 10 /* allocate this many at once */
+
+struct ipv6_mc_socklist
+{
+ struct in6_addr addr;
+ int ifindex;
+ struct ipv6_mc_socklist *next;
+ unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
+ struct ip6_sf_socklist *sflist;
+};
+
+struct ip6_sf_list
+{
+ struct ip6_sf_list *sf_next;
+ struct in6_addr sf_addr;
+ unsigned long sf_count[2]; /* include/exclude counts */
+ unsigned char sf_gsresp; /* include in g & s response? */
+ unsigned char sf_oldin; /* change state */
+ unsigned char sf_crcount; /* retrans. left to send */
+};
+
+#define MAF_TIMER_RUNNING 0x01
+#define MAF_LAST_REPORTER 0x02
+#define MAF_LOADED 0x04
+#define MAF_NOREPORT 0x08
+#define MAF_GSQUERY 0x10
+
+struct ifmcaddr6
+{
+ struct in6_addr mca_addr;
+ struct inet6_dev *idev;
+ struct ifmcaddr6 *next;
+ struct ip6_sf_list *mca_sources;
+ struct ip6_sf_list *mca_tomb;
+ unsigned int mca_sfmode;
+ unsigned long mca_sfcount[2];
+ struct timer_list mca_timer;
+ unsigned mca_flags;
+ int mca_users;
+ atomic_t mca_refcnt;
+ spinlock_t mca_lock;
+ unsigned char mca_crcount;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
+};
+
+/* Anycast stuff */
+
+struct ipv6_ac_socklist
+{
+ struct in6_addr acl_addr;
+ int acl_ifindex;
+ struct ipv6_ac_socklist *acl_next;
+};
+
+struct ifacaddr6
+{
+ struct in6_addr aca_addr;
+ struct inet6_dev *aca_idev;
+ struct rt6_info *aca_rt;
+ struct ifacaddr6 *aca_next;
+ int aca_users;
+ atomic_t aca_refcnt;
+ spinlock_t aca_lock;
+ unsigned long aca_cstamp;
+ unsigned long aca_tstamp;
+};
+
+#define IFA_HOST IPV6_ADDR_LOOPBACK
+#define IFA_LINK IPV6_ADDR_LINKLOCAL
+#define IFA_SITE IPV6_ADDR_SITELOCAL
+#define IFA_GLOBAL 0x0000U
+
+struct ipv6_devstat {
+ struct proc_dir_entry *proc_dir_entry;
+ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6);
+};
+
+struct inet6_dev
+{
+ struct net_device *dev;
+
+ struct inet6_ifaddr *addr_list;
+
+ struct ifmcaddr6 *mc_list;
+ struct ifmcaddr6 *mc_tomb;
+ rwlock_t mc_lock;
+ unsigned long mc_v1_seen;
+ unsigned long mc_maxdelay;
+ unsigned char mc_qrv;
+ unsigned char mc_gq_running;
+ unsigned char mc_ifc_count;
+ struct timer_list mc_gq_timer; /* general query timer */
+ struct timer_list mc_ifc_timer; /* interface change timer */
+
+ struct ifacaddr6 *ac_list;
+ rwlock_t lock;
+ atomic_t refcnt;
+ __u32 if_flags;
+ int dead;
+
+#ifdef CONFIG_IPV6_PRIVACY
+ u8 rndid[8];
+ u8 entropy[8];
+ struct timer_list regen_timer;
+ struct inet6_ifaddr *tempaddr_list;
+ __u8 work_eui64[8];
+ __u8 work_digest[16];
+#endif
+
+ struct neigh_parms *nd_parms;
+ struct inet6_dev *next;
+ struct ipv6_devconf cnf;
+ struct ipv6_devstat stats;
+ unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
+};
+
+extern struct ipv6_devconf ipv6_devconf;
+
+static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf)
+{
+ /*
+ * +-------+-------+-------+-------+-------+-------+
+ * | 33 | 33 | DST13 | DST14 | DST15 | DST16 |
+ * +-------+-------+-------+-------+-------+-------+
+ */
+
+ buf[0]= 0x33;
+ buf[1]= 0x33;
+
+ memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32));
+}
+
+static inline void ipv6_tr_mc_map(struct in6_addr *addr, char *buf)
+{
+ /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */
+
+ if (((addr->s6_addr[0] == 0xFF) &&
+ ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) &&
+ (addr->s6_addr16[1] == 0) &&
+ (addr->s6_addr32[1] == 0) &&
+ (addr->s6_addr32[2] == 0) &&
+ (addr->s6_addr16[6] == 0) &&
+ (addr->s6_addr[15] == 1)) ||
+ ((addr->s6_addr[0] == 0xFF) &&
+ (addr->s6_addr[1] == 0x02) &&
+ (addr->s6_addr16[1] == 0) &&
+ (addr->s6_addr32[1] == 0) &&
+ (addr->s6_addr16[4] == 0) &&
+ (addr->s6_addr[10] == 0) &&
+ (addr->s6_addr[11] == 1) &&
+ (addr->s6_addr[12] == 0xff)))
+ {
+ buf[0]=0xC0;
+ buf[1]=0x00;
+ buf[2]=0x01;
+ buf[3]=0x00;
+ buf[4]=0x00;
+ buf[5]=0x00;
+ /* All routers FF0x::2 */
+ } else if ((addr->s6_addr[0] ==0xff) &&
+ ((addr->s6_addr[1] & 0xF0) == 0) &&
+ (addr->s6_addr16[1] == 0) &&
+ (addr->s6_addr32[1] == 0) &&
+ (addr->s6_addr32[2] == 0) &&
+ (addr->s6_addr16[6] == 0) &&
+ (addr->s6_addr[15] == 2))
+ {
+ buf[0]=0xC0;
+ buf[1]=0x00;
+ buf[2]=0x02;
+ buf[3]=0x00;
+ buf[4]=0x00;
+ buf[5]=0x00;
+ } else {
+ unsigned char i ;
+
+ i = addr->s6_addr[15] & 7 ;
+ buf[0]=0xC0;
+ buf[1]=0x00;
+ buf[2]=0x00;
+ buf[3]=0x01 << i ;
+ buf[4]=0x00;
+ buf[5]=0x00;
+ }
+}
+
+static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf)
+{
+ buf[0] = 0x00;
+}
+
+static inline void ipv6_ib_mc_map(struct in6_addr *addr, char *buf)
+{
+ buf[0] = 0; /* Reserved */
+ buf[1] = 0xff; /* Multicast QPN */
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ buf[4] = 0xff;
+ buf[5] = 0x12; /* link local scope */
+ buf[6] = 0x60; /* IPv6 signature */
+ buf[7] = 0x1b;
+ buf[8] = 0; /* P_Key */
+ buf[9] = 0;
+ memcpy(buf + 10, addr->s6_addr + 6, 10);
+}
+#endif
+#endif
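
ipv6_eth_mc_map() above derives the Ethernet group MAC by prepending 33:33 to the last four bytes of the IPv6 multicast address. A user-space sketch showing the mapping (eth_mc_map() here is just a local copy for the example):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Same mapping as ipv6_eth_mc_map(): 33:33 followed by the last
 * four bytes of the IPv6 group address. */
static void eth_mc_map(const struct in6_addr *addr, unsigned char *buf)
{
    buf[0] = 0x33;
    buf[1] = 0x33;
    memcpy(buf + 2, addr->s6_addr + 12, 4);
}

int main(void)
{
    struct in6_addr grp;
    unsigned char mac[6];

    inet_pton(AF_INET6, "ff02::1:ff00:1234", &grp);
    eth_mc_map(&grp, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    /* prints 33:33:ff:00:12:34 */
    return 0;
}
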
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
new file mode 100644
index 000000000000..fbc1f4d140d8
--- /dev/null
+++ b/include/net/inet_common.h
@@ -0,0 +1,44 @@
+#ifndef _INET_COMMON_H
+#define _INET_COMMON_H
+
+extern struct proto_ops inet_stream_ops;
+extern struct proto_ops inet_dgram_ops;
+
+/*
+ * INET4 prototypes used by INET6
+ */
+
+extern void inet_remove_sock(struct sock *sk1);
+extern void inet_put_sock(unsigned short num,
+ struct sock *sk);
+extern int inet_release(struct socket *sock);
+extern int inet_stream_connect(struct socket *sock,
+ struct sockaddr * uaddr,
+ int addr_len, int flags);
+extern int inet_dgram_connect(struct socket *sock,
+ struct sockaddr * uaddr,
+ int addr_len, int flags);
+extern int inet_accept(struct socket *sock,
+ struct socket *newsock, int flags);
+extern int inet_sendmsg(struct kiocb *iocb,
+ struct socket *sock,
+ struct msghdr *msg,
+ size_t size);
+extern int inet_shutdown(struct socket *sock, int how);
+extern unsigned int inet_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
+extern int inet_listen(struct socket *sock, int backlog);
+
+extern void inet_sock_destruct(struct sock *sk);
+extern atomic_t inet_sock_nr;
+
+extern int inet_bind(struct socket *sock,
+ struct sockaddr *uaddr, int addr_len);
+extern int inet_getname(struct socket *sock,
+ struct sockaddr *uaddr,
+ int *uaddr_len, int peer);
+extern int inet_ioctl(struct socket *sock,
+ unsigned int cmd, unsigned long arg);
+
+#endif
+
+
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
new file mode 100644
index 000000000000..f87845e2e965
--- /dev/null
+++ b/include/net/inet_ecn.h
@@ -0,0 +1,108 @@
+#ifndef _INET_ECN_H_
+#define _INET_ECN_H_
+
+#include <linux/ip.h>
+#include <net/dsfield.h>
+
+enum {
+ INET_ECN_NOT_ECT = 0,
+ INET_ECN_ECT_1 = 1,
+ INET_ECN_ECT_0 = 2,
+ INET_ECN_CE = 3,
+ INET_ECN_MASK = 3,
+};
+
+static inline int INET_ECN_is_ce(__u8 dsfield)
+{
+ return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
+}
+
+static inline int INET_ECN_is_not_ect(__u8 dsfield)
+{
+ return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
+}
+
+static inline int INET_ECN_is_capable(__u8 dsfield)
+{
+ return (dsfield & INET_ECN_ECT_0);
+}
+
+static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
+{
+ outer &= ~INET_ECN_MASK;
+ outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
+ INET_ECN_ECT_0;
+ return outer;
+}
+
+#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
+#define INET_ECN_dontxmit(sk) \
+ do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+
+#define IP6_ECN_flow_init(label) do { \
+ (label) &= ~htonl(INET_ECN_MASK << 20); \
+ } while (0)
+
+#define IP6_ECN_flow_xmit(sk, label) do { \
+ if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+ (label) |= __constant_htons(INET_ECN_ECT_0 << 4); \
+ } while (0)
+
+static inline void IP_ECN_set_ce(struct iphdr *iph)
+{
+ u32 check = iph->check;
+ u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
+
+ /*
+ * After the last operation we have (in binary):
+ * INET_ECN_NOT_ECT => 01
+ * INET_ECN_ECT_1 => 10
+ * INET_ECN_ECT_0 => 11
+ * INET_ECN_CE => 00
+ */
+ if (!(ecn & 2))
+ return;
+
+ /*
+ * The following gives us:
+ * INET_ECN_ECT_1 => check += htons(0xFFFD)
+ * INET_ECN_ECT_0 => check += htons(0xFFFE)
+ */
+ check += htons(0xFFFB) + htons(ecn);
+
+ iph->check = check + (check>=0xFFFF);
+ iph->tos |= INET_ECN_CE;
+}
+
+static inline void IP_ECN_clear(struct iphdr *iph)
+{
+ iph->tos &= ~INET_ECN_MASK;
+}
+
+static inline void ipv4_copy_dscp(struct iphdr *outer, struct iphdr *inner)
+{
+ u32 dscp = ipv4_get_dsfield(outer) & ~INET_ECN_MASK;
+ ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
+}
+
+struct ipv6hdr;
+
+static inline void IP6_ECN_set_ce(struct ipv6hdr *iph)
+{
+ if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
+ return;
+ *(u32*)iph |= htonl(INET_ECN_CE << 20);
+}
+
+static inline void IP6_ECN_clear(struct ipv6hdr *iph)
+{
+ *(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
+}
+
+static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner)
+{
+ u32 dscp = ipv6_get_dsfield(outer) & ~INET_ECN_MASK;
+ ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
+}
+
+#endif
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
new file mode 100644
index 000000000000..7fda471002b6
--- /dev/null
+++ b/include/net/inetpeer.h
@@ -0,0 +1,66 @@
+/*
+ * INETPEER - A storage for permanent information about peers
+ *
+ * Version: $Id: inetpeer.h,v 1.2 2002/01/12 07:54:56 davem Exp $
+ *
+ * Authors: Andrey V. Savochkin <saw@msu.ru>
+ */
+
+#ifndef _NET_INETPEER_H
+#define _NET_INETPEER_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+struct inet_peer
+{
+ struct inet_peer *avl_left, *avl_right;
+ struct inet_peer *unused_next, **unused_prevp;
+ unsigned long dtime; /* time of last use, for
+ * unreferenced entries */
+ atomic_t refcnt;
+ __u32 v4daddr; /* peer's address */
+ __u16 avl_height;
+ __u16 ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ unsigned long tcp_ts_stamp;
+};
+
+void inet_initpeers(void) __init;
+
+/* can be called with or without local BH being disabled */
+struct inet_peer *inet_getpeer(__u32 daddr, int create);
+
+extern spinlock_t inet_peer_unused_lock;
+extern struct inet_peer **inet_peer_unused_tailp;
+/* can be called from BH context or outside */
+static inline void inet_putpeer(struct inet_peer *p)
+{
+ spin_lock_bh(&inet_peer_unused_lock);
+ if (atomic_dec_and_test(&p->refcnt)) {
+ p->unused_prevp = inet_peer_unused_tailp;
+ p->unused_next = NULL;
+ *inet_peer_unused_tailp = p;
+ inet_peer_unused_tailp = &p->unused_next;
+ p->dtime = jiffies;
+ }
+ spin_unlock_bh(&inet_peer_unused_lock);
+}
+
+extern spinlock_t inet_peer_idlock;
+/* can be called with or without local BH being disabled */
+static inline __u16 inet_getid(struct inet_peer *p, int more)
+{
+ __u16 id;
+
+ spin_lock_bh(&inet_peer_idlock);
+ id = p->ip_id_count;
+ p->ip_id_count += 1 + more;
+ spin_unlock_bh(&inet_peer_idlock);
+ return id;
+}
+
+#endif /* _NET_INETPEER_H */
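
inet_getid() above hands out the next IP ID for a peer under a spinlock, reserving a run of IDs when more fragments follow. A user-space sketch of the same idea with a pthread mutex (compile with -pthread; the struct and names are invented for the example):

#include <stdio.h>
#include <pthread.h>

/* Toy counterpart of inet_getid(): hand out the next IP ID for a peer
 * (or a run of IDs when more fragments follow) under a lock. */
struct peer {
    pthread_mutex_t lock;
    unsigned short ip_id_count;
};

static unsigned short get_id(struct peer *p, int more)
{
    unsigned short id;

    pthread_mutex_lock(&p->lock);
    id = p->ip_id_count;
    p->ip_id_count += 1 + more;
    pthread_mutex_unlock(&p->lock);
    return id;
}

int main(void)
{
    struct peer p = { PTHREAD_MUTEX_INITIALIZER, 100 };

    printf("%hu\n", get_id(&p, 0));  /* 100 */
    printf("%hu\n", get_id(&p, 2));  /* 101, reserves 101..103 */
    printf("%hu\n", get_id(&p, 0));  /* 104 */
    return 0;
}
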
diff --git a/include/net/ip.h b/include/net/ip.h
new file mode 100644
index 000000000000..b4db1375da2c
--- /dev/null
+++ b/include/net/ip.h
@@ -0,0 +1,353 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Changes:
+ * Mike McLagan : Routing by source
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _IP_H
+#define _IP_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in_route.h>
+#include <net/route.h>
+#include <net/arp.h>
+#include <net/snmp.h>
+
+struct sock;
+
+struct inet_skb_parm
+{
+ struct ip_options opt; /* Compiled IP options */
+ unsigned char flags;
+
+#define IPSKB_MASQUERADED 1
+#define IPSKB_TRANSLATED 2
+#define IPSKB_FORWARDED 4
+#define IPSKB_XFRM_TUNNEL_SIZE 8
+};
+
+struct ipcm_cookie
+{
+ u32 addr;
+ int oif;
+ struct ip_options *opt;
+};
+
+#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+
+struct ip_ra_chain
+{
+ struct ip_ra_chain *next;
+ struct sock *sk;
+ void (*destructor)(struct sock *);
+};
+
+extern struct ip_ra_chain *ip_ra_chain;
+extern rwlock_t ip_ra_lock;
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+extern void ip_mc_dropsocket(struct sock *);
+extern void ip_mc_dropdevice(struct net_device *dev);
+extern int igmp_mc_proc_init(void);
+
+/*
+ * Functions provided by ip.c
+ */
+
+extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ u32 saddr, u32 daddr,
+ struct ip_options *opt);
+extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt);
+extern int ip_local_deliver(struct sk_buff *skb);
+extern int ip_mr_input(struct sk_buff *skb);
+extern int ip_output(struct sk_buff *skb);
+extern int ip_mc_output(struct sk_buff *skb);
+extern int ip_fragment(struct sk_buff *skb, int (*out)(struct sk_buff*));
+extern int ip_do_nat(struct sk_buff *skb);
+extern void ip_send_check(struct iphdr *ip);
+extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok);
+extern void ip_init(void);
+extern int ip_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int len, int protolen,
+ struct ipcm_cookie *ipc,
+ struct rtable *rt,
+ unsigned int flags);
+extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
+extern ssize_t ip_append_page(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+extern int ip_push_pending_frames(struct sock *sk);
+extern void ip_flush_pending_frames(struct sock *sk);
+
+/* datagram.c */
+extern int ip4_datagram_connect(struct sock *sk,
+ struct sockaddr *uaddr, int addr_len);
+
+/*
+ * Map a multicast IP onto multicast MAC for type Token Ring.
+ * This conforms to RFC 1469 Option 2 multicasting, i.e.
+ * using a functional address to transmit/receive
+ * multicast packets.
+ */
+
+static inline void ip_tr_mc_map(u32 addr, char *buf)
+{
+ buf[0]=0xC0;
+ buf[1]=0x00;
+ buf[2]=0x00;
+ buf[3]=0x04;
+ buf[4]=0x00;
+ buf[5]=0x00;
+}
+
+struct ip_reply_arg {
+ struct kvec iov[1];
+ u32 csum;
+ int csumoffset; /* u16 offset of csum in iov[0].iov_base */
+ /* -1 if not needed */
+};
+
+void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
+ unsigned int len);
+
+extern int ip_finish_output(struct sk_buff *skb);
+
+struct ipv4_config
+{
+ int log_martians;
+ int autoconfig;
+ int no_pmtu_disc;
+};
+
+extern struct ipv4_config ipv4_config;
+DECLARE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+#define IP_INC_STATS(field) SNMP_INC_STATS(ip_statistics, field)
+#define IP_INC_STATS_BH(field) SNMP_INC_STATS_BH(ip_statistics, field)
+#define IP_INC_STATS_USER(field) SNMP_INC_STATS_USER(ip_statistics, field)
+DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
+#define NET_INC_STATS(field) SNMP_INC_STATS(net_statistics, field)
+#define NET_INC_STATS_BH(field) SNMP_INC_STATS_BH(net_statistics, field)
+#define NET_INC_STATS_USER(field) SNMP_INC_STATS_USER(net_statistics, field)
+#define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd)
+#define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd)
+
+extern int sysctl_local_port_range[2];
+extern int sysctl_ip_default_ttl;
+
+#ifdef CONFIG_INET
+/* The function in 2.2 was invalid, producing the wrong result for
+ * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
+static inline
+int ip_decrease_ttl(struct iphdr *iph)
+{
+ u32 check = iph->check;
+ check += htons(0x0100);
+ iph->check = check + (check>=0xFFFF);
+ return --iph->ttl;
+}
+
+static inline
+int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+{
+ return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
+ (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
+ !(dst_metric(dst, RTAX_LOCK)&(1<<RTAX_MTU))));
+}
+
+extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+
+static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+{
+ if (iph->frag_off & htons(IP_DF)) {
+ /* This is only to work around buggy Windows95/2000
+ * VJ compression implementations. If the ID field
+ * does not change, they drop every other packet in
+ * a TCP stream using header compression.
+ */
+ iph->id = (sk && inet_sk(sk)->daddr) ?
+ htons(inet_sk(sk)->id++) : 0;
+ } else
+ __ip_select_ident(iph, dst, 0);
+}
+
+static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
+{
+ if (iph->frag_off & htons(IP_DF)) {
+ if (sk && inet_sk(sk)->daddr) {
+ iph->id = htons(inet_sk(sk)->id);
+ inet_sk(sk)->id += 1 + more;
+ } else
+ iph->id = 0;
+ } else
+ __ip_select_ident(iph, dst, more);
+}
+
+/*
+ * Map a multicast IP onto multicast MAC for type ethernet.
+ */
+
+static inline void ip_eth_mc_map(u32 addr, char *buf)
+{
+ addr=ntohl(addr);
+ buf[0]=0x01;
+ buf[1]=0x00;
+ buf[2]=0x5e;
+ buf[5]=addr&0xFF;
+ addr>>=8;
+ buf[4]=addr&0xFF;
+ addr>>=8;
+ buf[3]=addr&0x7F;
+}
+
+/*
+ * Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
+ * Leave P_Key as 0 to be filled in by driver.
+ */
+
+static inline void ip_ib_mc_map(u32 addr, char *buf)
+{
+ buf[0] = 0; /* Reserved */
+ buf[1] = 0xff; /* Multicast QPN */
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ addr = ntohl(addr);
+ buf[4] = 0xff;
+ buf[5] = 0x12; /* link local scope */
+ buf[6] = 0x40; /* IPv4 signature */
+ buf[7] = 0x1b;
+ buf[8] = 0; /* P_Key */
+ buf[9] = 0;
+ buf[10] = 0;
+ buf[11] = 0;
+ buf[12] = 0;
+ buf[13] = 0;
+ buf[14] = 0;
+ buf[15] = 0;
+ buf[19] = addr & 0xff;
+ addr >>= 8;
+ buf[18] = addr & 0xff;
+ addr >>= 8;
+ buf[17] = addr & 0xff;
+ addr >>= 8;
+ buf[16] = addr & 0x0f;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <linux/ipv6.h>
+#endif
+
+static __inline__ void inet_reset_saddr(struct sock *sk)
+{
+ inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ if (sk->sk_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ memset(&np->saddr, 0, sizeof(np->saddr));
+ memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+ }
+#endif
+}
+
+#endif
+
+extern int ip_call_ra_chain(struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_fragment.o
+ */
+
+enum ip_defrag_users
+{
+ IP_DEFRAG_LOCAL_DELIVER,
+ IP_DEFRAG_CALL_RA_CHAIN,
+ IP_DEFRAG_CONNTRACK_IN,
+ IP_DEFRAG_CONNTRACK_OUT,
+ IP_DEFRAG_NAT_OUT,
+ IP_DEFRAG_VS_IN,
+ IP_DEFRAG_VS_OUT,
+ IP_DEFRAG_VS_FWD
+};
+
+struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user);
+extern int ip_frag_nqueues;
+extern atomic_t ip_frag_mem;
+
+/*
+ * Functions provided by ip_forward.c
+ */
+
+extern int ip_forward(struct sk_buff *skb);
+extern int ip_net_unreachable(struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_options.c
+ */
+
+extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, u32 daddr, struct rtable *rt, int is_frag);
+extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+extern void ip_options_fragment(struct sk_buff *skb);
+extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb);
+extern int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user);
+extern void ip_options_undo(struct ip_options * opt);
+extern void ip_forward_options(struct sk_buff *skb);
+extern int ip_options_rcv_srr(struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_sockglue.c
+ */
+
+extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc);
+extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen);
+extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
+extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+
+extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+ u16 port, u32 info, u8 *payload);
+extern void ip_local_error(struct sock *sk, int err, u32 daddr, u16 dport,
+ u32 info);
+
+/* sysctl helpers - any sysctl which holds a value that ends up being
+ * fed into the routing cache should use these handlers.
+ */
+int ipv4_doint_and_flush(ctl_table *ctl, int write,
+ struct file* filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen,
+ void **context);
+
+#endif /* _IP_H */
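
ip_eth_mc_map() above builds the Ethernet group MAC from 01:00:5e plus the low 23 bits of the IPv4 group address, which is why multiple groups can share one MAC. A user-space sketch of the mapping (eth_mc_map() is a local copy for the example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Same arithmetic as ip_eth_mc_map(): 01:00:5e plus the low 23 bits of
 * the group address, so e.g. 224.128.1.1 and 225.0.1.1 share a MAC. */
static void eth_mc_map(uint32_t addr, unsigned char *buf)
{
    addr = ntohl(addr);
    buf[0] = 0x01;
    buf[1] = 0x00;
    buf[2] = 0x5e;
    buf[5] = addr & 0xff;  addr >>= 8;
    buf[4] = addr & 0xff;  addr >>= 8;
    buf[3] = addr & 0x7f;
}

int main(void)
{
    struct in_addr grp;
    unsigned char mac[6];

    inet_pton(AF_INET, "239.1.2.3", &grp);
    eth_mc_map(grp.s_addr, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    /* prints 01:00:5e:01:02:03 */
    return 0;
}
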
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
new file mode 100644
index 000000000000..3dfc885bdf25
--- /dev/null
+++ b/include/net/ip6_checksum.h
@@ -0,0 +1,94 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IPv6
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Fixes:
+ *
+ * Ralf Baechle : generic ipv6 checksum
+ * <ralf@waldorf-gmbh.de>
+ */
+
+#ifndef _CHECKSUM_IPV6_H
+#define _CHECKSUM_IPV6_H
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+#include <linux/in6.h>
+
+#ifndef _HAVE_ARCH_IPV6_CSUM
+
+static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u16 len,
+ unsigned short proto,
+ unsigned int csum)
+{
+
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+
+ csum += saddr->s6_addr32[0];
+ carry = (csum < saddr->s6_addr32[0]);
+ csum += carry;
+
+ csum += saddr->s6_addr32[1];
+ carry = (csum < saddr->s6_addr32[1]);
+ csum += carry;
+
+ csum += saddr->s6_addr32[2];
+ carry = (csum < saddr->s6_addr32[2]);
+ csum += carry;
+
+ csum += saddr->s6_addr32[3];
+ carry = (csum < saddr->s6_addr32[3]);
+ csum += carry;
+
+ csum += daddr->s6_addr32[0];
+ carry = (csum < daddr->s6_addr32[0]);
+ csum += carry;
+
+ csum += daddr->s6_addr32[1];
+ carry = (csum < daddr->s6_addr32[1]);
+ csum += carry;
+
+ csum += daddr->s6_addr32[2];
+ carry = (csum < daddr->s6_addr32[2]);
+ csum += carry;
+
+ csum += daddr->s6_addr32[3];
+ carry = (csum < daddr->s6_addr32[3]);
+ csum += carry;
+
+ ulen = htonl((__u32) len);
+ csum += ulen;
+ carry = (csum < ulen);
+ csum += carry;
+
+ uproto = htonl(proto);
+ csum += uproto;
+ carry = (csum < uproto);
+ csum += carry;
+
+ return csum_fold(csum);
+}
+
+#endif
+#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
new file mode 100644
index 000000000000..319904518194
--- /dev/null
+++ b/include/net/ip6_fib.h
@@ -0,0 +1,185 @@
+/*
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _IP6_FIB_H
+#define _IP6_FIB_H
+
+#ifdef __KERNEL__
+
+#include <linux/ipv6_route.h>
+
+#include <net/dst.h>
+#include <net/flow.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+
+struct rt6_info;
+
+struct fib6_node
+{
+ struct fib6_node *parent;
+ struct fib6_node *left;
+ struct fib6_node *right;
+
+ struct fib6_node *subtree;
+
+ struct rt6_info *leaf;
+
+ __u16 fn_bit; /* bit key */
+ __u16 fn_flags;
+ __u32 fn_sernum;
+};
+
+
+/*
+ * routing information
+ *
+ */
+
+struct rt6key
+{
+ struct in6_addr addr;
+ int plen;
+};
+
+struct rt6_info
+{
+ union {
+ struct dst_entry dst;
+ struct rt6_info *next;
+ } u;
+
+ struct inet6_dev *rt6i_idev;
+
+#define rt6i_dev u.dst.dev
+#define rt6i_nexthop u.dst.neighbour
+#define rt6i_expires u.dst.expires
+
+ struct fib6_node *rt6i_node;
+
+ struct in6_addr rt6i_gateway;
+
+ u32 rt6i_flags;
+ u32 rt6i_metric;
+ atomic_t rt6i_ref;
+
+ struct rt6key rt6i_dst;
+ struct rt6key rt6i_src;
+
+ u8 rt6i_protocol;
+};
+
+struct fib6_walker_t
+{
+ struct fib6_walker_t *prev, *next;
+ struct fib6_node *root, *node;
+ struct rt6_info *leaf;
+ unsigned char state;
+ unsigned char prune;
+ int (*func)(struct fib6_walker_t *);
+ void *args;
+};
+
+extern struct fib6_walker_t fib6_walker_list;
+extern rwlock_t fib6_walker_lock;
+
+static inline void fib6_walker_link(struct fib6_walker_t *w)
+{
+ write_lock_bh(&fib6_walker_lock);
+ w->next = fib6_walker_list.next;
+ w->prev = &fib6_walker_list;
+ w->next->prev = w;
+ w->prev->next = w;
+ write_unlock_bh(&fib6_walker_lock);
+}
+
+static inline void fib6_walker_unlink(struct fib6_walker_t *w)
+{
+ write_lock_bh(&fib6_walker_lock);
+ w->next->prev = w->prev;
+ w->prev->next = w->next;
+ w->prev = w->next = w;
+ write_unlock_bh(&fib6_walker_lock);
+}
+
+struct rt6_statistics {
+ __u32 fib_nodes;
+ __u32 fib_route_nodes;
+ __u32 fib_rt_alloc; /* permanent routes */
+ __u32 fib_rt_entries; /* rt entries in table */
+ __u32 fib_rt_cache; /* cache routes */
+ __u32 fib_discarded_routes;
+};
+
+#define RTN_TL_ROOT 0x0001
+#define RTN_ROOT 0x0002 /* tree root node */
+#define RTN_RTINFO 0x0004 /* node with valid routing info */
+
+/*
+ * priority levels (or metrics)
+ *
+ */
+
+#define RTPRI_FIREWALL 8 /* Firewall control information */
+#define RTPRI_FLOW 16 /* Flow based forwarding rules */
+#define RTPRI_KERN_CTL 32 /* Kernel control routes */
+
+#define RTPRI_USER_MIN 256 /* Minimum user priority */
+#define RTPRI_USER_MAX 1024 /* Maximum user priority */
+
+#define RTPRI_KERN_DFLT 4096 /* Kernel default routes */
+
+#define MAX_FLOW_BACKTRACE 32
+
+
+typedef void (*f_pnode)(struct fib6_node *fn, void *);
+
+extern struct fib6_node ip6_routing_table;
+
+/*
+ * exported functions
+ */
+
+extern struct fib6_node *fib6_lookup(struct fib6_node *root,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+
+extern struct fib6_node *fib6_locate(struct fib6_node *root,
+ struct in6_addr *daddr, int dst_len,
+ struct in6_addr *saddr, int src_len);
+
+extern void fib6_clean_tree(struct fib6_node *root,
+ int (*func)(struct rt6_info *, void *arg),
+ int prune, void *arg);
+
+extern int fib6_walk(struct fib6_walker_t *w);
+extern int fib6_walk_continue(struct fib6_walker_t *w);
+
+extern int fib6_add(struct fib6_node *root,
+ struct rt6_info *rt,
+ struct nlmsghdr *nlh,
+ void *rtattr);
+
+extern int fib6_del(struct rt6_info *rt,
+ struct nlmsghdr *nlh,
+ void *rtattr);
+
+extern void inet6_rt_notify(int event, struct rt6_info *rt,
+ struct nlmsghdr *nlh);
+
+extern void fib6_run_gc(unsigned long dummy);
+
+extern void fib6_gc_cleanup(void);
+
+extern void fib6_init(void);
+#endif
+#endif
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
new file mode 100644
index 000000000000..d5d1dd10cdb8
--- /dev/null
+++ b/include/net/ip6_route.h
@@ -0,0 +1,141 @@
+#ifndef _NET_IP6_ROUTE_H
+#define _NET_IP6_ROUTE_H
+
+#define IP6_RT_PRIO_FW 16
+#define IP6_RT_PRIO_USER 1024
+#define IP6_RT_PRIO_ADDRCONF 256
+#define IP6_RT_PRIO_KERN 512
+#define IP6_RT_FLOW_MASK 0x00ff
+
+#ifdef __KERNEL__
+
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct pol_chain {
+ int type;
+ int priority;
+ struct fib6_node *rules;
+ struct pol_chain *next;
+};
+
+extern struct rt6_info ip6_null_entry;
+
+extern int ip6_rt_gc_interval;
+
+extern void ip6_route_input(struct sk_buff *skb);
+
+extern struct dst_entry * ip6_route_output(struct sock *sk,
+ struct flowi *fl);
+
+extern int ip6_route_me_harder(struct sk_buff *skb);
+
+extern void ip6_route_init(void);
+extern void ip6_route_cleanup(void);
+
+extern int ipv6_route_ioctl(unsigned int cmd, void __user *arg);
+
+extern int ip6_route_add(struct in6_rtmsg *rtmsg,
+ struct nlmsghdr *,
+ void *rtattr);
+extern int ip6_ins_rt(struct rt6_info *,
+ struct nlmsghdr *,
+ void *rtattr);
+extern int ip6_del_rt(struct rt6_info *,
+ struct nlmsghdr *,
+ void *rtattr);
+
+extern int ip6_rt_addr_add(struct in6_addr *addr,
+ struct net_device *dev,
+ int anycast);
+
+extern int ip6_rt_addr_del(struct in6_addr *addr,
+ struct net_device *dev);
+
+extern void rt6_sndmsg(int type, struct in6_addr *dst,
+ struct in6_addr *src,
+ struct in6_addr *gw,
+ struct net_device *dev,
+ int dstlen, int srclen,
+ int metric, __u32 flags);
+
+extern struct rt6_info *rt6_lookup(struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ int oif, int flags);
+
+extern struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
+ struct neighbour *neigh,
+ struct in6_addr *addr,
+ int (*output)(struct sk_buff *));
+extern int ndisc_dst_gc(int *more);
+extern void fib6_force_start_gc(void);
+
+extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ const struct in6_addr *addr,
+ int anycast);
+
+/*
+ * support functions for ND
+ *
+ */
+extern struct rt6_info * rt6_get_dflt_router(struct in6_addr *addr,
+ struct net_device *dev);
+extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr,
+ struct net_device *dev);
+
+extern void rt6_purge_dflt_routers(void);
+
+extern void rt6_reset_dflt_pointer(struct rt6_info *rt);
+
+extern void rt6_redirect(struct in6_addr *dest,
+ struct in6_addr *saddr,
+ struct neighbour *neigh,
+ u8 *lladdr,
+ int on_link);
+
+extern void rt6_pmtu_discovery(struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ struct net_device *dev,
+ u32 pmtu);
+
+struct nlmsghdr;
+struct netlink_callback;
+extern int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb);
+extern int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet6_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+
+extern void rt6_ifdown(struct net_device *dev);
+extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
+
+extern rwlock_t rt6_lock;
+
+/*
+ * Store a destination cache entry in a socket
+ */
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+ struct in6_addr *daddr)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ write_lock(&sk->sk_dst_lock);
+ __sk_dst_set(sk, dst);
+ np->daddr_cache = daddr;
+ np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ write_unlock(&sk->sk_dst_lock);
+}
+
+static inline int ipv6_unicast_destination(struct sk_buff *skb)
+{
+ struct rt6_info *rt = (struct rt6_info *) skb->dst;
+
+ return rt->rt6i_flags & RTF_LOCAL;
+}
+
+#endif
+#endif
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
new file mode 100644
index 000000000000..29c9da707c7a
--- /dev/null
+++ b/include/net/ip6_tunnel.h
@@ -0,0 +1,40 @@
+/*
+ * $Id$
+ */
+
+#ifndef _NET_IP6_TUNNEL_H
+#define _NET_IP6_TUNNEL_H
+
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <linux/ip6_tunnel.h>
+
+/* capable of sending packets */
+#define IP6_TNL_F_CAP_XMIT 0x10000
+/* capable of receiving packets */
+#define IP6_TNL_F_CAP_RCV 0x20000
+
+#define IP6_TNL_MAX 128
+
+/* IPv6 tunnel */
+
+struct ip6_tnl {
+ struct ip6_tnl *next; /* next tunnel in list */
+ struct net_device *dev; /* virtual device associated with tunnel */
+ struct net_device_stats stat; /* statistics for tunnel device */
+ int recursion; /* depth of hard_start_xmit recursion */
+ struct ip6_tnl_parm parms; /* tunnel configuration parameters */
+ struct flowi fl; /* flowi template for xmit */
+ struct dst_entry *dst_cache; /* cached dst */
+ u32 dst_cookie;
+};
+
+/* Tunnel encapsulation limit destination sub-option */
+
+struct ipv6_tlv_tnl_enc_lim {
+ __u8 type; /* type-code for option */
+ __u8 length; /* option length */
+ __u8 encap_limit; /* tunnel encapsulation limit */
+} __attribute__ ((packed));
+
+#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
new file mode 100644
index 000000000000..e5a5f6b62f88
--- /dev/null
+++ b/include/net/ip_fib.h
@@ -0,0 +1,284 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Forwarding Information Base.
+ *
+ * Authors: A.N.Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_IP_FIB_H
+#define _NET_IP_FIB_H
+
+#include <linux/config.h>
+#include <net/flow.h>
+#include <linux/seq_file.h>
+
+/* WARNING: The ordering of these elements must match ordering
+ * of RTA_* rtnetlink attribute numbers.
+ */
+struct kern_rta {
+ void *rta_dst;
+ void *rta_src;
+ int *rta_iif;
+ int *rta_oif;
+ void *rta_gw;
+ u32 *rta_priority;
+ void *rta_prefsrc;
+ struct rtattr *rta_mx;
+ struct rtattr *rta_mp;
+ unsigned char *rta_protoinfo;
+ u32 *rta_flow;
+ struct rta_cacheinfo *rta_ci;
+ struct rta_session *rta_sess;
+ u32 *rta_mp_alg;
+};
+
+struct fib_info;
+
+struct fib_nh {
+ struct net_device *nh_dev;
+ struct hlist_node nh_hash;
+ struct fib_info *nh_parent;
+ unsigned nh_flags;
+ unsigned char nh_scope;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int nh_weight;
+ int nh_power;
+#endif
+#ifdef CONFIG_NET_CLS_ROUTE
+ __u32 nh_tclassid;
+#endif
+ int nh_oif;
+ u32 nh_gw;
+};
+
+/*
+ * This structure contains data shared by many routes.
+ */
+
+struct fib_info {
+ struct hlist_node fib_hash;
+ struct hlist_node fib_lhash;
+ int fib_treeref;
+ atomic_t fib_clntref;
+ int fib_dead;
+ unsigned fib_flags;
+ int fib_protocol;
+ u32 fib_prefsrc;
+ u32 fib_priority;
+ u32 fib_metrics[RTAX_MAX];
+#define fib_mtu fib_metrics[RTAX_MTU-1]
+#define fib_window fib_metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+ int fib_nhs;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int fib_power;
+#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ u32 fib_mp_alg;
+#endif
+ struct fib_nh fib_nh[0];
+#define fib_dev fib_nh[0].nh_dev
+};
+
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+struct fib_rule;
+#endif
+
+struct fib_result {
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ __u32 network;
+ __u32 netmask;
+#endif
+ struct fib_info *fi;
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ struct fib_rule *r;
+#endif
+};
+
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+#define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+#define FIB_RES_RESET(res) ((res).nh_sel = 0)
+
+#else /* CONFIG_IP_ROUTE_MULTIPATH */
+
+#define FIB_RES_NH(res) ((res).fi->fib_nh[0])
+#define FIB_RES_RESET(res)
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
+#define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res))
+#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
+#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev)
+#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif)
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+#define FIB_RES_NETWORK(res) ((res).network)
+#define FIB_RES_NETMASK(res) ((res).netmask)
+#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
+#define FIB_RES_NETWORK(res) (0)
+#define FIB_RES_NETMASK(res) (0)
+#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
+
+struct fib_table {
+ unsigned char tb_id;
+ unsigned tb_stamp;
+ int (*tb_lookup)(struct fib_table *tb, const struct flowi *flp, struct fib_result *res);
+ int (*tb_insert)(struct fib_table *table, struct rtmsg *r,
+ struct kern_rta *rta, struct nlmsghdr *n,
+ struct netlink_skb_parms *req);
+ int (*tb_delete)(struct fib_table *table, struct rtmsg *r,
+ struct kern_rta *rta, struct nlmsghdr *n,
+ struct netlink_skb_parms *req);
+ int (*tb_dump)(struct fib_table *table, struct sk_buff *skb,
+ struct netlink_callback *cb);
+ int (*tb_flush)(struct fib_table *table);
+ void (*tb_select_default)(struct fib_table *table,
+ const struct flowi *flp, struct fib_result *res);
+
+ unsigned char tb_data[0];
+};
+
+#ifndef CONFIG_IP_MULTIPLE_TABLES
+
+extern struct fib_table *ip_fib_local_table;
+extern struct fib_table *ip_fib_main_table;
+
+static inline struct fib_table *fib_get_table(int id)
+{
+ if (id != RT_TABLE_LOCAL)
+ return ip_fib_main_table;
+ return ip_fib_local_table;
+}
+
+static inline struct fib_table *fib_new_table(int id)
+{
+ return fib_get_table(id);
+}
+
+static inline int fib_lookup(const struct flowi *flp, struct fib_result *res)
+{
+ if (ip_fib_local_table->tb_lookup(ip_fib_local_table, flp, res) &&
+ ip_fib_main_table->tb_lookup(ip_fib_main_table, flp, res))
+ return -ENETUNREACH;
+ return 0;
+}
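+
+/*
+ * Illustrative call sequence (a sketch, not code from this tree; the
+ * variable names are made up):
+ *
+ *        struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstaddr } } };
+ *        struct fib_result res;
+ *
+ *        if (fib_lookup(&fl, &res) == 0) {
+ *                ... use FIB_RES_DEV(res), FIB_RES_GW(res), res.type ...
+ *                fib_res_put(&res);
+ *        }
+ */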
+
+static inline void fib_select_default(const struct flowi *flp, struct fib_result *res)
+{
+ if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
+ ip_fib_main_table->tb_select_default(ip_fib_main_table, flp, res);
+}
+
+#else /* CONFIG_IP_MULTIPLE_TABLES */
+#define ip_fib_local_table (fib_tables[RT_TABLE_LOCAL])
+#define ip_fib_main_table (fib_tables[RT_TABLE_MAIN])
+
+extern struct fib_table * fib_tables[RT_TABLE_MAX+1];
+extern int fib_lookup(const struct flowi *flp, struct fib_result *res);
+extern struct fib_table *__fib_new_table(int id);
+extern void fib_rule_put(struct fib_rule *r);
+
+static inline struct fib_table *fib_get_table(int id)
+{
+ if (id == 0)
+ id = RT_TABLE_MAIN;
+
+ return fib_tables[id];
+}
+
+static inline struct fib_table *fib_new_table(int id)
+{
+ if (id == 0)
+ id = RT_TABLE_MAIN;
+
+ return fib_tables[id] ? : __fib_new_table(id);
+}
+
+extern void fib_select_default(const struct flowi *flp, struct fib_result *res);
+
+#endif /* CONFIG_IP_MULTIPLE_TABLES */
+
+/* Exported by fib_frontend.c */
+extern void ip_fib_init(void);
+extern int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb);
+extern int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
+ struct net_device *dev, u32 *spec_dst, u32 *itag);
+extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
+
+/* Exported by fib_semantics.c */
+extern int ip_fib_check_default(u32 gw, struct net_device *dev);
+extern int fib_sync_down(u32 local, struct net_device *dev, int force);
+extern int fib_sync_up(struct net_device *dev);
+extern int fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
+ struct kern_rta *rta, struct rtentry *r);
+extern u32 __fib_res_prefsrc(struct fib_result *res);
+
+/* Exported by fib_hash.c */
+extern struct fib_table *fib_hash_init(int id);
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+/* Exported by fib_rules.c */
+
+extern int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
+extern int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb);
+#ifdef CONFIG_NET_CLS_ROUTE
+extern u32 fib_rules_tclass(struct fib_result *res);
+#endif
+extern void fib_rules_init(void);
+#endif
+
+static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+{
+#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ u32 rtag;
+#endif
+ *itag = FIB_RES_NH(*res).nh_tclassid<<16;
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ rtag = fib_rules_tclass(res);
+ if (*itag == 0)
+ *itag = (rtag<<16);
+ *itag |= (rtag>>16);
+#endif
+#endif
+}
+
+extern void free_fib_info(struct fib_info *fi);
+
+static inline void fib_info_put(struct fib_info *fi)
+{
+ if (atomic_dec_and_test(&fi->fib_clntref))
+ free_fib_info(fi);
+}
+
+static inline void fib_res_put(struct fib_result *res)
+{
+ if (res->fi)
+ fib_info_put(res->fi);
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ if (res->r)
+ fib_rule_put(res->r);
+#endif
+}
+
+#endif /* _NET_IP_FIB_H */
diff --git a/include/net/ip_mp_alg.h b/include/net/ip_mp_alg.h
new file mode 100644
index 000000000000..77225735cbd4
--- /dev/null
+++ b/include/net/ip_mp_alg.h
@@ -0,0 +1,99 @@
+/* ip_mp_alg.h: IPV4 multipath algorithm support.
+ *
+ * Copyright (C) 2004, 2005 Einar Lueck <elueck@de.ibm.com>
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+#ifndef _NET_IP_MP_ALG_H
+#define _NET_IP_MP_ALG_H
+
+#include <linux/config.h>
+#include <linux/ip_mp_alg.h>
+#include <net/flow.h>
+#include <net/route.h>
+
+struct fib_nh;
+
+struct ip_mp_alg_ops {
+ void (*mp_alg_select_route)(const struct flowi *flp,
+ struct rtable *rth, struct rtable **rp);
+ void (*mp_alg_flush)(void);
+ void (*mp_alg_set_nhinfo)(__u32 network, __u32 netmask,
+ unsigned char prefixlen,
+ const struct fib_nh *nh);
+ void (*mp_alg_remove)(struct rtable *rth);
+};
+
+extern int multipath_alg_register(struct ip_mp_alg_ops *, enum ip_mp_alg);
+extern void multipath_alg_unregister(struct ip_mp_alg_ops *, enum ip_mp_alg);
+
+extern struct ip_mp_alg_ops *ip_mp_alg_table[];
+
+static inline int multipath_select_route(const struct flowi *flp,
+ struct rtable *rth,
+ struct rtable **rp)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
+
+ /* mp_alg_select_route _MUST_ be implemented */
+ if (ops && (rth->u.dst.flags & DST_BALANCED)) {
+ ops->mp_alg_select_route(flp, rth, rp);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static inline void multipath_flush(void)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ int i;
+
+ for (i = IP_MP_ALG_NONE; i <= IP_MP_ALG_MAX; i++) {
+ struct ip_mp_alg_ops *ops = ip_mp_alg_table[i];
+
+ if (ops && ops->mp_alg_flush)
+ ops->mp_alg_flush();
+ }
+#endif
+}
+
+static inline void multipath_set_nhinfo(struct rtable *rth,
+ __u32 network, __u32 netmask,
+ unsigned char prefixlen,
+ const struct fib_nh *nh)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
+
+ if (ops && ops->mp_alg_set_nhinfo)
+ ops->mp_alg_set_nhinfo(network, netmask, prefixlen, nh);
+#endif
+}
+
+static inline void multipath_remove(struct rtable *rth)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
+
+ if (ops && ops->mp_alg_remove &&
+ (rth->u.dst.flags & DST_BALANCED))
+ ops->mp_alg_remove(rth);
+#endif
+}
+
+static inline int multipath_comparekeys(const struct flowi *flp1,
+ const struct flowi *flp2)
+{
+ return flp1->fl4_dst == flp2->fl4_dst &&
+ flp1->fl4_src == flp2->fl4_src &&
+ flp1->oif == flp2->oif &&
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ flp1->fl4_fwmark == flp2->fl4_fwmark &&
+#endif
+ !((flp1->fl4_tos ^ flp2->fl4_tos) &
+ (IPTOS_RT_MASK | RTO_ONLINK));
+}
+
+#endif /* _NET_IP_MP_ALG_H */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
new file mode 100644
index 000000000000..52da5d26617a
--- /dev/null
+++ b/include/net/ip_vs.h
@@ -0,0 +1,999 @@
+/*
+ * IP Virtual Server
+ * data structure and functionality definitions
+ */
+
+#ifndef _IP_VS_H
+#define _IP_VS_H
+
+#include <asm/types.h> /* For __uXX types */
+
+#define IP_VS_VERSION_CODE 0x010201
+#define NVERSION(version) \
+ (version >> 16) & 0xFF, \
+ (version >> 8) & 0xFF, \
+ version & 0xFF
+
+/*
+ * Virtual Service Flags
+ */
+#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */
+#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */
+
+/*
+ * Destination Server Flags
+ */
+#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */
+#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */
+
+/*
+ * IPVS sync daemon states
+ */
+#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */
+#define IP_VS_STATE_MASTER 0x0001 /* started as master */
+#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */
+
+/*
+ * IPVS socket options
+ */
+#define IP_VS_BASE_CTL (64+1024+64) /* base */
+
+#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */
+#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1)
+#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2)
+#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3)
+#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4)
+#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5)
+#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6)
+#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7)
+#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8)
+#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9)
+#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10)
+#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11)
+#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12)
+#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13)
+#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14)
+#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15)
+#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO
+
+#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL
+#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1)
+#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2)
+#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3)
+#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4)
+#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */
+#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6)
+#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7)
+#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON
+
+
+/*
+ * IPVS Connection Flags
+ */
+#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */
+#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */
+#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */
+#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */
+#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */
+#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */
+#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */
+#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */
+#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */
+#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */
+#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */
+#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */
+#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */
+#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */
+
+/* Move it to a better place one day; for now keep it unique */
+#define NFC_IPVS_PROPERTY 0x10000
+
+#define IP_VS_SCHEDNAME_MAXLEN 16
+#define IP_VS_IFNAME_MAXLEN 16
+
+
+/*
+ * The struct ip_vs_service_user and struct ip_vs_dest_user are
+ * used to set IPVS rules through setsockopt.
+ */
+struct ip_vs_service_user {
+ /* virtual service addresses */
+ u_int16_t protocol;
+ u_int32_t addr; /* virtual ip address */
+ u_int16_t port;
+ u_int32_t fwmark; /* firewall mark of service */
+
+ /* virtual service options */
+ char sched_name[IP_VS_SCHEDNAME_MAXLEN];
+ unsigned flags; /* virtual service flags */
+ unsigned timeout; /* persistent timeout in sec */
+ u_int32_t netmask; /* persistent netmask */
+};
+
+
+struct ip_vs_dest_user {
+ /* destination server address */
+ u_int32_t addr;
+ u_int16_t port;
+
+ /* real server options */
+ unsigned conn_flags; /* connection flags */
+ int weight; /* destination weight */
+
+ /* thresholds for active connections */
+ u_int32_t u_threshold; /* upper threshold */
+ u_int32_t l_threshold; /* lower threshold */
+};
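+
+/*
+ * Usage sketch (this mirrors how ipvsadm drives the interface; the values
+ * and variable names below are purely illustrative).  Addresses and ports
+ * are passed in network byte order:
+ *
+ *        struct ip_vs_service_user svc;
+ *        int fd;
+ *
+ *        memset(&svc, 0, sizeof(svc));
+ *        svc.protocol = IPPROTO_TCP;
+ *        svc.addr = inet_addr("192.0.2.1");
+ *        svc.port = htons(80);
+ *        strcpy(svc.sched_name, "rr");
+ *
+ *        fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ *        setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADD, &svc, sizeof(svc));
+ *
+ * IP_VS_SO_SET_ADDDEST works the same way, passing the service
+ * identification followed by a struct ip_vs_dest_user.
+ */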
+
+
+/*
+ * IPVS statistics object (for user space)
+ */
+struct ip_vs_stats_user
+{
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+
+ __u32 cps; /* current connection rate */
+ __u32 inpps; /* current in packet rate */
+ __u32 outpps; /* current out packet rate */
+ __u32 inbps; /* current in byte rate */
+ __u32 outbps; /* current out byte rate */
+};
+
+
+/* The argument to IP_VS_SO_GET_INFO */
+struct ip_vs_getinfo {
+ /* version number */
+ unsigned int version;
+
+ /* size of connection hash table */
+ unsigned int size;
+
+ /* number of virtual services */
+ unsigned int num_services;
+};
+
+
+/* The argument to IP_VS_SO_GET_SERVICE */
+struct ip_vs_service_entry {
+ /* which service: user fills in these */
+ u_int16_t protocol;
+ u_int32_t addr; /* virtual address */
+ u_int16_t port;
+ u_int32_t fwmark; /* firewall mark of service */
+
+ /* service options */
+ char sched_name[IP_VS_SCHEDNAME_MAXLEN];
+ unsigned flags; /* virtual service flags */
+ unsigned timeout; /* persistent timeout */
+ u_int32_t netmask; /* persistent netmask */
+
+ /* number of real servers */
+ unsigned int num_dests;
+
+ /* statistics */
+ struct ip_vs_stats_user stats;
+};
+
+
+struct ip_vs_dest_entry {
+ u_int32_t addr; /* destination address */
+ u_int16_t port;
+ unsigned conn_flags; /* connection flags */
+ int weight; /* destination weight */
+
+ u_int32_t u_threshold; /* upper threshold */
+ u_int32_t l_threshold; /* lower threshold */
+
+ u_int32_t activeconns; /* active connections */
+ u_int32_t inactconns; /* inactive connections */
+ u_int32_t persistconns; /* persistent connections */
+
+ /* statistics */
+ struct ip_vs_stats_user stats;
+};
+
+
+/* The argument to IP_VS_SO_GET_DESTS */
+struct ip_vs_get_dests {
+ /* which service: user fills in these */
+ u_int16_t protocol;
+ u_int32_t addr; /* virtual address */
+ u_int16_t port;
+ u_int32_t fwmark; /* firewall mark of service */
+
+ /* number of real servers */
+ unsigned int num_dests;
+
+ /* the real servers */
+ struct ip_vs_dest_entry entrytable[0];
+};
+
+
+/* The argument to IP_VS_SO_GET_SERVICES */
+struct ip_vs_get_services {
+ /* number of virtual services */
+ unsigned int num_services;
+
+ /* service table */
+ struct ip_vs_service_entry entrytable[0];
+};
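+
+/*
+ * Buffer sizing note (a sketch of the calling convention, as driven by
+ * ipvsadm): for the *_GET_DESTS and *_GET_SERVICES commands the caller
+ * fills in the fixed part, sets num_dests/num_services to n, and hands
+ * getsockopt() a buffer of sizeof(the struct) + n * sizeof(an entry);
+ * the kernel writes the results into the trailing zero-length array.
+ */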
+
+
+/* The argument to IP_VS_SO_GET_TIMEOUT */
+struct ip_vs_timeout_user {
+ int tcp_timeout;
+ int tcp_fin_timeout;
+ int udp_timeout;
+};
+
+
+/* The argument to IP_VS_SO_GET_DAEMON */
+struct ip_vs_daemon_user {
+ /* sync daemon state (master/backup) */
+ int state;
+
+ /* multicast interface name */
+ char mcast_ifn[IP_VS_IFNAME_MAXLEN];
+
+ /* SyncID we belong to */
+ int syncid;
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/list.h> /* for struct list_head */
+#include <linux/spinlock.h> /* for rwlock_t */
+#include <linux/skbuff.h> /* for struct sk_buff */
+#include <linux/ip.h> /* for struct iphdr */
+#include <asm/atomic.h> /* for atomic_t */
+#include <linux/netdevice.h> /* for struct neighbour */
+#include <net/dst.h> /* for struct dst_entry */
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/compiler.h>
+
+
+#ifdef CONFIG_IP_VS_DEBUG
+extern int ip_vs_get_debug_level(void);
+#define IP_VS_DBG(level, msg...) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG "IPVS: " msg); \
+ } while (0)
+#define IP_VS_DBG_RL(msg...) \
+ do { \
+ if (net_ratelimit()) \
+ printk(KERN_DEBUG "IPVS: " msg); \
+ } while (0)
+#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ pp->debug_packet(pp, skb, ofs, msg); \
+ } while (0)
+#define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) \
+ do { \
+ if (level <= ip_vs_get_debug_level() && \
+ net_ratelimit()) \
+ pp->debug_packet(pp, skb, ofs, msg); \
+ } while (0)
+#else /* NO DEBUGGING at ALL */
+#define IP_VS_DBG(level, msg...) do {} while (0)
+#define IP_VS_DBG_RL(msg...) do {} while (0)
+#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0)
+#define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) do {} while (0)
+#endif
+
+#define IP_VS_BUG() BUG()
+#define IP_VS_ERR(msg...) printk(KERN_ERR "IPVS: " msg)
+#define IP_VS_INFO(msg...) printk(KERN_INFO "IPVS: " msg)
+#define IP_VS_WARNING(msg...) \
+ printk(KERN_WARNING "IPVS: " msg)
+#define IP_VS_ERR_RL(msg...) \
+ do { \
+ if (net_ratelimit()) \
+ printk(KERN_ERR "IPVS: " msg); \
+ } while (0)
+
+#ifdef CONFIG_IP_VS_DEBUG
+#define EnterFunction(level) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG "Enter: %s, %s line %i\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+ } while (0)
+#define LeaveFunction(level) \
+ do { \
+ if (level <= ip_vs_get_debug_level()) \
+ printk(KERN_DEBUG "Leave: %s, %s line %i\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+ } while (0)
+#else
+#define EnterFunction(level) do {} while (0)
+#define LeaveFunction(level) do {} while (0)
+#endif
+
+#define IP_VS_WAIT_WHILE(expr) while (expr) { cpu_relax(); }
+
+
+/*
+ * The port number of FTP service (in network order).
+ */
+#define FTPPORT __constant_htons(21)
+#define FTPDATA __constant_htons(20)
+
+/*
+ * IPVS sysctl variables under the /proc/sys/net/ipv4/vs/
+ */
+#define NET_IPV4_VS 21
+
+enum {
+ NET_IPV4_VS_DEBUG_LEVEL=1,
+ NET_IPV4_VS_AMEMTHRESH=2,
+ NET_IPV4_VS_AMDROPRATE=3,
+ NET_IPV4_VS_DROP_ENTRY=4,
+ NET_IPV4_VS_DROP_PACKET=5,
+ NET_IPV4_VS_SECURE_TCP=6,
+ NET_IPV4_VS_TO_ES=7,
+ NET_IPV4_VS_TO_SS=8,
+ NET_IPV4_VS_TO_SR=9,
+ NET_IPV4_VS_TO_FW=10,
+ NET_IPV4_VS_TO_TW=11,
+ NET_IPV4_VS_TO_CL=12,
+ NET_IPV4_VS_TO_CW=13,
+ NET_IPV4_VS_TO_LA=14,
+ NET_IPV4_VS_TO_LI=15,
+ NET_IPV4_VS_TO_SA=16,
+ NET_IPV4_VS_TO_UDP=17,
+ NET_IPV4_VS_TO_ICMP=18,
+ NET_IPV4_VS_LBLC_EXPIRE=19,
+ NET_IPV4_VS_LBLCR_EXPIRE=20,
+ NET_IPV4_VS_CACHE_BYPASS=22,
+ NET_IPV4_VS_EXPIRE_NODEST_CONN=23,
+ NET_IPV4_VS_SYNC_THRESHOLD=24,
+ NET_IPV4_VS_NAT_ICMP_SEND=25,
+ NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE=26,
+ NET_IPV4_VS_LAST
+};
+
+/*
+ * TCP State Values
+ */
+enum {
+ IP_VS_TCP_S_NONE = 0,
+ IP_VS_TCP_S_ESTABLISHED,
+ IP_VS_TCP_S_SYN_SENT,
+ IP_VS_TCP_S_SYN_RECV,
+ IP_VS_TCP_S_FIN_WAIT,
+ IP_VS_TCP_S_TIME_WAIT,
+ IP_VS_TCP_S_CLOSE,
+ IP_VS_TCP_S_CLOSE_WAIT,
+ IP_VS_TCP_S_LAST_ACK,
+ IP_VS_TCP_S_LISTEN,
+ IP_VS_TCP_S_SYNACK,
+ IP_VS_TCP_S_LAST
+};
+
+/*
+ * UDP State Values
+ */
+enum {
+ IP_VS_UDP_S_NORMAL,
+ IP_VS_UDP_S_LAST,
+};
+
+/*
+ * ICMP State Values
+ */
+enum {
+ IP_VS_ICMP_S_NORMAL,
+ IP_VS_ICMP_S_LAST,
+};
+
+/*
+ * Delta sequence info structure
+ * Each ip_vs_conn has two: one for output and one for input sequence changes.
+ * Only used in the VS/NAT.
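+ * When an application helper resizes a packet, delta records the shift
+ * applied to sequence numbers at or beyond init_seq; previous_delta covers
+ * segments numbered before init_seq (sent before the most recent resize).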
+ */
+struct ip_vs_seq {
+ __u32 init_seq; /* Add delta from this seq */
+ __u32 delta; /* Delta in sequence numbers */
+ __u32 previous_delta; /* Delta in sequence numbers
+ before last resized pkt */
+};
+
+
+/*
+ * IPVS statistics object
+ */
+struct ip_vs_stats
+{
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+
+ __u32 cps; /* current connection rate */
+ __u32 inpps; /* current in packet rate */
+ __u32 outpps; /* current out packet rate */
+ __u32 inbps; /* current in byte rate */
+ __u32 outbps; /* current out byte rate */
+
+ spinlock_t lock; /* spin lock */
+};
+
+struct ip_vs_conn;
+struct ip_vs_app;
+
+struct ip_vs_protocol {
+ struct ip_vs_protocol *next;
+ char *name;
+ __u16 protocol;
+ int dont_defrag;
+ atomic_t appcnt; /* counter of proto app incs */
+ int *timeout_table; /* protocol timeout table */
+
+ void (*init)(struct ip_vs_protocol *pp);
+
+ void (*exit)(struct ip_vs_protocol *pp);
+
+ int (*conn_schedule)(struct sk_buff *skb,
+ struct ip_vs_protocol *pp,
+ int *verdict, struct ip_vs_conn **cpp);
+
+ struct ip_vs_conn *
+ (*conn_in_get)(const struct sk_buff *skb,
+ struct ip_vs_protocol *pp,
+ const struct iphdr *iph,
+ unsigned int proto_off,
+ int inverse);
+
+ struct ip_vs_conn *
+ (*conn_out_get)(const struct sk_buff *skb,
+ struct ip_vs_protocol *pp,
+ const struct iphdr *iph,
+ unsigned int proto_off,
+ int inverse);
+
+ int (*snat_handler)(struct sk_buff **pskb,
+ struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+
+ int (*dnat_handler)(struct sk_buff **pskb,
+ struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+
+ int (*csum_check)(struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+ const char *(*state_name)(int state);
+
+ int (*state_transition)(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb,
+ struct ip_vs_protocol *pp);
+
+ int (*register_app)(struct ip_vs_app *inc);
+
+ void (*unregister_app)(struct ip_vs_app *inc);
+
+ int (*app_conn_bind)(struct ip_vs_conn *cp);
+
+ void (*debug_packet)(struct ip_vs_protocol *pp,
+ const struct sk_buff *skb,
+ int offset,
+ const char *msg);
+
+ void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+
+ int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+};
+
+extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+
+/*
+ * IP_VS structure allocated for each dynamically scheduled connection
+ */
+struct ip_vs_conn {
+ struct list_head c_list; /* hashed list heads */
+
+ /* Protocol, addresses and port numbers */
+ __u32 caddr; /* client address */
+ __u32 vaddr; /* virtual address */
+ __u32 daddr; /* destination address */
+ __u16 cport;
+ __u16 vport;
+ __u16 dport;
+ __u16 protocol; /* Which protocol (TCP/UDP) */
+
+ /* counter and timer */
+ atomic_t refcnt; /* reference count */
+ struct timer_list timer; /* Expiration timer */
+ volatile unsigned long timeout; /* timeout */
+
+ /* Flags and state transition */
+ spinlock_t lock; /* lock for state transition */
+ volatile __u16 flags; /* status flags */
+ volatile __u16 state; /* state info */
+
+ /* Control members */
+ struct ip_vs_conn *control; /* Master control connection */
+ atomic_t n_control; /* Number of controlled ones */
+ struct ip_vs_dest *dest; /* real server */
+ atomic_t in_pkts; /* incoming packet counter */
+
+ /* Packet transmitter for the different forwarding methods. If it
+ mangles the packet, it must return NF_STOLEN (preferred) or NF_DROP;
+ otherwise this would have to be changed to take a struct sk_buff **.
+ */
+ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp);
+
+ /* Note: the following members could be grouped into a structure
+ to save space; they are only used in VS/NAT anyway. */
+ struct ip_vs_app *app; /* bound ip_vs_app object */
+ void *app_data; /* Application private data */
+ struct ip_vs_seq in_seq; /* incoming seq. struct */
+ struct ip_vs_seq out_seq; /* outgoing seq. struct */
+};
+
+
+/*
+ * The information about the virtual service offered to the net
+ * and the forwarding entries
+ */
+struct ip_vs_service {
+ struct list_head s_list; /* for normal service table */
+ struct list_head f_list; /* for fwmark-based service table */
+ atomic_t refcnt; /* reference counter */
+ atomic_t usecnt; /* use counter */
+
+ __u16 protocol; /* which protocol (TCP/UDP) */
+ __u32 addr; /* IP address for virtual service */
+ __u16 port; /* port number for the service */
+ __u32 fwmark; /* firewall mark of the service */
+ unsigned flags; /* service status flags */
+ unsigned timeout; /* persistent timeout in ticks */
+ __u32 netmask; /* grouping granularity */
+
+ struct list_head destinations; /* real server d-linked list */
+ __u32 num_dests; /* number of servers */
+ struct ip_vs_stats stats; /* statistics for the service */
+ struct ip_vs_app *inc; /* bind conns to this app inc */
+
+ /* for scheduling */
+ struct ip_vs_scheduler *scheduler; /* bound scheduler object */
+ rwlock_t sched_lock; /* lock sched_data */
+ void *sched_data; /* scheduler application data */
+};
+
+
+/*
+ * The real server destination forwarding entry
+ * with ip address, port number, and so on.
+ */
+struct ip_vs_dest {
+ struct list_head n_list; /* for the dests in the service */
+ struct list_head d_list; /* for table with all the dests */
+
+ __u32 addr; /* IP address of the server */
+ __u16 port; /* port number of the server */
+ volatile unsigned flags; /* dest status flags */
+ atomic_t conn_flags; /* flags to copy to conn */
+ atomic_t weight; /* server weight */
+
+ atomic_t refcnt; /* reference counter */
+ struct ip_vs_stats stats; /* statistics */
+
+ /* connection counters and thresholds */
+ atomic_t activeconns; /* active connections */
+ atomic_t inactconns; /* inactive connections */
+ atomic_t persistconns; /* persistent connections */
+ __u32 u_threshold; /* upper threshold */
+ __u32 l_threshold; /* lower threshold */
+
+ /* for destination cache */
+ spinlock_t dst_lock; /* lock of dst_cache */
+ struct dst_entry *dst_cache; /* destination cache entry */
+ u32 dst_rtos; /* RT_TOS(tos) for dst */
+
+ /* for virtual service */
+ struct ip_vs_service *svc; /* service it belongs to */
+ __u16 protocol; /* which protocol (TCP/UDP) */
+ __u32 vaddr; /* virtual IP address */
+ __u16 vport; /* virtual port number */
+ __u32 vfwmark; /* firewall mark of service */
+};
+
+
+/*
+ * The scheduler object
+ */
+struct ip_vs_scheduler {
+ struct list_head n_list; /* d-linked list head */
+ char *name; /* scheduler name */
+ atomic_t refcnt; /* reference counter */
+ struct module *module; /* THIS_MODULE/NULL */
+
+ /* scheduler initializing service */
+ int (*init_service)(struct ip_vs_service *svc);
+ /* scheduling service finish */
+ int (*done_service)(struct ip_vs_service *svc);
+ /* scheduler updating service */
+ int (*update_service)(struct ip_vs_service *svc);
+
+ /* selecting a server from the given service */
+ struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
+ const struct sk_buff *skb);
+};
+
+
+/*
+ * The application module object (a.k.a. app incarnation)
+ */
+struct ip_vs_app
+{
+ struct list_head a_list; /* member in app list */
+ int type; /* IP_VS_APP_TYPE_xxx */
+ char *name; /* application module name */
+ __u16 protocol;
+ struct module *module; /* THIS_MODULE/NULL */
+ struct list_head incs_list; /* list of incarnations */
+
+ /* members for application incarnations */
+ struct list_head p_list; /* member in proto app list */
+ struct ip_vs_app *app; /* its real application */
+ __u16 port; /* port number in net order */
+ atomic_t usecnt; /* usage counter */
+
+ /* output hook: returns false if it cannot linearize the skb; *diff is set for TCP. */
+ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct sk_buff **, int *diff);
+
+ /* input hook: returns false if it cannot linearize the skb; *diff is set for TCP. */
+ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct sk_buff **, int *diff);
+
+ /* ip_vs_app initializer */
+ int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+ /* ip_vs_app finish */
+ int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+
+ /* not used now */
+ int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
+ struct ip_vs_protocol *);
+
+ void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+
+ int * timeout_table;
+ int * timeouts;
+ int timeouts_size;
+
+ int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
+ int *verdict, struct ip_vs_conn **cpp);
+
+ struct ip_vs_conn *
+ (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
+ const struct iphdr *iph, unsigned int proto_off,
+ int inverse);
+
+ struct ip_vs_conn *
+ (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
+ const struct iphdr *iph, unsigned int proto_off,
+ int inverse);
+
+ int (*state_transition)(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb,
+ struct ip_vs_app *app);
+
+ void (*timeout_change)(struct ip_vs_app *app, int flags);
+};
+
+
+/*
+ * IPVS core functions
+ * (from ip_vs_core.c)
+ */
+extern const char *ip_vs_proto_name(unsigned proto);
+extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0]))
+
+#define IP_VS_APP_TYPE_UNSPEC 0
+#define IP_VS_APP_TYPE_FTP 1
+
+/*
+ * ip_vs_conn handling functions
+ * (from ip_vs_conn.c)
+ */
+
+/*
+ * IPVS connection entry hash table
+ */
+#ifndef CONFIG_IP_VS_TAB_BITS
+#define CONFIG_IP_VS_TAB_BITS 12
+#endif
+/* make sure that IP_VS_CONN_TAB_BITS lies in [8, 20] */
+#if CONFIG_IP_VS_TAB_BITS < 8
+#define IP_VS_CONN_TAB_BITS 8
+#endif
+#if CONFIG_IP_VS_TAB_BITS > 20
+#define IP_VS_CONN_TAB_BITS 20
+#endif
+#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20
+#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS
+#endif
+#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS)
+#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1)
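+/* e.g. the default of 12 bits gives a 4096-bucket (1 << 12) connection table */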
+
+enum {
+ IP_VS_DIR_INPUT = 0,
+ IP_VS_DIR_OUTPUT,
+ IP_VS_DIR_INPUT_ONLY,
+ IP_VS_DIR_LAST,
+};
+
+extern struct ip_vs_conn *ip_vs_conn_in_get
+(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+extern struct ip_vs_conn *ip_vs_conn_out_get
+(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+
+/* put back the conn without restarting its timer */
+static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
+{
+ atomic_dec(&cp->refcnt);
+}
+extern void ip_vs_conn_put(struct ip_vs_conn *cp);
+extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __u16 cport);
+
+extern struct ip_vs_conn *
+ip_vs_conn_new(int proto, __u32 caddr, __u16 cport, __u32 vaddr, __u16 vport,
+ __u32 daddr, __u16 dport, unsigned flags,
+ struct ip_vs_dest *dest);
+extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+
+extern const char * ip_vs_state_name(__u16 proto, int state);
+
+extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern int ip_vs_check_template(struct ip_vs_conn *ct);
+extern void ip_vs_secure_tcp_set(int on);
+extern void ip_vs_random_dropentry(void);
+extern int ip_vs_conn_init(void);
+extern void ip_vs_conn_cleanup(void);
+
+static inline void ip_vs_control_del(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ctl_cp = cp->control;
+ if (!ctl_cp) {
+ IP_VS_ERR("request control DEL for uncontrolled: "
+ "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
+ NIPQUAD(cp->caddr),ntohs(cp->cport),
+ NIPQUAD(cp->vaddr),ntohs(cp->vport));
+ return;
+ }
+
+ IP_VS_DBG(7, "DELeting control for: "
+ "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n",
+ NIPQUAD(cp->caddr),ntohs(cp->cport),
+ NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport));
+
+ cp->control = NULL;
+ if (atomic_read(&ctl_cp->n_control) == 0) {
+ IP_VS_ERR("BUG control DEL with n=0 : "
+ "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
+ NIPQUAD(cp->caddr),ntohs(cp->cport),
+ NIPQUAD(cp->vaddr),ntohs(cp->vport));
+ return;
+ }
+ atomic_dec(&ctl_cp->n_control);
+}
+
+static inline void
+ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
+{
+ if (cp->control) {
+ IP_VS_ERR("request control ADD for already controlled: "
+ "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
+ NIPQUAD(cp->caddr),ntohs(cp->cport),
+ NIPQUAD(cp->vaddr),ntohs(cp->vport));
+ ip_vs_control_del(cp);
+ }
+
+ IP_VS_DBG(7, "ADDing control for: "
+ "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n",
+ NIPQUAD(cp->caddr),ntohs(cp->cport),
+ NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport));
+
+ cp->control = ctl_cp;
+ atomic_inc(&ctl_cp->n_control);
+}
+
+
+/*
+ * IPVS application functions
+ * (from ip_vs_app.c)
+ */
+#define IP_VS_APP_MAX_PORTS 8
+extern int register_ip_vs_app(struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
+extern int
+register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
+extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb);
+extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb);
+extern int ip_vs_skb_replace(struct sk_buff *skb, int pri,
+ char *o_buf, int o_len, char *n_buf, int n_len);
+extern int ip_vs_app_init(void);
+extern void ip_vs_app_cleanup(void);
+
+
+/*
+ * IPVS protocol functions (from ip_vs_proto.c)
+ */
+extern int ip_vs_protocol_init(void);
+extern void ip_vs_protocol_cleanup(void);
+extern void ip_vs_protocol_timeout_change(int flags);
+extern int *ip_vs_create_timeout_table(int *table, int size);
+extern int
+ip_vs_set_state_timeout(int *table, int num, char **names, char *name, int to);
+extern void
+ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
+ int offset, const char *msg);
+
+extern struct ip_vs_protocol ip_vs_protocol_tcp;
+extern struct ip_vs_protocol ip_vs_protocol_udp;
+extern struct ip_vs_protocol ip_vs_protocol_icmp;
+extern struct ip_vs_protocol ip_vs_protocol_esp;
+extern struct ip_vs_protocol ip_vs_protocol_ah;
+
+
+/*
+ * Registering/unregistering scheduler functions
+ * (from ip_vs_sched.c)
+ */
+extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *scheduler);
+extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc);
+extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+extern struct ip_vs_conn *
+ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb);
+extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+ struct ip_vs_protocol *pp);
+
+
+/*
+ * IPVS control data and functions (from ip_vs_ctl.c)
+ */
+extern int sysctl_ip_vs_cache_bypass;
+extern int sysctl_ip_vs_expire_nodest_conn;
+extern int sysctl_ip_vs_expire_quiescent_template;
+extern int sysctl_ip_vs_sync_threshold[2];
+extern int sysctl_ip_vs_nat_icmp_send;
+extern struct ip_vs_stats ip_vs_stats;
+
+extern struct ip_vs_service *
+ip_vs_service_get(__u32 fwmark, __u16 protocol, __u32 vaddr, __u16 vport);
+
+static inline void ip_vs_service_put(struct ip_vs_service *svc)
+{
+ atomic_dec(&svc->usecnt);
+}
+
+extern struct ip_vs_dest *
+ip_vs_lookup_real_service(__u16 protocol, __u32 daddr, __u16 dport);
+extern int ip_vs_use_count_inc(void);
+extern void ip_vs_use_count_dec(void);
+extern int ip_vs_control_init(void);
+extern void ip_vs_control_cleanup(void);
+
+
+/*
+ * IPVS sync daemon data and function prototypes
+ * (from ip_vs_sync.c)
+ */
+extern volatile int ip_vs_sync_state;
+extern volatile int ip_vs_master_syncid;
+extern volatile int ip_vs_backup_syncid;
+extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
+extern int stop_sync_thread(int state);
+extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+
+
+/*
+ * IPVS rate estimator prototypes (from ip_vs_est.c)
+ */
+extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+
+/*
+ * Various IPVS packet transmitters (from ip_vs_xmit.c)
+ */
+extern int ip_vs_null_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_bypass_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_nat_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_tunnel_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_dr_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_icmp_xmit
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
+extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
+
+
+/*
+ * A simple mechanism to shed packets when we are under load:
+ * set ip_vs_drop_rate to 'n' and one out of every n packets
+ * is dropped.
+ */
+extern int ip_vs_drop_rate;
+extern int ip_vs_drop_counter;
+
+static __inline__ int ip_vs_todrop(void)
+{
+ if (!ip_vs_drop_rate) return 0;
+ if (--ip_vs_drop_counter > 0) return 0;
+ ip_vs_drop_counter = ip_vs_drop_rate;
+ return 1;
+}
+
+/*
+ * ip_vs_fwd_tag returns the forwarding tag of the connection
+ */
+#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
+
+extern __inline__ char ip_vs_fwd_tag(struct ip_vs_conn *cp)
+{
+ char fwd;
+
+ switch (IP_VS_FWD_METHOD(cp)) {
+ case IP_VS_CONN_F_MASQ:
+ fwd = 'M'; break;
+ case IP_VS_CONN_F_LOCALNODE:
+ fwd = 'L'; break;
+ case IP_VS_CONN_F_TUNNEL:
+ fwd = 'T'; break;
+ case IP_VS_CONN_F_DROUTE:
+ fwd = 'R'; break;
+ case IP_VS_CONN_F_BYPASS:
+ fwd = 'B'; break;
+ default:
+ fwd = '?'; break;
+ }
+ return fwd;
+}
+
+extern int ip_vs_make_skb_writable(struct sk_buff **pskb, int len);
+extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
+
+extern u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+
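+/*
+ * Incremental checksum update: given a 16-bit checksum 'oldsum' and a
+ * 32-bit quantity that changed, this folds ~oldsum + old + new. To match
+ * the RFC 1624 update rule HC' = ~(~HC + ~m + m'), callers are expected
+ * to pass the one's complement of the old value in 'old'.
+ */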
+static inline u16 ip_vs_check_diff(u32 old, u32 new, u16 oldsum)
+{
+ u32 diff[2] = { old, new };
+
+ return csum_fold(csum_partial((char *) diff, sizeof(diff),
+ oldsum ^ 0xFFFF));
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_VS_H */
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
new file mode 100644
index 000000000000..e651a57ecdd5
--- /dev/null
+++ b/include/net/ipcomp.h
@@ -0,0 +1,11 @@
+#ifndef _NET_IPCOMP_H
+#define _NET_IPCOMP_H
+
+#define IPCOMP_SCRATCH_SIZE 65400
+
+struct ipcomp_data {
+ u16 threshold;
+ struct crypto_tfm **tfms;
+};
+
+#endif
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
new file mode 100644
index 000000000000..2a1fe996fbc6
--- /dev/null
+++ b/include/net/ipconfig.h
@@ -0,0 +1,27 @@
+/*
+ * $Id: ipconfig.h,v 1.4 2001/04/30 04:51:46 davem Exp $
+ *
+ * Copyright (C) 1997 Martin Mares
+ *
+ * Automatic IP Layer Configuration
+ */
+
+/* The following are initdata: */
+
+extern int ic_proto_enabled; /* Protocols enabled (see IC_xxx) */
+extern int ic_set_manually; /* IPconfig parameters set manually */
+
+extern u32 ic_myaddr; /* My IP address */
+extern u32 ic_gateway; /* Gateway IP address */
+
+extern u32 ic_servaddr; /* Boot server IP address */
+
+extern u32 root_server_addr; /* Address of NFS server */
+extern u8 root_server_path[]; /* Path to mount as root */
+
+
+/* bits in ic_proto_{enabled,used} */
+#define IC_PROTO 0xFF /* Protocols mask: */
+#define IC_BOOTP 0x01 /* BOOTP (or DHCP, see below) */
+#define IC_RARP 0x02 /* RARP */
+#define IC_USE_DHCP 0x100 /* If on, use DHCP instead of BOOTP */
diff --git a/include/net/ipip.h b/include/net/ipip.h
new file mode 100644
index 000000000000..f490c3cbe377
--- /dev/null
+++ b/include/net/ipip.h
@@ -0,0 +1,51 @@
+#ifndef __NET_IPIP_H
+#define __NET_IPIP_H 1
+
+#include <linux/if_tunnel.h>
+
+/* Keep error state on tunnel for 30 sec */
+#define IPTUNNEL_ERR_TIMEO (30*HZ)
+
+struct ip_tunnel
+{
+ struct ip_tunnel *next;
+ struct net_device *dev;
+ struct net_device_stats stat;
+
+ int recursion; /* Depth of hard_start_xmit recursion */
+ int err_count; /* Number of arrived ICMP errors */
+ unsigned long err_time; /* Time when the last ICMP error arrived */
+
+ /* These four fields used only by GRE */
+ __u32 i_seqno; /* The last seen seqno */
+ __u32 o_seqno; /* The last output seqno */
+ int hlen; /* Precalculated GRE header length */
+ int mlink;
+
+ struct ip_tunnel_parm parms;
+};
+
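+/*
+ * Note: this macro relies on 'skb', 'iph', 'rt' and 'stats' being local
+ * variables in scope at the expansion site (the tunnel transmit path).
+ */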
+#define IPTUNNEL_XMIT() do { \
+ int err; \
+ int pkt_len = skb->len; \
+ \
+ skb->ip_summed = CHECKSUM_NONE; \
+ iph->tot_len = htons(skb->len); \
+ ip_select_ident(iph, &rt->u.dst, NULL); \
+ ip_send_check(iph); \
+ \
+ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);\
+ if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { \
+ stats->tx_bytes += pkt_len; \
+ stats->tx_packets++; \
+ } else { \
+ stats->tx_errors++; \
+ stats->tx_aborted_errors++; \
+ } \
+} while (0)
+
+
+extern int sit_init(void);
+extern void sit_cleanup(void);
+
+#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
new file mode 100644
index 000000000000..87c45cbfbaf6
--- /dev/null
+++ b/include/net/ipv6.h
@@ -0,0 +1,472 @@
+/*
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ *
+ * $Id: ipv6.h,v 1.1 2002/05/20 15:13:07 jgrimm Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_IPV6_H
+#define _NET_IPV6_H
+
+#include <linux/ipv6.h>
+#include <linux/hardirq.h>
+#include <net/ndisc.h>
+#include <net/flow.h>
+#include <net/snmp.h>
+
+#define SIN6_LEN_RFC2133 24
+
+#define IPV6_MAXPLEN 65535
+
+/*
+ * NextHeader field of IPv6 header
+ */
+
+#define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
+#define NEXTHDR_TCP 6 /* TCP segment. */
+#define NEXTHDR_UDP 17 /* UDP message. */
+#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
+#define NEXTHDR_ROUTING 43 /* Routing header. */
+#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
+#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
+#define NEXTHDR_AUTH 51 /* Authentication header. */
+#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
+#define NEXTHDR_NONE 59 /* No next header */
+#define NEXTHDR_DEST 60 /* Destination options header. */
+
+#define NEXTHDR_MAX 255
+
+
+
+#define IPV6_DEFAULT_HOPLIMIT 64
+#define IPV6_DEFAULT_MCASTHOPS 1
+
+/*
+ * Addr type
+ *
+ * type - unicast | multicast
+ * scope - local | site | global
+ * v4 - compat
+ * v4mapped
+ * any
+ * loopback
+ */
+
+#define IPV6_ADDR_ANY 0x0000U
+
+#define IPV6_ADDR_UNICAST 0x0001U
+#define IPV6_ADDR_MULTICAST 0x0002U
+
+#define IPV6_ADDR_LOOPBACK 0x0010U
+#define IPV6_ADDR_LINKLOCAL 0x0020U
+#define IPV6_ADDR_SITELOCAL 0x0040U
+
+#define IPV6_ADDR_COMPATv4 0x0080U
+
+#define IPV6_ADDR_SCOPE_MASK 0x00f0U
+
+#define IPV6_ADDR_MAPPED 0x1000U
+#define IPV6_ADDR_RESERVED 0x2000U /* reserved address space */
+
+/*
+ * Addr scopes
+ */
+#ifdef __KERNEL__
+#define IPV6_ADDR_MC_SCOPE(a) \
+ ((a)->s6_addr[1] & 0x0f) /* nonstandard */
+#define __IPV6_ADDR_SCOPE_INVALID -1
+#endif
+#define IPV6_ADDR_SCOPE_NODELOCAL 0x01
+#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02
+#define IPV6_ADDR_SCOPE_SITELOCAL 0x05
+#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08
+#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
+
+/*
+ * fragmentation header
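+ * (frag_off carries the 13-bit fragment offset, in units of 8 octets, in
+ *  its upper bits; the lowest-order bit is the More Fragments flag, IP6_MF)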
+ */
+
+struct frag_hdr {
+ unsigned char nexthdr;
+ unsigned char reserved;
+ unsigned short frag_off;
+ __u32 identification;
+};
+
+#define IP6_MF 0x0001
+
+#ifdef __KERNEL__
+
+#include <net/sock.h>
+
+/* sysctls */
+extern int sysctl_ipv6_bindv6only;
+extern int sysctl_mld_max_msf;
+
+/* MIBs */
+DECLARE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
+#define IP6_INC_STATS(field) SNMP_INC_STATS(ipv6_statistics, field)
+#define IP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(ipv6_statistics, field)
+#define IP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(ipv6_statistics, field)
+DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
+#define ICMP6_INC_STATS(idev, field) ({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS(_idev->stats.icmpv6, field); \
+ SNMP_INC_STATS(icmpv6_statistics, field); \
+})
+#define ICMP6_INC_STATS_BH(idev, field) ({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_BH((_idev)->stats.icmpv6, field); \
+ SNMP_INC_STATS_BH(icmpv6_statistics, field); \
+})
+#define ICMP6_INC_STATS_USER(idev, field) ({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_USER(_idev->stats.icmpv6, field); \
+ SNMP_INC_STATS_USER(icmpv6_statistics, field); \
+})
+#define ICMP6_INC_STATS_OFFSET_BH(idev, field, offset) ({ \
+ struct inet6_dev *_idev = idev; \
+ __typeof__(offset) _offset = (offset); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_OFFSET_BH(_idev->stats.icmpv6, field, _offset); \
+ SNMP_INC_STATS_OFFSET_BH(icmpv6_statistics, field, _offset); \
+})
+DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6);
+#define UDP6_INC_STATS(field) SNMP_INC_STATS(udp_stats_in6, field)
+#define UDP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_stats_in6, field)
+#define UDP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_stats_in6, field)
+extern atomic_t inet6_sock_nr;
+
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
+int snmp6_alloc_dev(struct inet6_dev *idev);
+int snmp6_free_dev(struct inet6_dev *idev);
+int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign);
+void snmp6_mib_free(void *ptr[2]);
+
+struct ip6_ra_chain
+{
+ struct ip6_ra_chain *next;
+ struct sock *sk;
+ int sel;
+ void (*destructor)(struct sock *);
+};
+
+extern struct ip6_ra_chain *ip6_ra_chain;
+extern rwlock_t ip6_ra_lock;
+
+/*
+ This structure is prepared by the protocol layer when parsing
+ ancillary data, and is then passed on to IPv6.
+ */
+
+struct ipv6_txoptions
+{
+ /* Length of this structure */
+ int tot_len;
+
+ /* length of extension headers */
+
+ __u16 opt_flen; /* after fragment hdr */
+ __u16 opt_nflen; /* before fragment hdr */
+
+ struct ipv6_opt_hdr *hopopt;
+ struct ipv6_opt_hdr *dst0opt;
+ struct ipv6_rt_hdr *srcrt; /* Routing Header */
+ struct ipv6_opt_hdr *auth;
+ struct ipv6_opt_hdr *dst1opt;
+
+ /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
+};
+
+struct ip6_flowlabel
+{
+ struct ip6_flowlabel *next;
+ u32 label;
+ struct in6_addr dst;
+ struct ipv6_txoptions *opt;
+ atomic_t users;
+ unsigned long linger;
+ u8 share;
+ u32 owner;
+ unsigned long lastuse;
+ unsigned long expires;
+};
+
+#define IPV6_FLOWINFO_MASK __constant_htonl(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK __constant_htonl(0x000FFFFF)
+
+struct ipv6_fl_socklist
+{
+ struct ipv6_fl_socklist *next;
+ struct ip6_flowlabel *fl;
+};
+
+extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, u32 label);
+extern struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
+ struct ip6_flowlabel * fl,
+ struct ipv6_txoptions * fopt);
+extern void fl6_free_socklist(struct sock *sk);
+extern int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+extern void ip6_flowlabel_init(void);
+extern void ip6_flowlabel_cleanup(void);
+
+static inline void fl6_sock_release(struct ip6_flowlabel *fl)
+{
+ if (fl)
+ atomic_dec(&fl->users);
+}
+
+extern int ip6_ra_control(struct sock *sk, int sel,
+ void (*destructor)(struct sock *));
+
+
+extern int ipv6_parse_hopopts(struct sk_buff *skb, int);
+
+extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
+
+extern int ip6_frag_nqueues;
+extern atomic_t ip6_frag_mem;
+
+#define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */
+
+/*
+ * Function prototype for build_xmit
+ */
+
+typedef int (*inet_getfrag_t) (const void *data,
+ struct in6_addr *addr,
+ char *,
+ unsigned int, unsigned int);
+
+
+extern int ipv6_addr_type(const struct in6_addr *addr);
+
+static inline int ipv6_addr_scope(const struct in6_addr *addr)
+{
+ return ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
+}
+
+static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
+{
+ return memcmp((const void *) a1, (const void *) a2, sizeof(struct in6_addr));
+}
+
+static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
+{
+ memcpy((void *) a1, (const void *) a2, sizeof(struct in6_addr));
+}
+
+static inline void ipv6_addr_prefix(struct in6_addr *pfx,
+ const struct in6_addr *addr,
+ int plen)
+{
+ /* caller must guarantee 0 <= plen <= 128 */
+ int o = plen >> 3,
+ b = plen & 0x7;
+
+ memcpy(pfx->s6_addr, addr, o);
+ if (b != 0) {
+ pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
+ o++;
+ }
+ if (o < 16)
+ memset(pfx->s6_addr + o, 0, 16 - o);
+}
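+/*
+ * Editorial worked example, not part of the original header: for plen = 20,
+ * o = 2 and b = 4, so the first two bytes are copied verbatim, the third
+ * byte is masked with 0xff00 >> 4 (i.e. 0xf0 once truncated to a byte), and
+ * bytes 3..15 of the prefix are cleared.
+ */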
+
+#ifndef __HAVE_ARCH_ADDR_SET
+static inline void ipv6_addr_set(struct in6_addr *addr,
+ __u32 w1, __u32 w2,
+ __u32 w3, __u32 w4)
+{
+ addr->s6_addr32[0] = w1;
+ addr->s6_addr32[1] = w2;
+ addr->s6_addr32[2] = w3;
+ addr->s6_addr32[3] = w4;
+}
+#endif
+
+static inline int ipv6_addr_equal(const struct in6_addr *a1,
+ const struct in6_addr *a2)
+{
+ return (a1->s6_addr32[0] == a2->s6_addr32[0] &&
+ a1->s6_addr32[1] == a2->s6_addr32[1] &&
+ a1->s6_addr32[2] == a2->s6_addr32[2] &&
+ a1->s6_addr32[3] == a2->s6_addr32[3]);
+}
+
+static inline int __ipv6_prefix_equal(const u32 *a1, const u32 *a2,
+ unsigned int prefixlen)
+{
+ unsigned pdw, pbi;
+
+ /* check complete u32 in prefix */
+ pdw = prefixlen >> 5;
+ if (pdw && memcmp(a1, a2, pdw << 2))
+ return 0;
+
+ /* check incomplete u32 in prefix */
+ pbi = prefixlen & 0x1f;
+ if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
+ return 0;
+
+ return 1;
+}
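+/*
+ * Editorial worked example, not part of the original header: for
+ * prefixlen = 36, pdw = 1 so the first 32-bit word is compared with
+ * memcmp(), and pbi = 4 so only the top four bits of the second word are
+ * compared through the htonl(0xffffffff << 28) mask.
+ */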
+
+static inline int ipv6_prefix_equal(const struct in6_addr *a1,
+ const struct in6_addr *a2,
+ unsigned int prefixlen)
+{
+ return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32,
+ prefixlen);
+}
+
+static inline int ipv6_addr_any(const struct in6_addr *a)
+{
+ return ((a->s6_addr32[0] | a->s6_addr32[1] |
+ a->s6_addr32[2] | a->s6_addr32[3] ) == 0);
+}
+
+/*
+ * Prototypes exported by ipv6
+ */
+
+/*
+ * rcv function (called from netdevice level)
+ */
+
+extern int ipv6_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt);
+
+/*
+ * upper-layer output functions
+ */
+extern int ip6_xmit(struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct ipv6_txoptions *opt,
+ int ipfragok);
+
+extern int ip6_nd_hdr(struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ int proto, int len);
+
+extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+extern int ip6_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
+ void *from,
+ int length,
+ int transhdrlen,
+ int hlimit,
+ struct ipv6_txoptions *opt,
+ struct flowi *fl,
+ struct rt6_info *rt,
+ unsigned int flags);
+
+extern int ip6_push_pending_frames(struct sock *sk);
+
+extern void ip6_flush_pending_frames(struct sock *sk);
+
+extern int ip6_dst_lookup(struct sock *sk,
+ struct dst_entry **dst,
+ struct flowi *fl);
+
+/*
+ * skb processing functions
+ */
+
+extern int ip6_output(struct sk_buff *skb);
+extern int ip6_forward(struct sk_buff *skb);
+extern int ip6_input(struct sk_buff *skb);
+extern int ip6_mc_input(struct sk_buff *skb);
+
+/*
+ * Extension header (options) processing
+ */
+
+extern u8 * ipv6_build_nfrag_opts(struct sk_buff *skb,
+ u8 *prev_hdr,
+ struct ipv6_txoptions *opt,
+ struct in6_addr *daddr,
+ u32 jumbolen);
+extern u8 * ipv6_build_frag_opts(struct sk_buff *skb,
+ u8 *prev_hdr,
+ struct ipv6_txoptions *opt);
+extern void ipv6_push_nfrag_opts(struct sk_buff *skb,
+ struct ipv6_txoptions *opt,
+ u8 *proto,
+ struct in6_addr **daddr_p);
+extern void ipv6_push_frag_opts(struct sk_buff *skb,
+ struct ipv6_txoptions *opt,
+ u8 *proto);
+
+extern int ipv6_skip_exthdr(const struct sk_buff *, int start,
+ u8 *nexthdrp, int len);
+
+extern int ipv6_ext_hdr(u8 nexthdr);
+
+extern struct ipv6_txoptions * ipv6_invert_rthdr(struct sock *sk,
+ struct ipv6_rt_hdr *hdr);
+
+
+/*
+ * socket options (ipv6_sockglue.c)
+ */
+
+extern int ipv6_setsockopt(struct sock *sk, int level,
+ int optname,
+ char __user *optval,
+ int optlen);
+extern int ipv6_getsockopt(struct sock *sk, int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+
+extern void ipv6_packet_init(void);
+
+extern void ipv6_packet_cleanup(void);
+
+extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, u16 port,
+ u32 info, u8 *payload);
+extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
+
+extern int inet6_release(struct socket *sock);
+extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len);
+extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer);
+extern int inet6_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+
+/*
+ * reassembly.c
+ */
+extern int sysctl_ip6frag_high_thresh;
+extern int sysctl_ip6frag_low_thresh;
+extern int sysctl_ip6frag_time;
+extern int sysctl_ip6frag_secret_interval;
+
+#endif /* __KERNEL__ */
+#endif /* _NET_IPV6_H */
+
+
+
diff --git a/include/net/ipx.h b/include/net/ipx.h
new file mode 100644
index 000000000000..5c0cf33826c5
--- /dev/null
+++ b/include/net/ipx.h
@@ -0,0 +1,161 @@
+#ifndef _NET_INET_IPX_H_
+#define _NET_INET_IPX_H_
+/*
+ * The following information was obtained in its entirety from:
+ *
+ * Novell 'IPX Router Specification' Version 1.10
+ * Part No. 107-000029-001
+ *
+ * Which is available from ftp.novell.com
+ */
+
+#include <linux/netdevice.h>
+#include <net/datalink.h>
+#include <linux/ipx.h>
+#include <linux/list.h>
+
+struct ipx_address {
+ __u32 net;
+ __u8 node[IPX_NODE_LEN];
+ __u16 sock;
+};
+
+#define ipx_broadcast_node "\377\377\377\377\377\377"
+#define ipx_this_node "\0\0\0\0\0\0"
+
+#define IPX_MAX_PPROP_HOPS 8
+
+struct ipxhdr {
+ __u16 ipx_checksum __attribute__ ((packed));
+#define IPX_NO_CHECKSUM 0xFFFF
+ __u16 ipx_pktsize __attribute__ ((packed));
+ __u8 ipx_tctrl;
+ __u8 ipx_type;
+#define IPX_TYPE_UNKNOWN 0x00
+#define IPX_TYPE_RIP 0x01 /* may also be 0 */
+#define IPX_TYPE_SAP 0x04 /* may also be 0 */
+#define IPX_TYPE_SPX 0x05 /* SPX protocol */
+#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
+#define IPX_TYPE_PPROP 0x14 /* complicated flood fill broadcast */
+ struct ipx_address ipx_dest __attribute__ ((packed));
+ struct ipx_address ipx_source __attribute__ ((packed));
+};
+
+static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
+{
+ return (struct ipxhdr *)skb->h.raw;
+}
+
+struct ipx_interface {
+ /* IPX address */
+ __u32 if_netnum;
+ unsigned char if_node[IPX_NODE_LEN];
+ atomic_t refcnt;
+
+ /* physical device info */
+ struct net_device *if_dev;
+ struct datalink_proto *if_dlink;
+ unsigned short if_dlink_type;
+
+ /* socket support */
+ unsigned short if_sknum;
+ struct hlist_head if_sklist;
+ spinlock_t if_sklist_lock;
+
+ /* administrative overhead */
+ int if_ipx_offset;
+ unsigned char if_internal;
+ unsigned char if_primary;
+
+ struct list_head node; /* node in ipx_interfaces list */
+};
+
+struct ipx_route {
+ __u32 ir_net;
+ struct ipx_interface *ir_intrfc;
+ unsigned char ir_routed;
+ unsigned char ir_router_node[IPX_NODE_LEN];
+ struct list_head node; /* node in ipx_routes list */
+ atomic_t refcnt;
+};
+
+#ifdef __KERNEL__
+struct ipx_cb {
+ u8 ipx_tctrl;
+ u32 ipx_dest_net;
+ u32 ipx_source_net;
+ struct {
+ u32 netnum;
+ int index;
+ } last_hop;
+};
+
+#include <net/sock.h>
+
+struct ipx_sock {
+ /* struct sock has to be the first member of ipx_sock */
+ struct sock sk;
+ struct ipx_address dest_addr;
+ struct ipx_interface *intrfc;
+ unsigned short port;
+#ifdef CONFIG_IPX_INTERN
+ unsigned char node[IPX_NODE_LEN];
+#endif
+ unsigned short type;
+ /*
+ * To handle special ncp connection-handling sockets for mars_nwe,
+ * the connection number must be stored in the socket.
+ */
+ unsigned short ipx_ncp_conn;
+};
+
+static inline struct ipx_sock *ipx_sk(struct sock *sk)
+{
+ return (struct ipx_sock *)sk;
+}
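+/*
+ * Editorial note, not part of the original header: because struct sock is
+ * the first member of struct ipx_sock, both share the same address and the
+ * cast above is a zero-cost downcast, e.g. (hypothetical usage):
+ *
+ *   struct ipx_sock *ipxs = ipx_sk(sk);
+ *   ipxs->intrfc = intrfc;
+ */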
+
+#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
+#endif
+
+#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
+#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
+
+extern struct list_head ipx_routes;
+extern rwlock_t ipx_routes_lock;
+
+extern struct list_head ipx_interfaces;
+extern struct ipx_interface *ipx_interfaces_head(void);
+extern spinlock_t ipx_interfaces_lock;
+
+extern struct ipx_interface *ipx_primary_net;
+
+extern int ipx_proc_init(void);
+extern void ipx_proc_exit(void);
+
+extern const char *ipx_frame_name(unsigned short);
+extern const char *ipx_device_name(struct ipx_interface *intrfc);
+
+static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
+{
+ atomic_inc(&intrfc->refcnt);
+}
+
+extern void ipxitf_down(struct ipx_interface *intrfc);
+
+static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
+{
+ if (atomic_dec_and_test(&intrfc->refcnt))
+ ipxitf_down(intrfc);
+}
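+/*
+ * Editorial note, not part of the original header: ipxitf_hold()/ipxitf_put()
+ * follow the usual reference-counting pattern; a caller that keeps using an
+ * interface beyond the lookup must balance the two, e.g.:
+ *
+ *   ipxitf_hold(intrfc);
+ *   ... use intrfc ...
+ *   ipxitf_put(intrfc);    releases it, ipxitf_down() runs on the last put
+ */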
+
+static __inline__ void ipxrtr_hold(struct ipx_route *rt)
+{
+ atomic_inc(&rt->refcnt);
+}
+
+static __inline__ void ipxrtr_put(struct ipx_route *rt)
+{
+ if (atomic_dec_and_test(&rt->refcnt))
+ kfree(rt);
+}
+#endif /* _NET_INET_IPX_H_ */
diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h
new file mode 100644
index 000000000000..7a209f61c482
--- /dev/null
+++ b/include/net/irda/af_irda.h
@@ -0,0 +1,87 @@
+/*********************************************************************
+ *
+ * Filename: af_irda.h
+ * Version: 1.0
+ * Description: IrDA sockets declarations
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:13:12 1997
+ * Modified at: Fri Jan 28 13:16:32 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef AF_IRDA_H
+#define AF_IRDA_H
+
+#include <linux/irda.h>
+#include <net/irda/irda.h>
+#include <net/irda/iriap.h> /* struct iriap_cb */
+#include <net/irda/irias_object.h> /* struct ias_value */
+#include <net/irda/irlmp.h> /* struct lsap_cb */
+#include <net/irda/irttp.h> /* struct tsap_cb */
+#include <net/irda/discovery.h> /* struct discovery_t */
+#include <net/sock.h>
+
+/* IrDA Socket */
+struct irda_sock {
+ /* struct sock has to be the first member of irda_sock */
+ struct sock sk;
+ __u32 saddr; /* my local address */
+ __u32 daddr; /* peer address */
+
+ struct lsap_cb *lsap; /* LSAP used by Ultra */
+ __u8 pid; /* Protocol ID (PID) used by Ultra */
+
+ struct tsap_cb *tsap; /* TSAP used by this connection */
+ __u8 dtsap_sel; /* remote TSAP address */
+ __u8 stsap_sel; /* local TSAP address */
+
+ __u32 max_sdu_size_rx;
+ __u32 max_sdu_size_tx;
+ __u32 max_data_size;
+ __u8 max_header_size;
+ struct qos_info qos_tx;
+
+ __u16_host_order mask; /* Hint bits mask */
+ __u16_host_order hints; /* Hint bits */
+
+ void *ckey; /* IrLMP client handle */
+ void *skey; /* IrLMP service handle */
+
+ struct ias_object *ias_obj; /* Our service name + lsap in IAS */
+ struct iriap_cb *iriap; /* Used to query remote IAS */
+ struct ias_value *ias_result; /* Result of remote IAS query */
+
+ hashbin_t *cachelog; /* Result of discovery query */
+ __u32 cachedaddr; /* Result of selective discovery query */
+
+ int nslots; /* Number of slots to use for discovery */
+
+ int errno; /* status of the IAS query */
+
+ wait_queue_head_t query_wait; /* Wait for the answer to a query */
+ struct timer_list watchdog; /* Timeout for discovery */
+
+ LOCAL_FLOW tx_flow;
+ LOCAL_FLOW rx_flow;
+};
+
+static inline struct irda_sock *irda_sk(struct sock *sk)
+{
+ return (struct irda_sock *)sk;
+}
+
+#endif /* AF_IRDA_H */
diff --git a/include/net/irda/crc.h b/include/net/irda/crc.h
new file mode 100644
index 000000000000..f202296df9bb
--- /dev/null
+++ b/include/net/irda/crc.h
@@ -0,0 +1,29 @@
+/*********************************************************************
+ *
+ * Filename: crc.h
+ * Version:
+ * Description: CRC routines
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Sun May 2 20:25:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ ********************************************************************/
+
+#ifndef IRDA_CRC_H
+#define IRDA_CRC_H
+
+#include <linux/types.h>
+#include <linux/crc-ccitt.h>
+
+#define INIT_FCS 0xffff /* Initial FCS value */
+#define GOOD_FCS 0xf0b8 /* Good final FCS value */
+
+/* Recompute the FCS with one more character appended. */
+#define irda_fcs(fcs, c) crc_ccitt_byte(fcs, c)
+
+/* Recompute the FCS with len bytes appended. */
+#define irda_calc_crc16(fcs, buf, len) crc_ccitt(fcs, buf, len)
+
+#endif
diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h
new file mode 100644
index 000000000000..eb0f9de47294
--- /dev/null
+++ b/include/net/irda/discovery.h
@@ -0,0 +1,100 @@
+/*********************************************************************
+ *
+ * Filename: discovery.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 6 16:53:53 1999
+ * Modified at: Tue Oct 5 10:05:10 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef DISCOVERY_H
+#define DISCOVERY_H
+
+#include <asm/param.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/irlap_event.h> /* LAP_REASON */
+
+#define DISCOVERY_EXPIRE_TIMEOUT (2*sysctl_discovery_timeout*HZ)
+#define DISCOVERY_DEFAULT_SLOTS 0
+
+/*
+ * This type is used by the protocols that transmit 16 bits words in
+ * little endian format. A little endian machine stores MSB of word in
+ * byte[1] and LSB in byte[0]. A big endian machine stores MSB in byte[0]
+ * and LSB in byte[1].
+ *
+ * This structure is used in the code for things that are endian neutral
+ * but that fit in a word so that we can manipulate them efficiently.
+ * By endian neutral, I mean things that are really an array of bytes,
+ * and always used as such, for example the hint bits. Jean II
+ */
+typedef union {
+ __u16 word;
+ __u8 byte[2];
+} __u16_host_order;
+
+/* Same purpose, different application */
+#define u16ho(array) (* ((__u16 *) array))
+
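+/*
+ * Editorial sketch, not part of the original header: the union lets code
+ * set the two hint bytes individually and still test or copy them as one
+ * 16-bit quantity without caring about host endianness, e.g.:
+ *
+ *   __u16_host_order hints, mask;   mask is a hypothetical hint filter
+ *   hints.byte[0] = HINT_PNP | HINT_COMPUTER;
+ *   hints.byte[1] = HINT_OBEX;
+ *   if (hints.word & mask.word)
+ *      ... peer matches the hint filter ...
+ *
+ * The HINT_* bits are assumed from <linux/irda.h>.
+ */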
+/* Types of discovery */
+typedef enum {
+ DISCOVERY_LOG, /* What's in our discovery log */
+ DISCOVERY_ACTIVE, /* Doing our own discovery on the medium */
+ DISCOVERY_PASSIVE, /* Peer doing discovery on the medium */
+ EXPIRY_TIMEOUT, /* Entry expired due to timeout */
+} DISCOVERY_MODE;
+
+#define NICKNAME_MAX_LEN 21
+
+/* Basic discovery information about a peer */
+typedef struct irda_device_info discinfo_t; /* linux/irda.h */
+
+/*
+ * The DISCOVERY structure is used for both discovery requests and responses
+ */
+typedef struct discovery_t {
+ irda_queue_t q; /* Must be first! */
+
+ discinfo_t data; /* Basic discovery information */
+ int name_len; /* Length of nickname */
+
+ LAP_REASON condition; /* More info about the discovery */
+ int gen_addr_bit; /* Need to generate a new device
+ * address? */
+ int nslots; /* Number of slots to use when
+ * discovering */
+ unsigned long timestamp; /* Last time discovered */
+ unsigned long firststamp; /* First time discovered */
+} discovery_t;
+
+void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *discovery);
+void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log);
+void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force);
+struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
+ __u16 mask, int old_entries);
+
+#endif
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
new file mode 100644
index 000000000000..69b610acd2df
--- /dev/null
+++ b/include/net/irda/ircomm_core.h
@@ -0,0 +1,108 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_core.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 08:58:43 1999
+ * Modified at: Mon Dec 13 11:52:29 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_CORE_H
+#define IRCOMM_CORE_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h>
+#include <net/irda/ircomm_event.h>
+
+#define IRCOMM_MAGIC 0x98347298
+#define IRCOMM_HEADER_SIZE 1
+
+struct ircomm_cb; /* Forward decl. */
+
+/*
+ * A small call-table, so we don't have to check the service-type whenever
+ * we want to do something
+ */
+typedef struct {
+ int (*data_request)(struct ircomm_cb *, struct sk_buff *, int clen);
+ int (*connect_request)(struct ircomm_cb *, struct sk_buff *,
+ struct ircomm_info *);
+ int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
+ int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
+ struct ircomm_info *);
+} call_t;
+
+struct ircomm_cb {
+ irda_queue_t queue;
+ magic_t magic;
+
+ notify_t notify;
+ call_t issue;
+
+ int state;
+ int line; /* Which TTY line we are using */
+
+ struct tsap_cb *tsap;
+ struct lsap_cb *lsap;
+
+ __u8 dlsap_sel; /* Destination LSAP/TSAP selector */
+ __u8 slsap_sel; /* Source LSAP/TSAP selector */
+
+ __u32 saddr; /* Source device address (link we are using) */
+ __u32 daddr; /* Destination device address */
+
+ int max_header_size; /* Header space we must reserve for each frame */
+ int max_data_size; /* The amount of data we can fill in each frame */
+
+ LOCAL_FLOW flow_status; /* Used by ircomm_lmp */
+ int pkt_count; /* Number of frames we have sent to IrLAP */
+
+ __u8 service_type;
+};
+
+extern hashbin_t *ircomm;
+
+struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line);
+int ircomm_close(struct ircomm_cb *self);
+
+int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb);
+void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb);
+void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb);
+int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb);
+int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr, struct sk_buff *skb,
+ __u8 service_type);
+void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata);
+int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata);
+void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow);
+
+#define ircomm_is_connected(self) (self->state == IRCOMM_CONN)
+
+#endif
diff --git a/include/net/irda/ircomm_event.h b/include/net/irda/ircomm_event.h
new file mode 100644
index 000000000000..c290447872d1
--- /dev/null
+++ b/include/net/irda/ircomm_event.h
@@ -0,0 +1,85 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_event.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 23:51:13 1999
+ * Modified at: Thu Jun 10 08:36:25 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_EVENT_H
+#define IRCOMM_EVENT_H
+
+#include <net/irda/irmod.h>
+
+typedef enum {
+ IRCOMM_IDLE,
+ IRCOMM_WAITI,
+ IRCOMM_WAITR,
+ IRCOMM_CONN,
+} IRCOMM_STATE;
+
+/* IrCOMM Events */
+typedef enum {
+ IRCOMM_CONNECT_REQUEST,
+ IRCOMM_CONNECT_RESPONSE,
+ IRCOMM_TTP_CONNECT_INDICATION,
+ IRCOMM_LMP_CONNECT_INDICATION,
+ IRCOMM_TTP_CONNECT_CONFIRM,
+ IRCOMM_LMP_CONNECT_CONFIRM,
+
+ IRCOMM_LMP_DISCONNECT_INDICATION,
+ IRCOMM_TTP_DISCONNECT_INDICATION,
+ IRCOMM_DISCONNECT_REQUEST,
+
+ IRCOMM_TTP_DATA_INDICATION,
+ IRCOMM_LMP_DATA_INDICATION,
+ IRCOMM_DATA_REQUEST,
+ IRCOMM_CONTROL_REQUEST,
+ IRCOMM_CONTROL_INDICATION,
+} IRCOMM_EVENT;
+
+/*
+ * Used for passing information through the state-machine
+ */
+struct ircomm_info {
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ __u8 dlsap_sel;
+ LM_REASON reason; /* Reason for disconnect */
+ __u32 max_data_size;
+ __u32 max_header_size;
+
+ struct qos_info *qos;
+};
+
+extern char *ircomm_state[];
+
+struct ircomm_cb; /* Forward decl. */
+
+int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state);
+
+#endif
diff --git a/include/net/irda/ircomm_lmp.h b/include/net/irda/ircomm_lmp.h
new file mode 100644
index 000000000000..ae02106be590
--- /dev/null
+++ b/include/net/irda/ircomm_lmp.h
@@ -0,0 +1,38 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_lmp.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 10:06:07 1999
+ * Modified at: Fri Aug 13 07:32:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_LMP_H
+#define IRCOMM_LMP_H
+
+#include <net/irda/ircomm_core.h>
+
+int ircomm_open_lsap(struct ircomm_cb *self);
+
+#endif
diff --git a/include/net/irda/ircomm_param.h b/include/net/irda/ircomm_param.h
new file mode 100644
index 000000000000..e6678800c41f
--- /dev/null
+++ b/include/net/irda/ircomm_param.h
@@ -0,0 +1,149 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_param.h
+ * Version: 1.0
+ * Description: Parameter handling for the IrCOMM protocol
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 08:47:28 1999
+ * Modified at: Wed Aug 25 13:46:33 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_PARAMS_H
+#define IRCOMM_PARAMS_H
+
+#include <net/irda/parameters.h>
+
+/* Parameters common to all service types */
+#define IRCOMM_SERVICE_TYPE 0x00
+#define IRCOMM_PORT_TYPE 0x01 /* Only used in LM-IAS */
+#define IRCOMM_PORT_NAME 0x02 /* Only used in LM-IAS */
+
+/* Parameters for both 3 wire and 9 wire */
+#define IRCOMM_DATA_RATE 0x10
+#define IRCOMM_DATA_FORMAT 0x11
+#define IRCOMM_FLOW_CONTROL 0x12
+#define IRCOMM_XON_XOFF 0x13
+#define IRCOMM_ENQ_ACK 0x14
+#define IRCOMM_LINE_STATUS 0x15
+#define IRCOMM_BREAK 0x16
+
+/* Parameters for 9 wire */
+#define IRCOMM_DTE 0x20
+#define IRCOMM_DCE 0x21
+#define IRCOMM_POLL 0x22
+
+/* Service type (details) */
+#define IRCOMM_3_WIRE_RAW 0x01
+#define IRCOMM_3_WIRE 0x02
+#define IRCOMM_9_WIRE 0x04
+#define IRCOMM_CENTRONICS 0x08
+
+/* Port type (details) */
+#define IRCOMM_SERIAL 0x00
+#define IRCOMM_PARALLEL 0x01
+
+/* Data format (details) */
+#define IRCOMM_WSIZE_5 0x00
+#define IRCOMM_WSIZE_6 0x01
+#define IRCOMM_WSIZE_7 0x02
+#define IRCOMM_WSIZE_8 0x03
+
+#define IRCOMM_1_STOP_BIT 0x00
+#define IRCOMM_2_STOP_BIT 0x04 /* 1.5 if char len 5 */
+
+#define IRCOMM_PARITY_DISABLE 0x00
+#define IRCOMM_PARITY_ENABLE 0x08
+
+#define IRCOMM_PARITY_ODD 0x00
+#define IRCOMM_PARITY_EVEN 0x10
+#define IRCOMM_PARITY_MARK 0x20
+#define IRCOMM_PARITY_SPACE 0x30
+
+/* Flow control */
+#define IRCOMM_XON_XOFF_IN 0x01
+#define IRCOMM_XON_XOFF_OUT 0x02
+#define IRCOMM_RTS_CTS_IN 0x04
+#define IRCOMM_RTS_CTS_OUT 0x08
+#define IRCOMM_DSR_DTR_IN 0x10
+#define IRCOMM_DSR_DTR_OUT 0x20
+#define IRCOMM_ENQ_ACK_IN 0x40
+#define IRCOMM_ENQ_ACK_OUT 0x80
+
+/* Line status */
+#define IRCOMM_OVERRUN_ERROR 0x02
+#define IRCOMM_PARITY_ERROR 0x04
+#define IRCOMM_FRAMING_ERROR 0x08
+
+/* DTE (Data terminal equipment) line settings */
+#define IRCOMM_DELTA_DTR 0x01
+#define IRCOMM_DELTA_RTS 0x02
+#define IRCOMM_DTR 0x04
+#define IRCOMM_RTS 0x08
+
+/* DCE (Data communications equipment) line settings */
+#define IRCOMM_DELTA_CTS 0x01 /* Clear to send has changed */
+#define IRCOMM_DELTA_DSR 0x02 /* Data set ready has changed */
+#define IRCOMM_DELTA_RI 0x04 /* Ring indicator has changed */
+#define IRCOMM_DELTA_CD 0x08 /* Carrier detect has changed */
+#define IRCOMM_CTS 0x10 /* Clear to send is high */
+#define IRCOMM_DSR 0x20 /* Data set ready is high */
+#define IRCOMM_RI 0x40 /* Ring indicator is high */
+#define IRCOMM_CD 0x80 /* Carrier detect is high */
+#define IRCOMM_DCE_DELTA_ANY 0x0f
+
+/*
+ * Parameter state
+ */
+struct ircomm_params {
+ /* General control params */
+ __u8 service_type;
+ __u8 port_type;
+ char port_name[32];
+
+ /* Control params for 3- and 9-wire service type */
+ __u32 data_rate; /* Data rate in bps */
+ __u8 data_format;
+ __u8 flow_control;
+ char xonxoff[2];
+ char enqack[2];
+ __u8 line_status;
+ __u8 _break;
+
+ __u8 null_modem;
+
+ /* Control params for 9-wire service type */
+ __u8 dte;
+ __u8 dce;
+ __u8 poll;
+
+ /* Control params for Centronics service type */
+};
+
+struct ircomm_tty_cb; /* Forward decl. */
+
+int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush);
+
+extern pi_param_info_t ircomm_param_info;
+
+#endif /* IRCOMM_PARAMS_H */
+
diff --git a/include/net/irda/ircomm_ttp.h b/include/net/irda/ircomm_ttp.h
new file mode 100644
index 000000000000..403081ed725c
--- /dev/null
+++ b/include/net/irda/ircomm_ttp.h
@@ -0,0 +1,39 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_ttp.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 10:06:07 1999
+ * Modified at: Fri Aug 13 07:32:22 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTP_H
+#define IRCOMM_TTP_H
+
+#include <net/irda/ircomm_core.h>
+
+int ircomm_open_tsap(struct ircomm_cb *self);
+
+#endif
+
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
new file mode 100644
index 000000000000..87699cb4ef8c
--- /dev/null
+++ b/include/net/irda/ircomm_tty.h
@@ -0,0 +1,139 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 23:24:22 1999
+ * Modified at: Fri Jan 28 13:16:57 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTY_H
+#define IRCOMM_TTY_H
+
+#include <linux/serial.h>
+#include <linux/termios.h>
+#include <linux/timer.h>
+#include <linux/tty.h> /* struct tty_struct */
+
+#include <net/irda/irias_object.h>
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_param.h>
+
+#define IRCOMM_TTY_PORTS 32
+#define IRCOMM_TTY_MAGIC 0x3432
+#define IRCOMM_TTY_MAJOR 161
+#define IRCOMM_TTY_MINOR 0
+
+/* This is used as an initial value for max_header_size before the proper
+ * value is filled in (5 for ttp, 4 for lmp). This allows us to detect
+ * the state of the underlying connection. - Jean II */
+#define IRCOMM_TTY_HDR_UNINITIALISED 16
+/* Same for payload size. See qos.c for the smallest max data size */
+#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
+
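+/*
+ * Editorial sketch, not part of the original header: with the sentinel
+ * values above, code can tell whether the connection parameters have been
+ * negotiated yet, e.g.:
+ *
+ *   if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED)
+ *      ... underlying IrTTP/IrLMP connection not established yet ...
+ */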
+/* Those are really defined in include/linux/serial.h - Jean II */
+#define ASYNC_B_INITIALIZED 31 /* Serial port was initialized */
+#define ASYNC_B_NORMAL_ACTIVE 29 /* Normal device is active */
+#define ASYNC_B_CLOSING 27 /* Serial port is closing */
+
+/*
+ * IrCOMM TTY driver state
+ */
+struct ircomm_tty_cb {
+ irda_queue_t queue; /* Must be first */
+ magic_t magic;
+
+ int state; /* Connect state */
+
+ struct tty_struct *tty;
+ struct ircomm_cb *ircomm; /* IrCOMM layer instance */
+
+ struct sk_buff *tx_skb; /* Transmit buffer */
+ struct sk_buff *ctrl_skb; /* Control data buffer */
+
+ /* Parameters */
+ struct ircomm_params settings;
+
+ __u8 service_type; /* The service that we support */
+ int client; /* True if we are a client */
+ LOCAL_FLOW flow; /* IrTTP flow status */
+
+ int line;
+ unsigned long flags;
+
+ __u8 dlsap_sel;
+ __u8 slsap_sel;
+
+ __u32 saddr;
+ __u32 daddr;
+
+ __u32 max_data_size; /* Max data we can transmit in one packet */
+ __u32 max_header_size; /* The amount of header space we must reserve */
+ __u32 tx_data_size; /* Max data size of current tx_skb */
+
+ struct iriap_cb *iriap; /* Instance used for querying remote IAS */
+ struct ias_object* obj;
+ void *skey;
+ void *ckey;
+
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+ struct timer_list watchdog_timer;
+ struct work_struct tqueue;
+
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+ int open_count;
+ int blocked_open; /* # of blocked opens */
+
+ /* Protect concurrent access to:
+ * o self->open_count
+ * o self->ctrl_skb
+ * o self->tx_skb
+ * Other fields may benefit from the same protection as well...
+ * Jean II */
+ spinlock_t spinlock;
+};
+
+void ircomm_tty_start(struct tty_struct *tty);
+void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
+
+extern int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file);
+extern int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear);
+extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+extern void ircomm_tty_set_termios(struct tty_struct *tty,
+ struct termios *old_termios);
+extern hashbin_t *ircomm_tty;
+
+#endif
+
+
+
+
+
+
+
diff --git a/include/net/irda/ircomm_tty_attach.h b/include/net/irda/ircomm_tty_attach.h
new file mode 100644
index 000000000000..f91a5695aa44
--- /dev/null
+++ b/include/net/irda/ircomm_tty_attach.h
@@ -0,0 +1,94 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty_attach.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 15:55:18 1999
+ * Modified at: Fri Dec 10 21:04:55 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTY_ATTACH_H
+#define IRCOMM_TTY_ATTACH_H
+
+#include <net/irda/ircomm_tty.h>
+
+typedef enum {
+ IRCOMM_TTY_IDLE,
+ IRCOMM_TTY_SEARCH,
+ IRCOMM_TTY_QUERY_PARAMETERS,
+ IRCOMM_TTY_QUERY_LSAP_SEL,
+ IRCOMM_TTY_SETUP,
+ IRCOMM_TTY_READY,
+} IRCOMM_TTY_STATE;
+
+/* IrCOMM TTY Events */
+typedef enum {
+ IRCOMM_TTY_ATTACH_CABLE,
+ IRCOMM_TTY_DETACH_CABLE,
+ IRCOMM_TTY_DATA_REQUEST,
+ IRCOMM_TTY_DATA_INDICATION,
+ IRCOMM_TTY_DISCOVERY_REQUEST,
+ IRCOMM_TTY_DISCOVERY_INDICATION,
+ IRCOMM_TTY_CONNECT_CONFIRM,
+ IRCOMM_TTY_CONNECT_INDICATION,
+ IRCOMM_TTY_DISCONNECT_REQUEST,
+ IRCOMM_TTY_DISCONNECT_INDICATION,
+ IRCOMM_TTY_WD_TIMER_EXPIRED,
+ IRCOMM_TTY_GOT_PARAMETERS,
+ IRCOMM_TTY_GOT_LSAPSEL,
+} IRCOMM_TTY_EVENT;
+
+/* Used for passing information through the state-machine */
+struct ircomm_tty_info {
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ __u8 dlsap_sel;
+};
+
+extern char *ircomm_state[];
+extern char *ircomm_tty_state[];
+
+int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb, struct ircomm_tty_info *info);
+
+
+int ircomm_tty_attach_cable(struct ircomm_tty_cb *self);
+void ircomm_tty_detach_cable(struct ircomm_tty_cb *self);
+void ircomm_tty_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+void ircomm_tty_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb);
+void ircomm_tty_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self);
+void ircomm_tty_link_established(struct ircomm_tty_cb *self);
+
+#endif /* IRCOMM_TTY_ATTACH_H */
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
new file mode 100644
index 000000000000..05a840837fe7
--- /dev/null
+++ b/include/net/irda/irda.h
@@ -0,0 +1,117 @@
+/*********************************************************************
+ *
+ * Filename: irda.h
+ * Version: 1.0
+ * Description: IrDA common include file for kernel internal use
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:13:12 1997
+ * Modified at: Fri Jan 28 13:16:32 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef NET_IRDA_H
+#define NET_IRDA_H
+
+#include <linux/config.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/kernel.h>
+#include <linux/if.h> /* sa_family_t in <linux/irda.h> */
+#include <linux/irda.h>
+
+typedef __u32 magic_t;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* Hack to do small backoff when setting media busy in IrLAP */
+#ifndef SMALL
+#define SMALL 5
+#endif
+
+#ifndef IRDA_MIN /* Let's not mix this MIN with other header files */
+#define IRDA_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef IRDA_ALIGN
+# define IRDA_ALIGN __attribute__((aligned))
+#endif
+#ifndef IRDA_PACK
+# define IRDA_PACK __attribute__((packed))
+#endif
+
+
+#ifdef CONFIG_IRDA_DEBUG
+
+extern unsigned int irda_debug;
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#define IRDA_DEBUG_LEVEL 0
+
+#define IRDA_DEBUG(n, args...) \
+do { if (irda_debug >= (n)) \
+ printk(KERN_DEBUG args); \
+} while (0)
+#define IRDA_ASSERT(expr, func) \
+do { if(!(expr)) { \
+ printk( "Assertion failed! %s:%s:%d %s\n", \
+ __FILE__,__FUNCTION__,__LINE__,(#expr) ); \
+ func } } while (0)
+#define IRDA_ASSERT_LABEL(label) label
+#else
+#define IRDA_DEBUG(n, args...) do { } while (0)
+#define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0)
+#define IRDA_ASSERT_LABEL(label)
+#endif /* CONFIG_IRDA_DEBUG */
+
+#define IRDA_WARNING(args...) printk(KERN_WARNING args)
+#define IRDA_MESSAGE(args...) printk(KERN_INFO args)
+#define IRDA_ERROR(args...) printk(KERN_ERR args)
+
+/*
+ * Magic numbers used by Linux-IrDA. Random numbers which must be unique to
+ * give the best protection
+ */
+
+#define IRTTY_MAGIC 0x2357
+#define LAP_MAGIC 0x1357
+#define LMP_MAGIC 0x4321
+#define LMP_LSAP_MAGIC 0x69333
+#define LMP_LAP_MAGIC 0x3432
+#define IRDA_DEVICE_MAGIC 0x63454
+#define IAS_MAGIC 0x007
+#define TTP_MAGIC 0x241169
+#define TTP_TSAP_MAGIC 0x4345
+#define IROBEX_MAGIC 0x341324
+#define HB_MAGIC 0x64534
+#define IRLAN_MAGIC 0x754
+#define IAS_OBJECT_MAGIC 0x34234
+#define IAS_ATTRIB_MAGIC 0x45232
+#define IRDA_TASK_MAGIC 0x38423
+
+#define IAS_DEVICE_ID 0x0000 /* Defined by IrDA, IrLMP section 4.1 (page 68) */
+#define IAS_PNP_ID 0xd342
+#define IAS_OBEX_ID 0x34323
+#define IAS_IRLAN_ID 0x34234
+#define IAS_IRCOMM_ID 0x2343
+#define IAS_IRLPT_ID 0x9876
+
+#endif /* NET_IRDA_H */
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
new file mode 100644
index 000000000000..3b216f186f18
--- /dev/null
+++ b/include/net/irda/irda_device.h
@@ -0,0 +1,301 @@
+/*********************************************************************
+ *
+ * Filename: irda_device.h
+ * Version: 0.9
+ * Description: Contains various declarations used by the drivers
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 14 12:41:42 1998
+ * Modified at: Mon Mar 20 09:08:57 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+/*
+ * This header contains all the IrDA definitions a driver really
+ * needs, and therefore the driver should not need to include
+ * any other IrDA headers - Jean II
+ */
+
+#ifndef IRDA_DEVICE_H
+#define IRDA_DEVICE_H
+
+#include <linux/config.h>
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/irda.h>
+#include <linux/types.h>
+
+#include <net/pkt_sched.h>
+#include <net/irda/irda.h>
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/irqueue.h> /* irda_queue_t */
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+
+/* Some non-standard interface flags (should not conflict with any in if.h) */
+#define IFF_SIR 0x0001 /* Supports SIR speeds */
+#define IFF_MIR 0x0002 /* Supports MIR speeds */
+#define IFF_FIR 0x0004 /* Supports FIR speeds */
+#define IFF_VFIR 0x0008 /* Supports VFIR speeds */
+#define IFF_PIO 0x0010 /* Supports PIO transfer of data */
+#define IFF_DMA 0x0020 /* Supports DMA transfer of data */
+#define IFF_SHM 0x0040 /* Supports shared memory data transfers */
+#define IFF_DONGLE 0x0080 /* Interface has a dongle attached */
+#define IFF_AIR 0x0100 /* Supports Advanced IR (AIR) standards */
+
+#define IO_XMIT 0x01
+#define IO_RECV 0x02
+
+typedef enum {
+ IRDA_IRLAP, /* IrDA mode, and deliver to IrLAP */
+ IRDA_RAW, /* IrDA mode */
+ SHARP_ASK,
+ TV_REMOTE, /* Also known as Consumer Electronics IR */
+} INFRARED_MODE;
+
+typedef enum {
+ IRDA_TASK_INIT, /* All tasks are initialized with this state */
+ IRDA_TASK_DONE, /* Signals that the task is finished */
+ IRDA_TASK_WAIT,
+ IRDA_TASK_WAIT1,
+ IRDA_TASK_WAIT2,
+ IRDA_TASK_WAIT3,
+ IRDA_TASK_CHILD_INIT, /* Initializing child task */
+ IRDA_TASK_CHILD_WAIT, /* Waiting for child task to finish */
+ IRDA_TASK_CHILD_DONE /* Child task is finished */
+} IRDA_TASK_STATE;
+
+struct irda_task;
+typedef int (*IRDA_TASK_CALLBACK) (struct irda_task *task);
+
+struct irda_task {
+ irda_queue_t q;
+ magic_t magic;
+
+ IRDA_TASK_STATE state;
+ IRDA_TASK_CALLBACK function;
+ IRDA_TASK_CALLBACK finished;
+
+ struct irda_task *parent;
+ struct timer_list timer;
+
+ void *instance; /* Instance being called */
+ void *param; /* Parameter to be used by instance */
+};
+
+/* Dongle info */
+struct dongle_reg;
+typedef struct {
+ struct dongle_reg *issue; /* Registration info */
+ struct net_device *dev; /* Device we are attached to */
+ struct irda_task *speed_task; /* Task handling speed change */
+ struct irda_task *reset_task; /* Task handling reset */
+ __u32 speed; /* Current speed */
+
+ /* Callbacks to the IrDA device driver */
+ int (*set_mode)(struct net_device *, int mode);
+ int (*read)(struct net_device *dev, __u8 *buf, int len);
+ int (*write)(struct net_device *dev, __u8 *buf, int len);
+ int (*set_dtr_rts)(struct net_device *dev, int dtr, int rts);
+} dongle_t;
+
+/* Dongle registration info */
+struct dongle_reg {
+ irda_queue_t q; /* Must be first */
+ IRDA_DONGLE type;
+
+ void (*open)(dongle_t *dongle, struct qos_info *qos);
+ void (*close)(dongle_t *dongle);
+ int (*reset)(struct irda_task *task);
+ int (*change_speed)(struct irda_task *task);
+ struct module *owner;
+};
+
+/*
+ * Per-packet information we need to hide inside sk_buff
+ * (must not exceed 48 bytes, check with struct sk_buff)
+ */
+struct irda_skb_cb {
+ magic_t magic; /* Be sure that we can trust the information */
+ __u32 next_speed; /* The Speed to be set *after* this frame */
+ __u16 mtt; /* Minimum turn around time */
+ __u16 xbofs; /* Number of xbofs required, used by SIR mode */
+ __u16 next_xbofs; /* Number of xbofs required *after* this frame */
+ void *context; /* May be used by drivers */
+ void (*destructor)(struct sk_buff *skb); /* Used for flow control */
+ __u16 xbofs_delay; /* Number of xbofs used for generating the mtt */
+ __u8 line; /* Used by IrCOMM in IrLPT mode */
+};
+
+/* Chip specific info */
+typedef struct {
+ int cfg_base; /* Config register IO base */
+ int sir_base; /* SIR IO base */
+ int fir_base; /* FIR IO base */
+ int mem_base; /* Shared memory base */
+ int sir_ext; /* Length of SIR iobase */
+ int fir_ext; /* Length of FIR iobase */
+ int irq, irq2; /* Interrupts used */
+ int dma, dma2; /* DMA channel(s) used */
+ int fifo_size; /* FIFO size */
+ int irqflags; /* interrupt flags (ie, SA_SHIRQ|SA_INTERRUPT) */
+ int direction; /* Link direction, used by some FIR drivers */
+ int enabled; /* Powered on? */
+ int suspended; /* Suspended by APM */
+ __u32 speed; /* Currently used speed */
+ __u32 new_speed; /* Speed we must change to when Tx is finished */
+ int dongle_id; /* Dongle or transceiver currently used */
+} chipio_t;
+
+/* IO buffer specific info (inspired by struct sk_buff) */
+typedef struct {
+ int state; /* Receiving state (transmit state not used) */
+ int in_frame; /* True if receiving frame */
+
+ __u8 *head; /* start of buffer */
+ __u8 *data; /* start of data in buffer */
+
+ int len; /* current length of data */
+ int truesize; /* total allocated size of buffer */
+ __u16 fcs;
+
+ struct sk_buff *skb; /* ZeroCopy Rx in async_unwrap_char() */
+} iobuff_t;
+
+/* Maximum SIR frame (skb) that we expect to receive *unwrapped*.
+ * Max LAP MTU (I field) is 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
+ * Max LAP header is 2 bytes (for now).
+ * Max CRC is 2 bytes at SIR, 4 bytes at FIR.
+ * Need 1 byte for skb_reserve() to align IP header for IrLAN.
+ * Add a few extra bytes just to be safe (buffer is power of two anyway)
+ * Jean II */
+#define IRDA_SKB_MAX_MTU 2064
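+/* Editorial note, not part of the original header: the figures above add up
+ * to 2048 (max I field) + 2 (LAP header) + 4 (FIR CRC) + 1 (alignment byte)
+ * = 2055, rounded up with a small safety margin to 2064. */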
+/* Maximum SIR frame that we expect to send, wrapped (i.e. with XBOFS
+ * and escaped characters on top of above). */
+#define IRDA_SIR_MAX_FRAME 4269
+
+/* The SIR unwrapper async_unwrap_char() will use an Rx-copy-break mechanism
+ * when using the optional ZeroCopy Rx, where only small frames are copied
+ * to a smaller skb to save memory. This is the threshold under which copy
+ * will happen (and over which it won't happen).
+ * Some FIR drivers may use this #define as well...
+ * This is the same value as various Ethernet drivers. - Jean II */
+#define IRDA_RX_COPY_THRESHOLD 256
+
+/* Function prototypes */
+int irda_device_init(void);
+void irda_device_cleanup(void);
+
+/* IrLAP entry points used by the drivers.
+ * We declare them here to avoid the driver pulling in a whole bunch of
+ * stack headers it doesn't really need - Jean II */
+struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
+ const char *hw_name);
+void irlap_close(struct irlap_cb *self);
+
+/* Interface to be used by IrLAP */
+void irda_device_set_media_busy(struct net_device *dev, int status);
+int irda_device_is_media_busy(struct net_device *dev);
+int irda_device_is_receiving(struct net_device *dev);
+
+/* Interface for internal use */
+static inline int irda_device_txqueue_empty(const struct net_device *dev)
+{
+ return (skb_queue_len(&dev->qdisc->q) == 0);
+}
+int irda_device_set_raw_mode(struct net_device* self, int status);
+struct net_device *alloc_irdadev(int sizeof_priv);
+
+/* Dongle interface */
+void irda_device_unregister_dongle(struct dongle_reg *dongle);
+int irda_device_register_dongle(struct dongle_reg *dongle);
+dongle_t *irda_device_dongle_init(struct net_device *dev, int type);
+int irda_device_dongle_cleanup(dongle_t *dongle);
+
+#ifdef CONFIG_ISA
+void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode);
+#endif
+
+void irda_task_delete(struct irda_task *task);
+struct irda_task *irda_task_execute(void *instance,
+ IRDA_TASK_CALLBACK function,
+ IRDA_TASK_CALLBACK finished,
+ struct irda_task *parent, void *param);
+void irda_task_next_state(struct irda_task *task, IRDA_TASK_STATE state);
+
+/*
+ * Function irda_get_mtt (skb)
+ *
+ * Utility function for getting the minimum turnaround time out of
+ * the skb, where it has been hidden in the cb field.
+ */
+static inline __u16 irda_get_mtt(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->mtt : 10000;
+}
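+/*
+ * Editorial sketch, not part of the original header: a driver's transmit
+ * routine would typically honour the turnaround time before starting the
+ * frame, e.g. (hypothetical driver code):
+ *
+ *   __u16 mtt = irda_get_mtt(skb);
+ *   if (mtt)
+ *      udelay(mtt);      or arm a hardware timer instead of busy-waiting
+ *   ... start transmitting the frame ...
+ */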
+
+/*
+ * Function irda_get_next_speed (skb)
+ *
+ * Extract the speed that should be set *after* this frame from the skb
+ *
+ * Note : return -1 for user space frames
+ */
+static inline __u32 irda_get_next_speed(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->next_speed : -1;
+}
+
+/*
+ * Function irda_get_next_xbofs (skb)
+ *
+ * Extract the xbofs that should be set for this frame from the skb
+ *
+ * Note : default to 10 for user space frames
+ */
+static inline __u16 irda_get_xbofs(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->xbofs : 10;
+}
+
+/*
+ * Function irda_get_next_xbofs (skb)
+ *
+ * Extract the xbofs that should be set *after* this frame from the skb
+ *
+ * Note : return -1 for user space frames
+ */
+static inline __u16 irda_get_next_xbofs(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->next_xbofs : -1;
+}
+#endif /* IRDA_DEVICE_H */
+
+
diff --git a/include/net/irda/iriap.h b/include/net/irda/iriap.h
new file mode 100644
index 000000000000..2007c5a0a43f
--- /dev/null
+++ b/include/net/irda/iriap.h
@@ -0,0 +1,108 @@
+/*********************************************************************
+ *
+ * Filename: iriap.h
+ * Version: 0.5
+ * Description: Information Access Protocol (IAP)
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Aug 21 00:02:07 1997
+ * Modified at: Sat Dec 25 16:42:09 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRIAP_H
+#define IRIAP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/iriap_event.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/timer.h> /* struct timer_list */
+
+#define IAP_LST 0x80
+#define IAP_ACK 0x40
+
+#define IAS_SERVER 0
+#define IAS_CLIENT 1
+
+/* IrIAP Op-codes */
+#define GET_INFO_BASE 0x01
+#define GET_OBJECTS 0x02
+#define GET_VALUE 0x03
+#define GET_VALUE_BY_CLASS 0x04
+#define GET_OBJECT_INFO 0x05
+#define GET_ATTRIB_NAMES 0x06
+
+#define IAS_SUCCESS 0
+#define IAS_CLASS_UNKNOWN 1
+#define IAS_ATTRIB_UNKNOWN 2
+#define IAS_DISCONNECT 10
+
+typedef void (*CONFIRM_CALLBACK)(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+
+struct iriap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic; /* Magic cookie */
+
+ int mode; /* Client or server */
+
+ __u32 saddr;
+ __u32 daddr;
+ __u8 operation;
+
+ struct sk_buff *request_skb;
+ struct lsap_cb *lsap;
+ __u8 slsap_sel;
+
+ /* Client states */
+ IRIAP_STATE client_state;
+ IRIAP_STATE call_state;
+
+ /* Server states */
+ IRIAP_STATE server_state;
+ IRIAP_STATE r_connect_state;
+
+ CONFIRM_CALLBACK confirm;
+ void *priv; /* Used to identify client */
+
+ __u8 max_header_size;
+ __u32 max_data_size;
+
+ struct timer_list watchdog_timer;
+};
+
+int iriap_init(void);
+void iriap_cleanup(void);
+
+struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+ CONFIRM_CALLBACK callback);
+void iriap_close(struct iriap_cb *self);
+
+int iriap_getvaluebyclass_request(struct iriap_cb *self,
+ __u32 saddr, __u32 daddr,
+ char *name, char *attr);
+void iriap_connect_request(struct iriap_cb *self);
+void iriap_send_ack( struct iriap_cb *self);
+void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb);
+
+void iriap_register_server(void);
+
+#endif
+
+
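
For readers unfamiliar with the IAP client API above, here is a hedged sketch of a GetValueByClass query; the function names beginning with sample_, the object/attribute strings, and the error handling are illustrative rather than anything mandated by this header.

    #include <linux/skbuff.h>
    #include <net/irda/iriap.h>
    #include <net/irda/irlmp.h>      /* LSAP_ANY */

    /* Called back once the peer answers (or the query fails). */
    static void sample_gvbc_confirm(int result, __u16 obj_id,
                                    struct ias_value *value, void *priv)
    {
            if (result != IAS_SUCCESS)
                    return;
            /* 'value' holds the attribute; value->type tells whether it is an
             * integer, a string or an octet sequence (see irias_object.h). */
    }

    static int sample_query_peer(__u32 saddr, __u32 daddr)
    {
            struct iriap_cb *iap;

            iap = iriap_open(LSAP_ANY, IAS_CLIENT, NULL, sample_gvbc_confirm);
            if (iap == NULL)
                    return -1;

            /* Example names: ask which LSAP the peer's IrCOMM service uses. */
            return iriap_getvaluebyclass_request(iap, saddr, daddr,
                                                 "IrDA:IrCOMM",
                                                 "IrDA:TinyTP:LsapSel");
    }
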
diff --git a/include/net/irda/iriap_event.h b/include/net/irda/iriap_event.h
new file mode 100644
index 000000000000..4ca3d2071b03
--- /dev/null
+++ b/include/net/irda/iriap_event.h
@@ -0,0 +1,85 @@
+/*********************************************************************
+ *
+ * Filename: iriap_event.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Sun Oct 31 22:02:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRIAP_FSM_H
+#define IRIAP_FSM_H
+
+/* Forward declaration because of circular include dependencies */
+struct iriap_cb;
+
+/* IrIAP states */
+typedef enum {
+ /* Client */
+ S_DISCONNECT,
+ S_CONNECTING,
+ S_CALL,
+
+ /* S-Call */
+ S_MAKE_CALL,
+ S_CALLING,
+ S_OUTSTANDING,
+ S_REPLYING,
+ S_WAIT_FOR_CALL,
+ S_WAIT_ACTIVE,
+
+ /* Server */
+ R_DISCONNECT,
+ R_CALL,
+
+ /* R-Connect */
+ R_WAITING,
+ R_WAIT_ACTIVE,
+ R_RECEIVING,
+ R_EXECUTE,
+ R_RETURNING,
+} IRIAP_STATE;
+
+typedef enum {
+ IAP_CALL_REQUEST,
+ IAP_CALL_REQUEST_GVBC,
+ IAP_CALL_RESPONSE,
+ IAP_RECV_F_LST,
+ IAP_LM_DISCONNECT_INDICATION,
+ IAP_LM_CONNECT_INDICATION,
+ IAP_LM_CONNECT_CONFIRM,
+} IRIAP_EVENT;
+
+void iriap_next_client_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_call_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_server_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_r_connect_state(struct iriap_cb *self, IRIAP_STATE state);
+
+
+void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+void iriap_do_call_event (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+void iriap_do_server_event (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+#endif /* IRIAP_FSM_H */
+
diff --git a/include/net/irda/irias_object.h b/include/net/irda/irias_object.h
new file mode 100644
index 000000000000..c41196b87955
--- /dev/null
+++ b/include/net/irda/irias_object.h
@@ -0,0 +1,108 @@
+/*********************************************************************
+ *
+ * Filename: irias_object.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 1 22:49:50 1998
+ * Modified at: Wed Dec 15 11:20:57 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef LM_IAS_OBJECT_H
+#define LM_IAS_OBJECT_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h>
+
+/* LM-IAS Attribute types */
+#define IAS_MISSING 0
+#define IAS_INTEGER 1
+#define IAS_OCT_SEQ 2
+#define IAS_STRING 3
+
+/* Object ownership of attributes (user or kernel) */
+#define IAS_KERNEL_ATTR 0
+#define IAS_USER_ATTR 1
+
+/*
+ * LM-IAS Object
+ */
+struct ias_object {
+ irda_queue_t q; /* Must be first! */
+ magic_t magic;
+
+ char *name;
+ int id;
+ hashbin_t *attribs;
+};
+
+/*
+ * Values used by LM-IAS attributes
+ */
+struct ias_value {
+ __u8 type; /* Value description */
+ __u8 owner; /* Managed from user/kernel space */
+ int charset; /* Only used by string type */
+ int len;
+
+ /* Value */
+ union {
+ int integer;
+ char *string;
+ __u8 *oct_seq;
+ } t;
+};
+
+/*
+ * Attributes used by LM-IAS objects
+ */
+struct ias_attrib {
+ irda_queue_t q; /* Must be first! */
+ int magic;
+
+ char *name; /* Attribute name */
+ struct ias_value *value; /* Attribute value */
+};
+
+struct ias_object *irias_new_object(char *name, int id);
+void irias_insert_object(struct ias_object *obj);
+int irias_delete_object(struct ias_object *obj);
+int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib,
+ int cleanobject);
+void __irias_delete_object(struct ias_object *obj);
+
+void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
+ int user);
+void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
+ int user);
+void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
+ int len, int user);
+int irias_object_change_attribute(char *obj_name, char *attrib_name,
+ struct ias_value *new_value);
+struct ias_object *irias_find_object(char *name);
+struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name);
+
+struct ias_value *irias_new_string_value(char *string);
+struct ias_value *irias_new_integer_value(int integer);
+struct ias_value *irias_new_octseq_value(__u8 *octseq, int len);
+struct ias_value *irias_new_missing_value(void);
+void irias_delete_value(struct ias_value *value);
+
+extern struct ias_value irias_missing;
+extern hashbin_t *irias_objects;
+
+#endif
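
To make the object/attribute API above concrete, a hedged sketch of how a service could publish an entry in the local IAS database follows; the class name, the object id and the helper name are placeholders, not values defined by this header.

    #include <net/irda/irias_object.h>

    static void sample_publish_ias_entry(__u8 lsap_sel)
    {
            struct ias_object *obj;

            /* Object id 42 and the "Sample:Service" class name are made up. */
            obj = irias_new_object("Sample:Service", 42);
            if (obj == NULL)
                    return;

            /* Peers query this attribute to learn which LSAP to connect to. */
            irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel", lsap_sel,
                                     IAS_KERNEL_ATTR);
            irias_insert_object(obj);
    }
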
diff --git a/include/net/irda/irlan_client.h b/include/net/irda/irlan_client.h
new file mode 100644
index 000000000000..736dabe211e3
--- /dev/null
+++ b/include/net/irda/irlan_client.h
@@ -0,0 +1,42 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client.h
+ * Version: 0.3
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 14:13:34 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_CLIENT_H
+#define IRLAN_CLIENT_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irias_object.h>
+#include <net/irda/irlan_event.h>
+
+void irlan_client_discovery_indication(discinfo_t *, DISCOVERY_MODE, void *);
+void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr);
+
+void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb);
+void irlan_client_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+#endif
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
new file mode 100644
index 000000000000..1c73bdbc3eb3
--- /dev/null
+++ b/include/net/irda/irlan_common.h
@@ -0,0 +1,222 @@
+/*********************************************************************
+ *
+ * Filename: irlan_common.h
+ * Version: 0.8
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun Oct 31 19:41:24 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_H
+#define IRLAN_H
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irttp.h>
+
+#define IRLAN_MTU 1518
+#define IRLAN_TIMEOUT 10*HZ /* 10 seconds */
+
+/* Command packet types */
+#define CMD_GET_PROVIDER_INFO 0
+#define CMD_GET_MEDIA_CHAR 1
+#define CMD_OPEN_DATA_CHANNEL 2
+#define CMD_CLOSE_DATA_CHAN 3
+#define CMD_RECONNECT_DATA_CHAN 4
+#define CMD_FILTER_OPERATION 5
+
+/* Some responses */
+#define RSP_SUCCESS 0
+#define RSP_INSUFFICIENT_RESOURCES 1
+#define RSP_INVALID_COMMAND_FORMAT 2
+#define RSP_COMMAND_NOT_SUPPORTED 3
+#define RSP_PARAM_NOT_SUPPORTED 4
+#define RSP_VALUE_NOT_SUPPORTED 5
+#define RSP_NOT_OPEN 6
+#define RSP_AUTHENTICATION_REQUIRED 7
+#define RSP_INVALID_PASSWORD 8
+#define RSP_PROTOCOL_ERROR 9
+#define RSP_ASYNCHRONOUS_ERROR 255
+
+/* Media types */
+#define MEDIA_802_3 1
+#define MEDIA_802_5 2
+
+/* Filter parameters */
+#define DATA_CHAN 1
+#define FILTER_TYPE 2
+#define FILTER_MODE 3
+
+/* Filter types */
+#define IRLAN_DIRECTED 0x01
+#define IRLAN_FUNCTIONAL 0x02
+#define IRLAN_GROUP 0x04
+#define IRLAN_MAC_FRAME 0x08
+#define IRLAN_MULTICAST 0x10
+#define IRLAN_BROADCAST 0x20
+#define IRLAN_IPX_SOCKET 0x40
+
+/* Filter modes */
+#define ALL 1
+#define FILTER 2
+#define NONE 3
+
+/* Filter operations */
+#define GET 1
+#define CLEAR 2
+#define ADD 3
+#define REMOVE 4
+#define DYNAMIC 5
+
+/* Access types */
+#define ACCESS_DIRECT 1
+#define ACCESS_PEER 2
+#define ACCESS_HOSTED 3
+
+#define IRLAN_BYTE 0
+#define IRLAN_SHORT 1
+#define IRLAN_ARRAY 2
+
+#define IRLAN_MAX_HEADER (TTP_HEADER+LMP_HEADER+LAP_MAX_HEADER)
+
+/*
+ * IrLAN client
+ */
+struct irlan_client_cb {
+ int state;
+
+ int open_retries;
+
+ struct tsap_cb *tsap_ctrl;
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ int access_type; /* Access type of provider */
+ __u8 reconnect_key[255];
+ __u8 key_len;
+
+ __u16 recv_arb_val;
+ __u16 max_frame;
+ int filter_type;
+
+ int unicast_open;
+ int broadcast_open;
+
+ int tx_busy;
+ struct sk_buff_head txq; /* Transmit control queue */
+
+ struct iriap_cb *iriap;
+
+ struct timer_list kick_timer;
+};
+
+/*
+ * IrLAN provider
+ */
+struct irlan_provider_cb {
+ int state;
+
+ struct tsap_cb *tsap_ctrl;
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ /*
+ * Store some values here which are used by the provider to parse
+ * the filter operations
+ */
+ int data_chan;
+ int filter_type;
+ int filter_mode;
+ int filter_operation;
+ int filter_entry;
+ int access_type; /* Access type */
+ __u16 send_arb_val;
+
+ __u8 mac_address[6]; /* Generated MAC address for peer device */
+};
+
+/*
+ * IrLAN control block
+ */
+struct irlan_cb {
+ int magic;
+ struct list_head dev_list;
+ struct net_device *dev; /* Ethernet device structure*/
+ struct net_device_stats stats;
+
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ int disconnect_reason; /* Why we got disconnected */
+
+ int media; /* Media type */
+ __u8 version[2]; /* IrLAN version */
+
+ struct tsap_cb *tsap_data; /* Data TSAP */
+
+ int use_udata; /* Use Unit Data transfers */
+
+ __u8 stsap_sel_data; /* Source data TSAP selector */
+ __u8 dtsap_sel_data; /* Destination data TSAP selector */
+ __u8 dtsap_sel_ctrl; /* Destination ctrl TSAP selector */
+
+ struct irlan_client_cb client; /* Client specific fields */
+ struct irlan_provider_cb provider; /* Provider specific fields */
+
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ wait_queue_head_t open_wait;
+ struct timer_list watchdog_timer;
+};
+
+void irlan_close(struct irlan_cb *self);
+void irlan_close_tsaps(struct irlan_cb *self);
+
+int irlan_register_netdev(struct irlan_cb *self);
+void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel);
+void irlan_start_watchdog_timer(struct irlan_cb *self, int timeout);
+
+void irlan_open_data_tsap(struct irlan_cb *self);
+
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self);
+
+struct irlan_cb *irlan_get_any(void);
+void irlan_get_provider_info(struct irlan_cb *self);
+void irlan_get_media_char(struct irlan_cb *self);
+void irlan_open_data_channel(struct irlan_cb *self);
+void irlan_close_data_channel(struct irlan_cb *self);
+void irlan_set_multicast_filter(struct irlan_cb *self, int status);
+void irlan_set_broadcast_filter(struct irlan_cb *self, int status);
+
+int irlan_insert_byte_param(struct sk_buff *skb, char *param, __u8 value);
+int irlan_insert_short_param(struct sk_buff *skb, char *param, __u16 value);
+int irlan_insert_string_param(struct sk_buff *skb, char *param, char *value);
+int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *value,
+ __u16 value_len);
+
+int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len);
+
+#endif
+
+
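
A hedged sketch of how the parameter-insertion helpers above are meant to compose an IrLAN control frame; the opcode/parameter-count layout, the parameter names and the helper name are shown only as an illustration of typical usage, not as a definition from this header.

    #include <linux/skbuff.h>
    #include <net/irda/irlan_common.h>

    static struct sk_buff *sample_build_filter_cmd(struct irlan_cb *self)
    {
            struct sk_buff *skb;
            __u8 *frame;

            skb = alloc_skb(IRLAN_MAX_HEADER + 64, GFP_ATOMIC);
            if (skb == NULL)
                    return NULL;

            /* Leave room for the TTP/LMP/LAP headers added by lower layers. */
            skb_reserve(skb, self->client.max_header_size);
            skb_put(skb, 2);

            frame = skb->data;
            frame[0] = CMD_FILTER_OPERATION;  /* command opcode */
            frame[1] = 0x03;                  /* number of parameters below */

            irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
            irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
            irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");

            return skb;
    }
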
diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h
new file mode 100644
index 000000000000..9a9b3619d305
--- /dev/null
+++ b/include/net/irda/irlan_eth.h
@@ -0,0 +1,33 @@
+/*********************************************************************
+ *
+ * Filename: irlan_eth.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 15 08:36:58 1998
+ * Modified at: Fri May 14 23:29:00 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_ETH_H
+#define IRLAN_ETH_H
+
+struct net_device *alloc_irlandev(const char *name);
+int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb);
+
+void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow);
+void irlan_eth_send_gratuitous_arp(struct net_device *dev);
+#endif
diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h
new file mode 100644
index 000000000000..b9baac9eb8b6
--- /dev/null
+++ b/include/net/irda/irlan_event.h
@@ -0,0 +1,81 @@
+/*********************************************************************
+ *
+ * Filename: irlan_event.h
+ * Version:
+ * Description: LAN access
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Tue Feb 2 09:45:17 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_EVENT_H
+#define IRLAN_EVENT_H
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/irlan_common.h>
+
+typedef enum {
+ IRLAN_IDLE,
+ IRLAN_QUERY,
+ IRLAN_CONN,
+ IRLAN_INFO,
+ IRLAN_MEDIA,
+ IRLAN_OPEN,
+ IRLAN_WAIT,
+ IRLAN_ARB,
+ IRLAN_DATA,
+ IRLAN_CLOSE,
+ IRLAN_SYNC
+} IRLAN_STATE;
+
+typedef enum {
+ IRLAN_DISCOVERY_INDICATION,
+ IRLAN_IAS_PROVIDER_AVAIL,
+ IRLAN_IAS_PROVIDER_NOT_AVAIL,
+ IRLAN_LAP_DISCONNECT,
+ IRLAN_LMP_DISCONNECT,
+ IRLAN_CONNECT_COMPLETE,
+ IRLAN_DATA_INDICATION,
+ IRLAN_DATA_CONNECT_INDICATION,
+ IRLAN_RETRY_CONNECT,
+
+ IRLAN_CONNECT_INDICATION,
+ IRLAN_GET_INFO_CMD,
+ IRLAN_GET_MEDIA_CMD,
+ IRLAN_OPEN_DATA_CMD,
+ IRLAN_FILTER_CONFIG_CMD,
+
+ IRLAN_CHECK_CON_ARB,
+ IRLAN_PROVIDER_SIGNAL,
+
+ IRLAN_WATCHDOG_TIMEOUT,
+} IRLAN_EVENT;
+
+extern char *irlan_state[];
+
+void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state);
+void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state);
+
+#endif
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
new file mode 100644
index 000000000000..3afeb6c94ea4
--- /dev/null
+++ b/include/net/irda/irlan_filter.h
@@ -0,0 +1,33 @@
+/*********************************************************************
+ *
+ * Filename: irlan_filter.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Jan 29 15:24:08 1999
+ * Modified at: Sun Feb 7 23:35:31 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_FILTER_H
+#define IRLAN_FILTER_H
+
+void irlan_check_command_param(struct irlan_cb *self, char *param,
+ char *value);
+void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+int irlan_print_filter(struct seq_file *seq, int filter_type);
+
+#endif /* IRLAN_FILTER_H */
diff --git a/include/net/irda/irlan_provider.h b/include/net/irda/irlan_provider.h
new file mode 100644
index 000000000000..ca51d5b7c999
--- /dev/null
+++ b/include/net/irda/irlan_provider.h
@@ -0,0 +1,52 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider.h
+ * Version: 0.1
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun May 9 12:26:11 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_SERVER_H
+#define IRLAN_SERVER_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irlan_common.h>
+
+void irlan_provider_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb);
+
+
+void irlan_provider_connect_response(struct irlan_cb *, struct tsap_cb *);
+
+int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb);
+int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
+ struct sk_buff *skb);
+
+void irlan_provider_send_reply(struct irlan_cb *self, int command,
+ int ret_code);
+int irlan_provider_open_ctrl_tsap(struct irlan_cb *self);
+
+#endif
+
+
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
new file mode 100644
index 000000000000..f55e86e75030
--- /dev/null
+++ b/include/net/irda/irlap.h
@@ -0,0 +1,290 @@
+/*********************************************************************
+ *
+ * Filename: irlap.h
+ * Version: 0.8
+ * Description: An IrDA LAP driver for Linux
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Fri Dec 10 13:21:17 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_H
+#define IRLAP_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/discovery.h> /* discovery_t */
+#include <net/irda/irlap_event.h> /* IRLAP_STATE, ... */
+#include <net/irda/irmod.h> /* struct notify_t */
+
+#define CONFIG_IRDA_DYNAMIC_WINDOW 1
+
+#define LAP_RELIABLE 1
+#define LAP_UNRELIABLE 0
+
+#define LAP_ADDR_HEADER 1 /* IrLAP Address Header */
+#define LAP_CTRL_HEADER 1 /* IrLAP Control Header */
+
+/* May be different when we get VFIR */
+#define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)
+
+#define BROADCAST 0xffffffff /* Broadcast device address */
+#define CBROADCAST 0xfe /* Connection broadcast address */
+#define XID_FORMAT 0x01 /* Discovery XID format */
+
+/* Nobody seems to use this constant. */
+#define LAP_WINDOW_SIZE 8
+/* We keep the LAP queue very small to minimise the amount of buffering.
+ * This improves latency and reduces resource consumption.
+ * This works only because we have synchronous refilling of IrLAP through
+ * the flow control mechanism (via the scheduler and IrTTP).
+ * Two buffers is the minimum we can work with: one that we send while polling
+ * IrTTP, and another so that we know we should not send the pf bit.
+ * Jean II */
+#define LAP_HIGH_THRESHOLD 2
+/* Some rare non-TTP clients don't implement flow control, and
+ * so don't comply with the above limit (nor with this one).
+ * For IAP and management it doesn't matter, because they never transmit much.
+ * For IrLPT, this should be fixed.
+ * - Jean II */
+#define LAP_MAX_QUEUE 10
+/* Please note that all IrDA management frames (LMP/TTP conn req/disc and
+ * IAS queries) fall in the second category and are sent to LAP even if TTP
+ * is stopped. This means that those frames will wait behind at most
+ * two (2) data frames before being sent on the "wire", which speeds up
+ * new socket setup when the link is saturated.
+ * Same story for two sockets competing for the medium: if one saturates
+ * the LAP, when the other wants to transmit it only has to wait for at
+ * most three (3) packets (2 + one scheduling), which improves performance
+ * of delay-sensitive applications.
+ * Jean II */
+
+#define NR_EXPECTED 1
+#define NR_UNEXPECTED 0
+#define NR_INVALID -1
+
+#define NS_EXPECTED 1
+#define NS_UNEXPECTED 0
+#define NS_INVALID -1
+
+/*
+ * Meta information passed within the IrLAP state machine
+ */
+struct irlap_info {
+ __u8 caddr; /* Connection address */
+ __u8 control; /* Frame type */
+ __u8 cmd;
+
+ __u32 saddr;
+ __u32 daddr;
+
+ int pf; /* Poll/final bit set */
+
+ __u8 nr; /* Sequence number of next frame expected */
+ __u8 ns; /* Sequence number of frame sent */
+
+ int S; /* Number of slots */
+ int slot; /* Randomly chosen slot */
+ int s; /* Current slot */
+
+ discovery_t *discovery; /* Discovery information */
+};
+
+/* Main structure of IrLAP */
+struct irlap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic;
+
+ /* Device we are attached to */
+ struct net_device *netdev;
+ char hw_name[2*IFNAMSIZ + 1];
+
+ /* Connection state */
+ volatile IRLAP_STATE state; /* Current state */
+
+ /* Timers used by IrLAP */
+ struct timer_list query_timer;
+ struct timer_list slot_timer;
+ struct timer_list discovery_timer;
+ struct timer_list final_timer;
+ struct timer_list poll_timer;
+ struct timer_list wd_timer;
+ struct timer_list backoff_timer;
+
+ /* Media busy stuff */
+ struct timer_list media_busy_timer;
+ int media_busy;
+
+ /* Timeouts which will be different with different turn time */
+ int slot_timeout;
+ int poll_timeout;
+ int final_timeout;
+ int wd_timeout;
+
+ struct sk_buff_head txq; /* Frames to be transmitted */
+ struct sk_buff_head txq_ultra;
+
+ __u8 caddr; /* Connection address */
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+
+ int retry_count; /* Times tried to establish connection */
+ int add_wait; /* True if we are waiting for frame */
+
+ __u8 connect_pending;
+ __u8 disconnect_pending;
+
+ /* To send a faster RR if tx queue empty */
+#ifdef CONFIG_IRDA_FAST_RR
+ int fast_RR_timeout;
+ int fast_RR;
+#endif /* CONFIG_IRDA_FAST_RR */
+
+ int N1; /* N1 * F-timer = Negotiated link disconnect warning threshold */
+ int N2; /* N2 * F-timer = Negotiated link disconnect time */
+ int N3; /* Connection retry count */
+
+ int local_busy;
+ int remote_busy;
+ int xmitflag;
+
+ __u8 vs; /* Next frame to be sent */
+ __u8 vr; /* Next frame to be received */
+ __u8 va; /* Last frame acked */
+ int window; /* Nr of I-frames allowed to send */
+ int window_size; /* Current negotiated window size */
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ __u32 line_capacity; /* Number of bytes allowed to send */
+ __u32 bytes_left; /* Number of bytes still allowed to transmit */
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+
+ struct sk_buff_head wx_list;
+
+ __u8 ack_required;
+
+ /* XID parameters */
+ __u8 S; /* Number of slots */
+ __u8 slot; /* Randomly chosen slot */
+ __u8 s; /* Current slot */
+ int frame_sent; /* Have we sent reply? */
+
+ hashbin_t *discovery_log;
+ discovery_t *discovery_cmd;
+
+ __u32 speed; /* Link speed */
+
+ struct qos_info qos_tx; /* QoS requested by peer */
+ struct qos_info qos_rx; /* QoS requested by self */
+ struct qos_info *qos_dev; /* QoS supported by device */
+
+ notify_t notify; /* Callbacks to IrLMP */
+
+ int mtt_required; /* Minimum turnaround time required */
+ int xbofs_delay; /* Nr of XBOFs used for MTT */
+ int bofs_count; /* Negotiated extra BOFs */
+ int next_bofs; /* Negotiated extra BOFs after next frame */
+};
+
+/*
+ * Function prototypes
+ */
+int irlap_init(void);
+void irlap_cleanup(void);
+
+struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
+ const char *hw_name);
+void irlap_close(struct irlap_cb *self);
+
+void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
+ struct qos_info *qos, int sniff);
+void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_connect_confirm(struct irlap_cb *, struct sk_buff *skb);
+
+void irlap_data_indication(struct irlap_cb *, struct sk_buff *, int unreliable);
+void irlap_data_request(struct irlap_cb *, struct sk_buff *, int unreliable);
+
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_request(struct irlap_cb *, struct sk_buff *);
+void irlap_unitdata_indication(struct irlap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlap_disconnect_request(struct irlap_cb *);
+void irlap_disconnect_indication(struct irlap_cb *, LAP_REASON reason);
+
+void irlap_status_indication(struct irlap_cb *, int quality_of_link);
+
+void irlap_test_request(__u8 *info, int len);
+
+void irlap_discovery_request(struct irlap_cb *, discovery_t *discovery);
+void irlap_discovery_confirm(struct irlap_cb *, hashbin_t *discovery_log);
+void irlap_discovery_indication(struct irlap_cb *, discovery_t *discovery);
+
+void irlap_reset_indication(struct irlap_cb *self);
+void irlap_reset_confirm(void);
+
+void irlap_update_nr_received(struct irlap_cb *, int nr);
+int irlap_validate_nr_received(struct irlap_cb *, int nr);
+int irlap_validate_ns_received(struct irlap_cb *, int ns);
+
+int irlap_generate_rand_time_slot(int S, int s);
+void irlap_initiate_connection_state(struct irlap_cb *);
+void irlap_flush_all_queues(struct irlap_cb *);
+void irlap_wait_min_turn_around(struct irlap_cb *, struct qos_info *);
+
+void irlap_apply_default_connection_parameters(struct irlap_cb *self);
+void irlap_apply_connection_parameters(struct irlap_cb *self, int now);
+
+#define IRLAP_GET_HEADER_SIZE(self) (LAP_MAX_HEADER)
+#define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
+
+/* Return TRUE if the node is in primary mode (i.e. master)
+ * - Jean II */
+static inline int irlap_is_primary(struct irlap_cb *self)
+{
+ int ret;
+ switch(self->state) {
+ case LAP_XMIT_P:
+ case LAP_NRM_P:
+ ret = 1;
+ break;
+ case LAP_XMIT_S:
+ case LAP_NRM_S:
+ ret = 0;
+ break;
+ default:
+ ret = -1;
+ }
+ return(ret);
+}
+
+/* Clear a pending IrLAP disconnect. - Jean II */
+static inline void irlap_clear_disconnect(struct irlap_cb *self)
+{
+ self->disconnect_pending = FALSE;
+}
+
+#endif
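
The queue-size comments above translate into very little code on the caller's side; the helper below is a hypothetical illustration of that policy (its name is not defined anywhere in this patch).

    #include <net/irda/irlap.h>

    /* Returns non-zero while it is still reasonable to hand IrLAP another
     * frame: the queue is kept below LAP_HIGH_THRESHOLD so that at most two
     * data frames sit in front of any newly queued frame. */
    static int sample_lap_can_take_frame(struct irlap_cb *self)
    {
            return IRLAP_GET_TX_QUEUE_LEN(self) < LAP_HIGH_THRESHOLD;
    }
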
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
new file mode 100644
index 000000000000..2ae2e119ef4b
--- /dev/null
+++ b/include/net/irda/irlap_event.h
@@ -0,0 +1,131 @@
+/*********************************************************************
+ *
+ *
+ * Filename: irlap_event.h
+ * Version: 0.1
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Tue Dec 21 11:20:30 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_EVENT_H
+#define IRLAP_EVENT_H
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+struct irlap_info;
+
+/* IrLAP States */
+typedef enum {
+ LAP_NDM, /* Normal disconnected mode */
+ LAP_QUERY,
+ LAP_REPLY,
+ LAP_CONN, /* Connect indication */
+ LAP_SETUP, /* Setting up connection */
+ LAP_OFFLINE, /* A really boring state */
+ LAP_XMIT_P,
+ LAP_PCLOSE,
+ LAP_NRM_P, /* Normal response mode as primary */
+ LAP_RESET_WAIT,
+ LAP_RESET,
+ LAP_NRM_S, /* Normal response mode as secondary */
+ LAP_XMIT_S,
+ LAP_SCLOSE,
+ LAP_RESET_CHECK,
+} IRLAP_STATE;
+
+/* IrLAP Events */
+typedef enum {
+ /* Services events */
+ DISCOVERY_REQUEST,
+ CONNECT_REQUEST,
+ CONNECT_RESPONSE,
+ DISCONNECT_REQUEST,
+ DATA_REQUEST,
+ RESET_REQUEST,
+ RESET_RESPONSE,
+
+ /* Send events */
+ SEND_I_CMD,
+ SEND_UI_FRAME,
+
+ /* Receive events */
+ RECV_DISCOVERY_XID_CMD,
+ RECV_DISCOVERY_XID_RSP,
+ RECV_SNRM_CMD,
+ RECV_TEST_CMD,
+ RECV_TEST_RSP,
+ RECV_UA_RSP,
+ RECV_DM_RSP,
+ RECV_RD_RSP,
+ RECV_I_CMD,
+ RECV_I_RSP,
+ RECV_UI_FRAME,
+ RECV_FRMR_RSP,
+ RECV_RR_CMD,
+ RECV_RR_RSP,
+ RECV_RNR_CMD,
+ RECV_RNR_RSP,
+ RECV_REJ_CMD,
+ RECV_REJ_RSP,
+ RECV_SREJ_CMD,
+ RECV_SREJ_RSP,
+ RECV_DISC_CMD,
+
+ /* Timer events */
+ SLOT_TIMER_EXPIRED,
+ QUERY_TIMER_EXPIRED,
+ FINAL_TIMER_EXPIRED,
+ POLL_TIMER_EXPIRED,
+ DISCOVERY_TIMER_EXPIRED,
+ WD_TIMER_EXPIRED,
+ BACKOFF_TIMER_EXPIRED,
+ MEDIA_BUSY_TIMER_EXPIRED,
+} IRLAP_EVENT;
+
+/*
+ * Disconnect reason code
+ */
+typedef enum { /* FIXME check the two first reason codes */
+ LAP_DISC_INDICATION=1, /* Received a disconnect request from peer */
+ LAP_NO_RESPONSE, /* Too many retransmits without response */
+ LAP_RESET_INDICATION, /* Too many retransmits, or invalid nr/ns */
+ LAP_FOUND_NONE, /* No devices were discovered */
+ LAP_MEDIA_BUSY,
+ LAP_PRIMARY_CONFLICT,
+} LAP_REASON;
+
+extern const char *irlap_state[];
+
+void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+void irlap_print_event(IRLAP_EVENT event);
+
+extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+
+#endif
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
new file mode 100644
index 000000000000..3452ae257c84
--- /dev/null
+++ b/include/net/irda/irlap_frame.h
@@ -0,0 +1,142 @@
+/*********************************************************************
+ *
+ * Filename: irlap_frame.h
+ * Version: 0.9
+ * Description: IrLAP frame declarations
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 10:27:26 1997
+ * Modified at: Sat Dec 25 21:07:26 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_FRAME_H
+#define IRLAP_FRAME_H
+
+#include <linux/skbuff.h>
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+struct discovery_t;
+
+/* Frame types and templates */
+#define INVALID 0xff
+
+/* Unnumbered (U) commands */
+#define SNRM_CMD 0x83 /* Set Normal Response Mode */
+#define DISC_CMD 0x43 /* Disconnect */
+#define XID_CMD 0x2f /* Exchange Station Identification */
+#define TEST_CMD 0xe3 /* Test */
+
+/* Unnumbered responses */
+#define RNRM_RSP 0x83 /* Request Normal Response Mode */
+#define UA_RSP 0x63 /* Unnumbered Acknowledgement */
+#define FRMR_RSP 0x87 /* Frame Reject */
+#define DM_RSP 0x0f /* Disconnect Mode */
+#define RD_RSP 0x43 /* Request Disconnection */
+#define XID_RSP 0xaf /* Exchange Station Identification */
+#define TEST_RSP 0xe3 /* Test frame */
+
+/* Supervisory (S) */
+#define RR 0x01 /* Receive Ready */
+#define REJ 0x09 /* Reject */
+#define RNR 0x05 /* Receive Not Ready */
+#define SREJ 0x0d /* Selective Reject */
+
+/* Information (I) */
+#define I_FRAME 0x00 /* Information Format */
+#define UI_FRAME 0x03 /* Unnumbered Information */
+
+#define CMD_FRAME 0x01
+#define RSP_FRAME 0x00
+
+#define PF_BIT 0x10 /* Poll/final bit */
+
+struct xid_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+ __u8 ident; /* Should always be XID_FORMAT */
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ __u8 flags; /* Discovery flags */
+ __u8 slotnr;
+ __u8 version;
+} IRDA_PACK;
+
+struct test_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+} IRDA_PACK;
+
+struct ua_frame {
+ __u8 caddr;
+ __u8 control;
+
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Dest device address */
+} IRDA_PACK;
+
+struct i_frame {
+ __u8 caddr;
+ __u8 control;
+} IRDA_PACK;
+
+struct snrm_frame {
+ __u8 caddr;
+ __u8 control;
+ __u32 saddr;
+ __u32 daddr;
+ __u8 ncaddr;
+} IRDA_PACK;
+
+void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s,
+ __u8 command,
+ struct discovery_t *discovery);
+void irlap_send_snrm_frame(struct irlap_cb *, struct qos_info *);
+void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
+ struct sk_buff *cmd);
+void irlap_send_ua_response_frame(struct irlap_cb *, struct qos_info *);
+void irlap_send_dm_frame(struct irlap_cb *self);
+void irlap_send_rd_frame(struct irlap_cb *self);
+void irlap_send_disc_frame(struct irlap_cb *self);
+void irlap_send_rr_frame(struct irlap_cb *self, int command);
+
+void irlap_send_data_primary(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_primary_poll(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_secondary(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_secondary_final(struct irlap_cb *, struct sk_buff *);
+void irlap_resend_rejected_frames(struct irlap_cb *, int command);
+void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
+
+void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ __u8 caddr, int command);
+
+extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+ struct sk_buff *skb);
+
+#endif
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
new file mode 100644
index 000000000000..86aefb1fda5e
--- /dev/null
+++ b/include/net/irda/irlmp.h
@@ -0,0 +1,295 @@
+/*********************************************************************
+ *
+ * Filename: irlmp.h
+ * Version: 0.9
+ * Description: IrDA Link Management Protocol (LMP) layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 17 20:54:32 1997
+ * Modified at: Fri Dec 10 13:23:01 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLMP_H
+#define IRLMP_H
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/qos.h>
+#include <net/irda/irlap.h> /* LAP_MAX_HEADER, ... */
+#include <net/irda/irlmp_event.h>
+#include <net/irda/irqueue.h>
+#include <net/irda/discovery.h>
+
+/* LSAP-SEL's */
+#define LSAP_MASK 0x7f
+#define LSAP_IAS 0x00
+#define LSAP_ANY 0xff
+#define LSAP_MAX 0x6f /* 0x70-0x7f are reserved */
+#define LSAP_CONNLESS 0x70 /* Connectionless LSAP, mostly used for Ultra */
+
+#define DEV_ADDR_ANY 0xffffffff
+
+#define LMP_HEADER 2 /* Dest LSAP + Source LSAP */
+#define LMP_CONTROL_HEADER 4
+#define LMP_PID_HEADER 1 /* Used by Ultra */
+#define LMP_MAX_HEADER (LMP_CONTROL_HEADER+LAP_MAX_HEADER)
+
+#define LM_MAX_CONNECTIONS 10
+
+#define LM_IDLE_TIMEOUT 2*HZ /* 2 seconds for now */
+
+typedef enum {
+ S_PNP = 0,
+ S_PDA,
+ S_COMPUTER,
+ S_PRINTER,
+ S_MODEM,
+ S_FAX,
+ S_LAN,
+ S_TELEPHONY,
+ S_COMM,
+ S_OBEX,
+ S_ANY,
+ S_END,
+} SERVICE;
+
+/* For selective discovery */
+typedef void (*DISCOVERY_CALLBACK1) (discinfo_t *, DISCOVERY_MODE, void *);
+/* For expiry (the same) */
+typedef void (*DISCOVERY_CALLBACK2) (discinfo_t *, DISCOVERY_MODE, void *);
+
+typedef struct {
+ irda_queue_t queue; /* Must be first */
+
+ __u16_host_order hints; /* Hint bits */
+} irlmp_service_t;
+
+typedef struct {
+ irda_queue_t queue; /* Must be first */
+
+ __u16_host_order hint_mask;
+
+ DISCOVERY_CALLBACK1 disco_callback; /* Selective discovery */
+ DISCOVERY_CALLBACK2 expir_callback; /* Selective expiration */
+ void *priv; /* Used to identify client */
+} irlmp_client_t;
+
+/*
+ * Information about each logical LSAP connection
+ */
+struct lsap_cb {
+ irda_queue_t queue; /* Must be first */
+ magic_t magic;
+
+ unsigned long connected; /* set_bit used on this */
+ int persistent;
+
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address (if connected) */
+#ifdef CONFIG_IRDA_ULTRA
+ __u8 pid; /* Used by connectionless LSAP */
+#endif /* CONFIG_IRDA_ULTRA */
+ struct sk_buff *conn_skb; /* Store skb here while connecting */
+
+ struct timer_list watchdog_timer;
+
+ IRLMP_STATE lsap_state; /* Connection state */
+ notify_t notify; /* Indication/Confirm entry points */
+ struct qos_info qos; /* QoS for this connection */
+
+ struct lap_cb *lap; /* Pointer to LAP connection structure */
+};
+
+/*
+ * Used for caching the last slsap->dlsap->handle mapping
+ *
+ * We don't need to keep/match the remote address in the cache because
+ * we are associated with a specific LAP (which implies it).
+ * Jean II
+ */
+typedef struct {
+ int valid;
+
+ __u8 slsap_sel;
+ __u8 dlsap_sel;
+ struct lsap_cb *lsap;
+} CACHE_ENTRY;
+
+/*
+ * Information about each registered IrLAP layer
+ */
+struct lap_cb {
+ irda_queue_t queue; /* Must be first */
+ magic_t magic;
+
+ int reason; /* LAP disconnect reason */
+
+ IRLMP_STATE lap_state;
+
+ struct irlap_cb *irlap; /* Instance of IrLAP layer */
+ hashbin_t *lsaps; /* LSAP associated with this link */
+ struct lsap_cb *flow_next; /* Next lsap to be polled for Tx */
+
+ __u8 caddr; /* Connection address */
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+
+ struct qos_info *qos; /* LAP QoS for this session */
+ struct timer_list idle_timer;
+
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ /* The lsap cache was moved from struct irlmp_cb to here because
+ * it must be associated with the specific LAP. Also, this
+ * improves performance. - Jean II */
+ CACHE_ENTRY cache; /* Caching last slsap->dlsap->handle mapping */
+#endif
+};
+
+/*
+ * Main structure for IrLMP
+ */
+struct irlmp_cb {
+ magic_t magic;
+
+ __u8 conflict_flag;
+
+ discovery_t discovery_cmd; /* Discovery command to use by IrLAP */
+ discovery_t discovery_rsp; /* Discovery response to use by IrLAP */
+
+ /* Last lsap picked automatically by irlmp_find_free_slsap() */
+ int last_lsap_sel;
+
+ struct timer_list discovery_timer;
+
+ hashbin_t *links; /* IrLAP connection table */
+ hashbin_t *unconnected_lsaps;
+ hashbin_t *clients;
+ hashbin_t *services;
+
+ hashbin_t *cachelog; /* Current discovery log */
+
+ int running;
+
+ __u16_host_order hints; /* Hint bits */
+};
+
+/* Prototype declarations */
+int irlmp_init(void);
+void irlmp_cleanup(void);
+struct lsap_cb *irlmp_open_lsap(__u8 slsap, notify_t *notify, __u8 pid);
+void irlmp_close_lsap( struct lsap_cb *self);
+
+__u16 irlmp_service_to_hint(int service);
+void *irlmp_register_service(__u16 hints);
+int irlmp_unregister_service(void *handle);
+void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv);
+int irlmp_unregister_client(void *handle);
+int irlmp_update_client(void *handle, __u16 hint_mask,
+ DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv);
+
+void irlmp_register_link(struct irlap_cb *, __u32 saddr, notify_t *);
+void irlmp_unregister_link(__u32 saddr);
+
+int irlmp_connect_request(struct lsap_cb *, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *, struct sk_buff *);
+void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb);
+int irlmp_connect_response(struct lsap_cb *, struct sk_buff *);
+void irlmp_connect_confirm(struct lsap_cb *, struct sk_buff *);
+struct lsap_cb *irlmp_dup(struct lsap_cb *self, void *instance);
+
+void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
+ struct sk_buff *userdata);
+int irlmp_disconnect_request(struct lsap_cb *, struct sk_buff *userdata);
+
+void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE mode);
+void irlmp_discovery_request(int nslots);
+discinfo_t *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
+void irlmp_do_expiry(void);
+void irlmp_do_discovery(int nslots);
+discovery_t *irlmp_get_discovery_response(void);
+void irlmp_discovery_expiry(discinfo_t *expiry, int number);
+
+int irlmp_data_request(struct lsap_cb *, struct sk_buff *);
+void irlmp_data_indication(struct lsap_cb *, struct sk_buff *);
+
+int irlmp_udata_request(struct lsap_cb *, struct sk_buff *);
+void irlmp_udata_indication(struct lsap_cb *, struct sk_buff *);
+
+#ifdef CONFIG_IRDA_ULTRA
+int irlmp_connless_data_request(struct lsap_cb *, struct sk_buff *, __u8);
+void irlmp_connless_data_indication(struct lsap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlmp_status_indication(struct lap_cb *, LINK_STATUS link, LOCK_STATUS lock);
+void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow);
+
+LM_REASON irlmp_convert_lap_reason(LAP_REASON);
+
+static inline __u32 irlmp_get_saddr(const struct lsap_cb *self)
+{
+ return (self && self->lap) ? self->lap->saddr : 0;
+}
+
+static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
+{
+ return (self && self->lap) ? self->lap->daddr : 0;
+}
+
+extern const char *irlmp_reasons[];
+extern int sysctl_discovery_timeout;
+extern int sysctl_discovery_slots;
+extern int sysctl_discovery;
+extern int sysctl_lap_keepalive_time; /* in ms, default is LM_IDLE_TIMEOUT */
+extern struct irlmp_cb *irlmp;
+
+/* Check if LAP queue is full.
+ * Used by IrTTP for flow control, see comments in irlap.h - Jean II */
+static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
+{
+ if (self == NULL)
+ return 0;
+ if (self->lap == NULL)
+ return 0;
+ if (self->lap->irlap == NULL)
+ return 0;
+
+ return(IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD);
+}
+
+/* After doing an irlmp_dup(), this gets one of the two sockets back into
+ * a state where it's waiting for incoming connections.
+ * Note: this can be used *only* if the socket is not yet connected
+ * (i.e. NO irlmp_connect_response() done on this socket).
+ * - Jean II */
+static inline void irlmp_listen(struct lsap_cb *self)
+{
+ self->dlsap_sel = LSAP_ANY;
+ self->lap = NULL;
+ self->lsap_state = LSAP_DISCONNECTED;
+ /* Started when we received the LM_CONNECT_INDICATION */
+ del_timer(&self->watchdog_timer);
+}
+
+#endif
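
A hedged sketch of the discovery-client side of the API above: the sample_ callbacks are empty placeholders and the choice of the S_LAN hint is only an example of how hint bits are obtained and registered.

    #include <net/irda/irlmp.h>

    static void sample_disco_indication(discinfo_t *info, DISCOVERY_MODE mode,
                                        void *priv)
    {
            /* A peer advertising the requested hint bits has been discovered. */
    }

    static void sample_disco_expiry(discinfo_t *info, DISCOVERY_MODE mode,
                                    void *priv)
    {
            /* The peer has not been seen for a while and is being expired. */
    }

    static void *sample_register_lan_client(void *priv)
    {
            __u16 hints = irlmp_service_to_hint(S_LAN);

            /* The returned handle is later passed to irlmp_unregister_client(). */
            return irlmp_register_client(hints, sample_disco_indication,
                                         sample_disco_expiry, priv);
    }
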
diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h
new file mode 100644
index 000000000000..03c6f81a502a
--- /dev/null
+++ b/include/net/irda/irlmp_event.h
@@ -0,0 +1,98 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_event.h
+ * Version: 0.1
+ * Description: IrDA-LMP event handling
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Thu Jul 8 12:18:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLMP_EVENT_H
+#define IRLMP_EVENT_H
+
+/* A few forward declarations (to make compiler happy) */
+struct irlmp_cb;
+struct lsap_cb;
+struct lap_cb;
+struct discovery_t;
+
+/* LAP states */
+typedef enum {
+ /* IrLAP connection control states */
+ LAP_STANDBY, /* No LAP connection */
+ LAP_U_CONNECT, /* Starting LAP connection */
+ LAP_ACTIVE, /* LAP connection is active */
+} IRLMP_STATE;
+
+/* LSAP connection control states */
+typedef enum {
+ LSAP_DISCONNECTED, /* No LSAP connection */
+ LSAP_CONNECT, /* Connect indication from peer */
+ LSAP_CONNECT_PEND, /* Connect request from service user */
+ LSAP_DATA_TRANSFER_READY, /* LSAP connection established */
+ LSAP_SETUP, /* Trying to set up LSAP connection */
+ LSAP_SETUP_PEND, /* Request to start LAP connection */
+} LSAP_STATE;
+
+typedef enum {
+ /* LSAP events */
+ LM_CONNECT_REQUEST,
+ LM_CONNECT_CONFIRM,
+ LM_CONNECT_RESPONSE,
+ LM_CONNECT_INDICATION,
+
+ LM_DISCONNECT_INDICATION,
+ LM_DISCONNECT_REQUEST,
+
+ LM_DATA_REQUEST,
+ LM_UDATA_REQUEST,
+ LM_DATA_INDICATION,
+ LM_UDATA_INDICATION,
+
+ LM_WATCHDOG_TIMEOUT,
+
+ /* IrLAP events */
+ LM_LAP_CONNECT_REQUEST,
+ LM_LAP_CONNECT_INDICATION,
+ LM_LAP_CONNECT_CONFIRM,
+ LM_LAP_DISCONNECT_INDICATION,
+ LM_LAP_DISCONNECT_REQUEST,
+ LM_LAP_DISCOVERY_REQUEST,
+ LM_LAP_DISCOVERY_CONFIRM,
+ LM_LAP_IDLE_TIMEOUT,
+} IRLMP_EVENT;
+
+extern const char *irlmp_state[];
+extern const char *irlsap_state[];
+
+void irlmp_watchdog_timer_expired(void *data);
+void irlmp_discovery_timer_expired(void *data);
+void irlmp_idle_timer_expired(void *data);
+
+void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb);
+int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb);
+
+#endif /* IRLMP_EVENT_H */
+
+
+
+
diff --git a/include/net/irda/irlmp_frame.h b/include/net/irda/irlmp_frame.h
new file mode 100644
index 000000000000..eb3ad158c023
--- /dev/null
+++ b/include/net/irda/irlmp_frame.h
@@ -0,0 +1,63 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_frame.h
+ * Version: 0.9
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 02:09:59 1997
+ * Modified at: Fri Dec 10 13:21:53 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRMLP_FRAME_H
+#define IRMLP_FRAME_H
+
+#include <linux/config.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/discovery.h>
+
+/* IrLMP frame opcodes */
+#define CONNECT_CMD 0x01
+#define CONNECT_CNF 0x81
+#define DISCONNECT 0x02
+#define ACCESSMODE_CMD 0x03
+#define ACCESSMODE_CNF 0x83
+
+#define CONTROL_BIT 0x80
+
+void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ int expedited, struct sk_buff *skb);
+void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ __u8 opcode, struct sk_buff *skb);
+void irlmp_link_data_indication(struct lap_cb *, struct sk_buff *,
+ int unreliable);
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_link_unitdata_indication(struct lap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlmp_link_connect_indication(struct lap_cb *, __u32 saddr, __u32 daddr,
+ struct qos_info *qos, struct sk_buff *skb);
+void irlmp_link_connect_request(__u32 daddr);
+void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
+ struct sk_buff *skb);
+void irlmp_link_disconnect_indication(struct lap_cb *, struct irlap_cb *,
+ LAP_REASON reason, struct sk_buff *);
+void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log);
+void irlmp_link_discovery_indication(struct lap_cb *, discovery_t *discovery);
+
+#endif
diff --git a/include/net/irda/irmod.h b/include/net/irda/irmod.h
new file mode 100644
index 000000000000..72b446c1e22c
--- /dev/null
+++ b/include/net/irda/irmod.h
@@ -0,0 +1,109 @@
+/*********************************************************************
+ *
+ * Filename: irmod.h
+ * Version: 0.3
+ * Description: IrDA module and utilities functions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Dec 15 13:58:52 1997
+ * Modified at: Fri Jan 28 13:15:24 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRMOD_H
+#define IRMOD_H
+
+/* Misc status information */
+typedef enum {
+ STATUS_OK,
+ STATUS_ABORTED,
+ STATUS_NO_ACTIVITY,
+ STATUS_NOISY,
+ STATUS_REMOTE,
+} LINK_STATUS;
+
+typedef enum {
+ LOCK_NO_CHANGE,
+ LOCK_LOCKED,
+ LOCK_UNLOCKED,
+} LOCK_STATUS;
+
+typedef enum { FLOW_STOP, FLOW_START } LOCAL_FLOW;
+
+/*
+ * IrLMP disconnect reasons. The order is very important, since they
+ * correspond to disconnect reasons sent in IrLMP disconnect frames, so
+ * please do not touch :-)
+ */
+typedef enum {
+ LM_USER_REQUEST = 1, /* User request */
+ LM_LAP_DISCONNECT, /* Unexpected IrLAP disconnect */
+ LM_CONNECT_FAILURE, /* Failed to establish IrLAP connection */
+ LM_LAP_RESET, /* IrLAP reset */
+ LM_INIT_DISCONNECT, /* Link Management initiated disconnect */
+ LM_LSAP_NOTCONN, /* Data delivered on unconnected LSAP */
+ LM_NON_RESP_CLIENT, /* Non responsive LM-MUX client */
+ LM_NO_AVAIL_CLIENT, /* No available LM-MUX client */
+ LM_CONN_HALF_OPEN, /* Connection is half open */
+ LM_BAD_SOURCE_ADDR, /* Illegal source address (i.e. 0x00) */
+} LM_REASON;
+#define LM_UNKNOWN 0xff /* Unspecified disconnect reason */
+
+/* A few forward declarations (to make compiler happy) */
+struct qos_info; /* in <net/irda/qos.h> */
+
+/*
+ * Notify structure used between transport and link management layers
+ */
+typedef struct {
+ int (*data_indication)(void *priv, void *sap, struct sk_buff *skb);
+ int (*udata_indication)(void *priv, void *sap, struct sk_buff *skb);
+ void (*connect_confirm)(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size, struct sk_buff *skb);
+ void (*connect_indication)(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size, struct sk_buff *skb);
+ void (*disconnect_indication)(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *);
+ void (*flow_indication)(void *instance, void *sap, LOCAL_FLOW flow);
+ void (*status_indication)(void *instance,
+ LINK_STATUS link, LOCK_STATUS lock);
+ void *instance; /* Layer instance pointer */
+ char name[16]; /* Name of layer */
+} notify_t;
+
+#define NOTIFY_MAX_NAME 16
+
+/* Zero the notify structure */
+void irda_notify_init(notify_t *notify);
+
+/* Locking wrapper - Note the inverted logic on irda_lock().
+ * Those functions basically return false if the lock is already in the
+ * position you want to set it to. - Jean II */
+#define irda_lock(lock) (! test_and_set_bit(0, (void *) (lock)))
+#define irda_unlock(lock) (test_and_clear_bit(0, (void *) (lock)))
+
+#endif /* IRMOD_H */
+
+
+
+
+
+
+
+
+
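
A minimal sketch (not part of this patch) of how a client layer might fill in the notify_t and use the locking wrappers declared above. The struct demo_client type, its tx_busy field and the callback are hypothetical; only irda_notify_init(), NOTIFY_MAX_NAME, irda_lock() and irda_unlock() come from this header:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/irda/irmod.h>

struct demo_client {                            /* hypothetical client layer */
        unsigned long   tx_busy;                /* bit 0 driven by irda_lock() */
        notify_t        notify;
};

static int demo_data_indication(void *priv, void *sap, struct sk_buff *skb)
{
        /* A real client would parse the frame; here we just drop it. */
        dev_kfree_skb(skb);
        return 0;
}

static void demo_client_init(struct demo_client *self)
{
        irda_notify_init(&self->notify);        /* zero every callback first */
        self->notify.data_indication = demo_data_indication;
        self->notify.instance = self;           /* handed back as 'priv' */
        strlcpy(self->notify.name, "demo", NOTIFY_MAX_NAME);

        /* irda_lock() returns false if the bit was already set */
        if (irda_lock(&self->tx_busy)) {
                /* ... we own the Tx path here ... */
                irda_unlock(&self->tx_busy);
        }
}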
diff --git a/include/net/irda/irqueue.h b/include/net/irda/irqueue.h
new file mode 100644
index 000000000000..335b0ace9665
--- /dev/null
+++ b/include/net/irda/irqueue.h
@@ -0,0 +1,96 @@
+/*********************************************************************
+ *
+ * Filename: irqueue.h
+ * Version: 0.3
+ * Description: General queue implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Jun 9 13:26:50 1998
+ * Modified at: Thu Oct 7 13:25:16 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
+ * Copyright (c) 1998, Dag Brattli
+ * All Rights Reserved.
+ *
+ * This code is taken from the Vortex Operating System written by Aage
+ * Kvalnes and has been ported to Linux and Linux/IR by Dag Brattli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#ifndef IRDA_QUEUE_H
+#define IRDA_QUEUE_H
+
+#define NAME_SIZE 32
+
+/*
+ * Hash types (some flags can be xored)
+ * See comments in irqueue.c for which one to use...
+ */
+#define HB_NOLOCK 0 /* No concurrent access prevention */
+#define HB_LOCK 1 /* Prevent concurrent write with global lock */
+
+/*
+ * Hash defines
+ */
+#define HASHBIN_SIZE 8
+#define HASHBIN_MASK 0x7
+
+#ifndef IRDA_ALIGN
+#define IRDA_ALIGN __attribute__((aligned))
+#endif
+
+#define Q_NULL { NULL, NULL, "", 0 }
+
+typedef void (*FREE_FUNC)(void *arg);
+
+struct irda_queue {
+ struct irda_queue *q_next;
+ struct irda_queue *q_prev;
+
+ char q_name[NAME_SIZE];
+ long q_hash; /* Must be able to cast a (void *) */
+};
+typedef struct irda_queue irda_queue_t;
+
+typedef struct hashbin_t {
+ __u32 magic;
+ int hb_type;
+ int hb_size;
+ spinlock_t hb_spinlock; /* HB_LOCK - Can be used by the user */
+
+ irda_queue_t* hb_queue[HASHBIN_SIZE] IRDA_ALIGN;
+
+ irda_queue_t* hb_current;
+} hashbin_t;
+
+hashbin_t *hashbin_new(int type);
+int hashbin_delete(hashbin_t* hashbin, FREE_FUNC func);
+int hashbin_clear(hashbin_t* hashbin, FREE_FUNC free_func);
+void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
+ const char* name);
+void* hashbin_remove(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_remove_first(hashbin_t *hashbin);
+void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry);
+void* hashbin_find(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_lock_find(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_find_next(hashbin_t* hashbin, long hashv, const char* name,
+ void ** pnext);
+irda_queue_t *hashbin_get_first(hashbin_t *hashbin);
+irda_queue_t *hashbin_get_next(hashbin_t *hashbin);
+
+#define HASHBIN_GET_SIZE(hashbin) hashbin->hb_size
+
+#endif
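
A small sketch (illustrative, not part of this patch) of the hashbin usage pattern implied by the API above: the caller embeds an irda_queue_t as the first member of its own structure (exactly as struct tsap_cb does in irttp.h below) so the same pointer can be cast both ways. struct demo_entry and the key value are made up:

#include <linux/slab.h>
#include <net/irda/irqueue.h>

struct demo_entry {
        irda_queue_t    q;                      /* must be the first member */
        int             payload;
};

static void demo_free(void *arg)
{
        kfree(arg);
}

static void demo_hashbin(void)
{
        hashbin_t *hb = hashbin_new(HB_LOCK);   /* writes guarded by hb_spinlock */
        struct demo_entry *e, *found;

        if (!hb)
                return;

        e = kmalloc(sizeof(*e), GFP_KERNEL);
        if (e) {
                e->payload = 1;
                /* Key by an arbitrary long value, optionally also by name */
                hashbin_insert(hb, &e->q, 42, NULL);
                found = hashbin_find(hb, 42, NULL);
                if (found)
                        found->payload++;
        }

        hashbin_delete(hb, demo_free);          /* frees whatever is still queued */
}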
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h
new file mode 100644
index 000000000000..a899e5837be8
--- /dev/null
+++ b/include/net/irda/irttp.h
@@ -0,0 +1,210 @@
+/*********************************************************************
+ *
+ * Filename: irttp.h
+ * Version: 1.0
+ * Description: Tiny Transport Protocol (TTP) definitions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:31 1997
+ * Modified at: Sun Dec 12 13:09:07 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRTTP_H
+#define IRTTP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h> /* struct lsap_cb */
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/irqueue.h>
+
+#define TTP_MAX_CONNECTIONS LM_MAX_CONNECTIONS
+#define TTP_HEADER 1
+#define TTP_MAX_HEADER (TTP_HEADER + LMP_MAX_HEADER)
+#define TTP_SAR_HEADER 5
+#define TTP_PARAMETERS 0x80
+#define TTP_MORE 0x80
+
+/* Transmission queue sizes */
+/* Worst case scenario, two windows of data - Jean II */
+#define TTP_TX_MAX_QUEUE 14
+/* We need to keep at least 5 frames to make sure that we can refill
+ * the LAP layer appropriately. LAP keeps only two buffers, and we need
+ * to have 7 to make a full window - Jean II */
+#define TTP_TX_LOW_THRESHOLD 5
+/* Most clients are synchronous with respect to flow control, so we can
+ * keep a low number of Tx buffers in TTP - Jean II */
+#define TTP_TX_HIGH_THRESHOLD 7
+
+/* Receive queue sizes */
+/* Minimum credit that the peer should hold.
+ * If the peer has fewer credits than 9 frames, we will explicitly send
+ * him some credits (through irttp_give_credit() and a specific frame).
+ * Note that when we give credits it's likely that it won't be sent in
+ * this LAP window, but in the next one. So, we make sure that the peer
+ * has something to send while waiting for credits (one LAP window == 7
+ * + 1 frames while he processes the credits). - Jean II */
+#define TTP_RX_MIN_CREDIT 8
+/* This is the default maximum number of credits held by the peer, so the
+ * default maximum number of frames he can send us before needing a flow
+ * control answer from us (this may be negotiated differently at TSAP setup).
+ * We want to minimise the number of times we have to explicitly send some
+ * credit to the peer, hoping we can piggyback it on the return data. In
+ * particular, it doesn't make sense for us to send credit more than once
+ * per LAP window.
+ * Moreover, giving credits has some latency, so we need strictly more than
+ * a LAP window, otherwise we may already have credits in our Tx queue.
+ * But on the other hand, we don't want to keep too many Rx buffer here
+ * before starting to flow control the other end, so make it exactly one
+ * LAP window + 1 + MIN_CREDITS. - Jean II */
+#define TTP_RX_DEFAULT_CREDIT 16
+/* Maximum number of credits we can allow the peer to have, and therefore
+ * maximum Rx queue size.
+ * Note that we try to deliver packets to the higher layer every time we
+ * receive something, so in normal mode the Rx queue will never contain
+ * more than one or two packets. - Jean II */
+#define TTP_RX_MAX_CREDIT 21
+
+/* What clients should use when calling ttp_open_tsap() */
+#define DEFAULT_INITIAL_CREDIT TTP_RX_DEFAULT_CREDIT
+
+/* Some priorities for disconnect requests */
+#define P_NORMAL 0
+#define P_HIGH 1
+
+#define TTP_SAR_DISABLE 0
+#define TTP_SAR_UNBOUND 0xffffffff
+
+/* Parameters */
+#define TTP_MAX_SDU_SIZE 0x01
+
+/*
+ * This structure contains all data associated with one instance of a TTP
+ * connection.
+ */
+struct tsap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic; /* Just in case */
+
+ __u8 stsap_sel; /* Source TSAP */
+ __u8 dtsap_sel; /* Destination TSAP */
+
+ struct lsap_cb *lsap; /* Corresponding LSAP to this TSAP */
+
+ __u8 connected; /* TSAP connected */
+
+ __u8 initial_credit; /* Initial credit to give peer */
+
+ int avail_credit; /* Available credit to return to peer */
+ int remote_credit; /* Credit held by peer TTP entity */
+ int send_credit; /* Credit held by local TTP entity */
+
+ struct sk_buff_head tx_queue; /* Frames to be transmitted */
+ struct sk_buff_head rx_queue; /* Received frames */
+ struct sk_buff_head rx_fragments;
+ int tx_queue_lock;
+ int rx_queue_lock;
+ spinlock_t lock;
+
+ notify_t notify; /* Callbacks to client layer */
+
+ struct net_device_stats stats;
+ struct timer_list todo_timer;
+
+ __u32 max_seg_size; /* Max data that fit into an IrLAP frame */
+ __u8 max_header_size;
+
+ int rx_sdu_busy; /* RxSdu.busy */
+ __u32 rx_sdu_size; /* Current size of a partially received frame */
+ __u32 rx_max_sdu_size; /* Max receive user data size */
+
+ int tx_sdu_busy; /* TxSdu.busy */
+ __u32 tx_max_sdu_size; /* Max transmit user data size */
+
+ int close_pend; /* Close, but disconnect_pend */
+ unsigned long disconnect_pend; /* Disconnect, but still data to send */
+ struct sk_buff *disconnect_skb;
+};
+
+struct irttp_cb {
+ magic_t magic;
+ hashbin_t *tsaps;
+};
+
+int irttp_init(void);
+void irttp_cleanup(void);
+
+struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify);
+int irttp_close_tsap(struct tsap_cb *self);
+
+int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb);
+int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb);
+
+int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *userdata);
+int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
+ struct sk_buff *userdata);
+int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *skb,
+ int priority);
+void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow);
+struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance);
+
+static __inline __u32 irttp_get_saddr(struct tsap_cb *self)
+{
+ return irlmp_get_saddr(self->lsap);
+}
+
+static __inline __u32 irttp_get_daddr(struct tsap_cb *self)
+{
+ return irlmp_get_daddr(self->lsap);
+}
+
+static __inline __u32 irttp_get_max_seg_size(struct tsap_cb *self)
+{
+ return self->max_seg_size;
+}
+
+/* After doing an irttp_dup(), this gets one of the two sockets back into
+ * a state where it's waiting for incoming connections.
+ * Note : this can be used *only* if the socket is not yet connected
+ * (i.e. NO irttp_connect_response() done on this socket).
+ * - Jean II */
+static inline void irttp_listen(struct tsap_cb *self)
+{
+ irlmp_listen(self->lsap);
+ self->dtsap_sel = LSAP_ANY;
+}
+
+/* Return TRUE if the node is in primary mode (i.e. master)
+ * - Jean II */
+static inline int irttp_is_primary(struct tsap_cb *self)
+{
+ if ((self == NULL) ||
+ (self->lsap == NULL) ||
+ (self->lsap->lap == NULL) ||
+ (self->lsap->lap->irlap == NULL))
+ return -2;
+ return(irlap_is_primary(self->lsap->lap->irlap));
+}
+
+#endif /* IRTTP_H */
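
A rough sketch (illustrative, not part of this patch) of how a TTP client could drive this API, assuming the usual open/connect/close flow. Only the irttp_*() calls, irda_notify_init(), LSAP_ANY and DEFAULT_INITIAL_CREDIT come from these headers; the discovery and connect steps are elided and the names are made up:

#include <linux/string.h>
#include <net/irda/irmod.h>
#include <net/irda/irttp.h>

static int demo_ttp_data_indication(void *instance, void *sap,
                                    struct sk_buff *skb)
{
        dev_kfree_skb(skb);                     /* a real client would consume it */
        return 0;
}

static void demo_ttp_client(void)
{
        struct tsap_cb *tsap;
        notify_t notify;

        irda_notify_init(&notify);
        notify.data_indication = demo_ttp_data_indication;
        strlcpy(notify.name, "demo", NOTIFY_MAX_NAME);

        /* LSAP_ANY asks IrLMP to pick a free source TSAP selector */
        tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
        if (!tsap)
                return;

        /* ... irttp_connect_request(), then irttp_data_request() ... */

        irttp_close_tsap(tsap);
}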
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
new file mode 100644
index 000000000000..3a605d37ddbf
--- /dev/null
+++ b/include/net/irda/parameters.h
@@ -0,0 +1,102 @@
+/*********************************************************************
+ *
+ * Filename: parameters.h
+ * Version: 1.0
+ * Description: A more general way to handle (pi,pl,pv) parameters
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 08:47:28 1999
+ * Modified at: Sun Jan 30 14:05:14 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * Michel Dänzer <daenzer@debian.org>, 10/2001
+ * - simplify irda_pv_t to avoid endianness issues
+ *
+ ********************************************************************/
+
+#ifndef IRDA_PARAMS_H
+#define IRDA_PARAMS_H
+
+/*
+ * The currently supported types. Beware not to change the sequence, since
+ * there is a good reason why the sized integers have a value equal to their size
+ */
+typedef enum {
+ PV_INTEGER, /* Integer of any (pl) length */
+ PV_INT_8_BITS, /* Integer of 8 bits in length */
+ PV_INT_16_BITS, /* Integer of 16 bits in length */
+ PV_STRING, /* \0 terminated string */
+ PV_INT_32_BITS, /* Integer of 32 bits in length */
+ PV_OCT_SEQ, /* Octet sequence */
+ PV_NO_VALUE /* Does not contain any value (pl=0) */
+} PV_TYPE;
+
+/* Bit 7 of type field */
+#define PV_BIG_ENDIAN 0x80
+#define PV_LITTLE_ENDIAN 0x00
+#define PV_MASK 0x7f /* To mask away endian bit */
+
+#define PV_PUT 0
+#define PV_GET 1
+
+typedef union {
+ char *c;
+ __u32 i;
+ __u32 *ip;
+} irda_pv_t;
+
+typedef struct {
+ __u8 pi;
+ __u8 pl;
+ irda_pv_t pv;
+} irda_param_t;
+
+typedef int (*PI_HANDLER)(void *self, irda_param_t *param, int get);
+typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+
+typedef struct {
+ PI_HANDLER func; /* Handler for this parameter identifier */
+ PV_TYPE type; /* Data type for this parameter */
+} pi_minor_info_t;
+
+typedef struct {
+ pi_minor_info_t *pi_minor_call_table;
+ int len;
+} pi_major_info_t;
+
+typedef struct {
+ pi_major_info_t *tables;
+ int len;
+ __u8 pi_mask;
+ int pi_major_offset;
+} pi_param_info_t;
+
+int irda_param_pack(__u8 *buf, char *fmt, ...);
+
+int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
+ pi_param_info_t *info);
+int irda_param_extract_all(void *self, __u8 *buf, int len,
+ pi_param_info_t *info);
+
+#define irda_param_insert_byte(buf,pi,pv) irda_param_pack(buf,"bbb",pi,1,pv)
+
+#endif /* IRDA_PARAMS_H */
+
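
The insert-byte macro above spells out the (pi,pl,pv) packing convention, so a tiny worked example may help; it is a sketch only, the buffer and parameter identifier are made up, and per the macro a 'b' in the format string stands for a single byte:

#include <linux/types.h>
#include <net/irda/parameters.h>

static void demo_param_pack(void)
{
        __u8 buf[8];

        /* pi = 0x01, pl = 1, pv = 0x2a: writes the three bytes 01 01 2a */
        irda_param_insert_byte(buf, 0x01, 0x2a);

        /* Equivalent explicit call using the printf-like format string */
        irda_param_pack(buf, "bbb", 0x01, 1, 0x2a);
}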
diff --git a/include/net/irda/qos.h b/include/net/irda/qos.h
new file mode 100644
index 000000000000..9ae3d6bc2423
--- /dev/null
+++ b/include/net/irda/qos.h
@@ -0,0 +1,104 @@
+/*********************************************************************
+ *
+ * Filename: qos.h
+ * Version: 1.0
+ * Description: Quality of Service definitions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Sep 19 23:21:09 1997
+ * Modified at: Thu Dec 2 13:51:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRDA_QOS_H
+#define IRDA_QOS_H
+
+#include <linux/config.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/parameters.h>
+
+#define PI_BAUD_RATE 0x01
+#define PI_MAX_TURN_TIME 0x82
+#define PI_DATA_SIZE 0x83
+#define PI_WINDOW_SIZE 0x84
+#define PI_ADD_BOFS 0x85
+#define PI_MIN_TURN_TIME 0x86
+#define PI_LINK_DISC 0x08
+
+#define IR_115200_MAX 0x3f
+
+/* Baud rates (first byte) */
+#define IR_2400 0x01
+#define IR_9600 0x02
+#define IR_19200 0x04
+#define IR_38400 0x08
+#define IR_57600 0x10
+#define IR_115200 0x20
+#define IR_576000 0x40
+#define IR_1152000 0x80
+
+/* Baud rates (second byte) */
+#define IR_4000000 0x01
+#define IR_16000000 0x02
+
+/* Quality of Service information */
+typedef struct {
+ __u32 value;
+ __u16 bits; /* LSB is first byte, MSB is second byte */
+} qos_value_t;
+
+struct qos_info {
+ magic_t magic;
+
+ qos_value_t baud_rate; /* IR_115200 | ... */
+ qos_value_t max_turn_time;
+ qos_value_t data_size;
+ qos_value_t window_size;
+ qos_value_t additional_bofs;
+ qos_value_t min_turn_time;
+ qos_value_t link_disc_time;
+
+ qos_value_t power;
+};
+
+extern int sysctl_max_baud_rate;
+extern int sysctl_max_inactive_time;
+
+void irda_init_max_qos_capabilies(struct qos_info *qos);
+void irda_qos_compute_intersection(struct qos_info *, struct qos_info *);
+
+__u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time);
+
+void irda_qos_bits_to_value(struct qos_info *qos);
+
+/* So simple, how could we not inline those two ?
+ * Note : one byte is 10 bits if you include start and stop bits
+ * Jean II */
+#define irlap_min_turn_time_in_bytes(speed, min_turn_time) ( \
+ speed * min_turn_time / 10000000 \
+)
+#define irlap_xbofs_in_usec(speed, xbofs) ( \
+ xbofs * 10000000 / speed \
+)
+
+#endif
+
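
As a quick sanity check of the two helper macros above, using the 10-bits-per-byte rule stated in the comment: irlap_min_turn_time_in_bytes(115200, 10000) = 115200 * 10000 / 10000000 = 115 byte times for a 10 ms turnaround at 115.2 kbit/s, and irlap_xbofs_in_usec(115200, 10) = 10 * 10000000 / 115200, roughly 868 us, i.e. ten extra BOF bytes cost a little under a millisecond at that speed.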
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
new file mode 100644
index 000000000000..2c5d8864ab77
--- /dev/null
+++ b/include/net/irda/timer.h
@@ -0,0 +1,104 @@
+/*********************************************************************
+ *
+ * Filename: timer.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Thu Oct 7 12:25:24 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef TIMER_H
+#define TIMER_H
+
+#include <linux/timer.h>
+
+#include <asm/param.h> /* for HZ */
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlmp_cb;
+struct irlap_cb;
+struct lsap_cb;
+struct lap_cb;
+
+/*
+ * Timeout definitions, some defined in IrLAP 6.13.5 - p. 92
+ */
+#define POLL_TIMEOUT (450*HZ/1000) /* Must never exceed 500 ms */
+#define FINAL_TIMEOUT (500*HZ/1000) /* Must never exceed 500 ms */
+
+/*
+ * Normally twice the P-timer. Note 3, IrLAP 6.3.11.2 - p. 60 suggests
+ * at least twice the duration of the P-timer.
+ */
+#define WD_TIMEOUT (POLL_TIMEOUT*2)
+
+#define MEDIABUSY_TIMEOUT (500*HZ/1000) /* 500 msec */
+#define SMALLBUSY_TIMEOUT (100*HZ/1000) /* 100 msec - IrLAP 6.13.4 */
+
+/*
+ * Slot timer must never exceed 85 ms, and must always be at least 25 ms,
+ * with 75-85 msec suggested by IrDA Lite. This doesn't work with a lot of
+ * devices, and other stacks use a lot more, so it's best we do it as well
+ * (Note : this is the default value and sysctl overrides it - Jean II)
+ */
+#define SLOT_TIMEOUT (90*HZ/1000)
+
+/*
+ * The latest discovery frame (XID) is longer due to the extra discovery
+ * information (hints, device name...). This is its extra length.
+ * We use that when setting the query timeout. Jean II
+ */
+#define XIDEXTRA_TIMEOUT (34*HZ/1000) /* 34 msec */
+
+#define WATCHDOG_TIMEOUT (20*HZ) /* 20 sec */
+
+typedef void (*TIMER_CALLBACK)(void *);
+
+static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
+ void* data, TIMER_CALLBACK callback)
+{
+ ptimer->function = (void (*)(unsigned long)) callback;
+ ptimer->data = (unsigned long) data;
+
+ /* Set new value for timer (update or add timer).
+ * We use mod_timer() because it's more efficient and also
+ * safer with respect to race conditions - Jean II */
+ mod_timer(ptimer, jiffies + timeout);
+}
+
+
+void irlap_start_slot_timer(struct irlap_cb *self, int timeout);
+void irlap_start_query_timer(struct irlap_cb *self, int S, int s);
+void irlap_start_final_timer(struct irlap_cb *self, int timeout);
+void irlap_start_wd_timer(struct irlap_cb *self, int timeout);
+void irlap_start_backoff_timer(struct irlap_cb *self, int timeout);
+
+void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout);
+void irlap_stop_mbusy_timer(struct irlap_cb *);
+
+void irlmp_start_watchdog_timer(struct lsap_cb *, int timeout);
+void irlmp_start_discovery_timer(struct irlmp_cb *, int timeout);
+void irlmp_start_idle_timer(struct lap_cb *, int timeout);
+void irlmp_stop_idle_timer(struct lap_cb *self);
+
+#endif
+
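
A hedged sketch (not part of this patch) of the expected irda_start_timer() usage: the owner embeds a struct timer_list, initialises it once with init_timer() (the timer API of this kernel generation), and rearms it with a callback taking the opaque data pointer. struct demo_layer and the callback are illustrative:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <net/irda/timer.h>

struct demo_layer {                             /* hypothetical owner */
        struct timer_list todo_timer;
};

static void demo_timer_expired(void *data)
{
        struct demo_layer *self = data;

        printk(KERN_DEBUG "demo: timer for %p expired\n", self);
}

static void demo_arm_timer(struct demo_layer *self)
{
        init_timer(&self->todo_timer);          /* once, before first use */

        /* Fire in 100 ms; calling this again simply moves the expiry */
        irda_start_timer(&self->todo_timer, 100 * HZ / 1000,
                         self, demo_timer_expired);
}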
diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h
new file mode 100644
index 000000000000..98768b3f9e31
--- /dev/null
+++ b/include/net/irda/wrapper.h
@@ -0,0 +1,58 @@
+/*********************************************************************
+ *
+ * Filename: wrapper.h
+ * Version: 1.2
+ * Description: IrDA SIR async wrapper layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Tue Jan 11 12:37:29 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef WRAPPER_H
+#define WRAPPER_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irda_device.h> /* iobuff_t */
+
+#define BOF 0xc0 /* Beginning of frame */
+#define XBOF 0xff
+#define EOF 0xc1 /* End of frame */
+#define CE 0x7d /* Control escape */
+
+#define STA BOF /* Start flag */
+#define STO EOF /* End flag */
+
+#define IRDA_TRANS 0x20 /* Asynchronous transparency modifier */
+
+/* States for receiving a frame in async mode */
+enum {
+ OUTSIDE_FRAME,
+ BEGIN_FRAME,
+ LINK_ESCAPE,
+ INSIDE_FRAME
+};
+
+/* Proto definitions */
+int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize);
+void async_unwrap_char(struct net_device *dev, struct net_device_stats *stats,
+ iobuff_t *buf, __u8 byte);
+
+#endif
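
Stated as an assumption rather than a fact about this header: SIR async framing typically achieves transparency by escaping any payload byte that collides with BOF, EOF or CE, sending CE followed by the byte XORed with IRDA_TRANS. A sketch of that rule, separate from the real async_wrap_skb() implementation:

#include <net/irda/wrapper.h>

/* Illustrative only: stuff one payload byte into 'out', return bytes used */
static int demo_stuff_byte(__u8 *out, __u8 byte)
{
        if (byte == BOF || byte == EOF || byte == CE) {
                out[0] = CE;                    /* control escape        */
                out[1] = byte ^ IRDA_TRANS;     /* transparency modifier */
                return 2;
        }
        out[0] = byte;
        return 1;
}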
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
new file mode 100644
index 000000000000..44edd48f1234
--- /dev/null
+++ b/include/net/iw_handler.h
@@ -0,0 +1,540 @@
+/*
+ * This file defines the new driver API for Wireless Extensions
+ *
+ * Version : 6 21.6.04
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 2001-2004 Jean Tourrilhes, All Rights Reserved.
+ */
+
+#ifndef _IW_HANDLER_H
+#define _IW_HANDLER_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * Initial driver API (1996 -> onward) :
+ * -----------------------------------
+ * The initial API just sends the IOCTL request received from user space
+ * to the driver (via the driver ioctl handler). The driver has to
+ * handle all the rest...
+ *
+ * The initial API also defines a specific handler in struct net_device
+ * to handle wireless statistics.
+ *
+ * The initial API served us well and has proven a reasonably good design.
+ * However, there are a few shortcomings :
+ * o No events, everything is a request to the driver.
+ * o Large ioctl function in driver with gigantic switch statement
+ * (i.e. spaghetti code).
+ *	o Driver has to mess with copy_to/from_user, and in many cases
+ *	  does it improperly. Common mistakes are :
+ * * buffer overflows (no checks or off by one checks)
+ * * call copy_to/from_user with irq disabled
+ *	o The user space interface is tied to ioctl because of the use
+ *	  of copy_to/from_user.
+ *
+ * New driver API (2002 -> onward) :
+ * -------------------------------
+ * The new driver API is just a bunch of standard functions (handlers),
+ * each handling a specific Wireless Extension. The driver just exports
+ * the list of handlers it supports, and those will be called appropriately.
+ *
+ * I tried to keep the main advantages of the previous API (simplicity,
+ * efficiency and light weight), and I also provide a good dose of backward
+ * compatibility (most structures are the same, a driver can use both APIs
+ * simultaneously, ...).
+ * Hopefully, I've also addressed the shortcomings of the initial API.
+ *
+ * The advantages of the new API are :
+ *	o Handling of Extensions in the driver broken into small contained functions
+ * o Tighter checks of ioctl before calling the driver
+ * o Flexible commit strategy (at least, the start of it)
+ * o Backward compatibility (can be mixed with old API)
+ * o Driver doesn't have to worry about memory and user-space issues
+ * The last point is important for the following reasons :
+ * o You are now able to call the new driver API from any API you
+ * want (including from within other parts of the kernel).
+ * o Common mistakes are avoided (buffer overflow, user space copy
+ * with irq disabled and so on).
+ *
+ * The drawbacks of the new API are :
+ * o bloat (especially kernel)
+ * o need to migrate existing drivers to new API
+ * My initial testing shows that the new API adds around 3kB to the kernel
+ * and saves between 0 and 5kB from a typical driver.
+ * Also, as all structures and data types are unchanged, the migration is
+ * quite straightforward (but tedious).
+ *
+ * ---
+ *
+ * The new driver API is defined below in this file. User space should
+ * not be aware of what's happening down there...
+ *
+ * A new kernel wrapper is in charge of validating the IOCTLs and calling
+ * the appropriate driver handler. This is implemented in :
+ * # net/core/wireless.c
+ *
+ * The driver exports the list of handlers in :
+ * # include/linux/netdevice.h (one place)
+ *
+ * The new driver API is available for WIRELESS_EXT >= 13.
+ * Good luck with migration to the new API ;-)
+ */
+
+/* ---------------------- THE IMPLEMENTATION ---------------------- */
+/*
+ * Some of the choices I've made are pretty controversial. Defining an
+ * API is very much about weighing compromises. This goes into some of the
+ * details and the thinking behind the implementation.
+ *
+ * Implementation goals :
+ * --------------------
+ * The implementation goals were as follows :
+ * o Obvious : you should not need a PhD to understand what's happening,
+ *	  the benefit is easier maintenance.
+ * o Flexible : it should accommodate a wide variety of driver
+ * implementations and be as flexible as the old API.
+ * o Lean : it should be efficient memory wise to minimise the impact
+ * on kernel footprint.
+ * o Transparent to user space : the large number of user space
+ * applications that use Wireless Extensions should not need
+ * any modifications.
+ *
+ * Array of functions versus Struct of functions
+ * ---------------------------------------------
+ * 1) Having an array of functions allows the kernel code to access the
+ * handler in a single lookup, which is much more efficient (think hash
+ * table here).
+ * 2) The only drawback is that driver writers may put their handler in
+ * the wrong slot. This is trivial to test (I set the frequency, the
+ * bitrate changes). Once the handler is in the proper slot, it will be
+ * there forever, because the array is only extended at the end.
+ * 3) Backward/forward compatibility : adding new handlers just requires
+ * extending the array, so you can put a newer driver in an older kernel
+ * without having to patch the kernel code (and vice versa).
+ *
+ * All handlers are of the same generic type
+ * ----------------------------------------
+ * That's a feature !!!
+ * 1) Having a generic handler allows generic code, which is more
+ * efficient. If each of the handlers was individually typed I would need
+ * to add a big switch in the kernel (== more bloat). This solution is
+ * more scalable, adding new Wireless Extensions doesn't add new code.
+ * 2) You can use the same handler in different slots of the array. For
+ * hardware, it may be more efficient or logical to handle multiple
+ * Wireless Extensions with a single function, and the API allows you to
+ * do that. (An example would be a single record on the card to control
+ * both bitrate and frequency, the handler would read the old record,
+ * modify it according to info->cmd and rewrite it).
+ *
+ * Functions prototype uses union iwreq_data
+ * -----------------------------------------
+ * Some would have preferred functions defined this way :
+ * static int mydriver_ioctl_setrate(struct net_device *dev,
+ * long rate, int auto)
+ * 1) The kernel code doesn't "validate" the content of iwreq_data, and
+ * can't do it (different hardware may have different notions of what a
+ * valid frequency is), so we don't pretend that we do it.
+ * 2) The above form is not extendable. If I want to add a flag (for
+ * example to distinguish setting max rate and basic rate), I would
+ * break the prototype. Using iwreq_data is more flexible.
+ * 3) Also, the above form is not generic (see above).
+ * 4) I don't expect driver developers to use the wrong field of the
+ * union (Doh !), so static typechecking doesn't add much value.
+ * 5) Lastly, you can skip the union by doing :
+ * static int mydriver_ioctl_setrate(struct net_device *dev,
+ * struct iw_request_info *info,
+ * struct iw_param *rrq,
+ * char *extra)
+ * And then adding the handler in the array like this :
+ * (iw_handler) mydriver_ioctl_setrate, // SIOCSIWRATE
+ *
+ * Using functions and not a registry
+ * ----------------------------------
+ * Another implementation option would have been for every instance to
+ * define a registry (a struct containing all the Wireless Extensions)
+ * and only have a function to commit the registry to the hardware.
+ * 1) This approach can be emulated by the current code, but not
+ * vice versa.
+ * 2) Some drivers don't keep any configuration in the driver, for them
+ * adding such a registry would be a significant bloat.
+ * 3) The code to translate from Wireless Extension to native format is
+ * needed anyway, so it would not significantly reduce the amount of code.
+ * 4) The current approach only selectively translates Wireless Extensions
+ * to native format and only selectively sets them, whereas the registry
+ * approach would require translating all WE and setting all parameters for
+ * any single change.
+ * 5) For many Wireless Extensions, the GET operation returns the current
+ * dynamic value, not the value that was set.
+ *
+ * This header is <net/iw_handler.h>
+ * ---------------------------------
+ * 1) This header is kernel space only and should not be exported to
+ * user space. Headers in "include/linux/" are exported, headers in
+ * "include/net/" are not.
+ *
+ * Mixed 32/64 bit issues
+ * ----------------------
+ * The Wireless Extensions are designed to be 64 bit clean, by using only
+ * datatypes with explicit storage size.
+ * There are some issues related to kernel and user space using different
+ * memory models, and in particular a 64bit kernel with 32bit user space.
+ * The problem is related to struct iw_point, that contains a pointer
+ * that *may* need to be translated.
+ * This is quite messy. The new API doesn't solve this problem (it can't),
+ * but is a step in the right direction :
+ * 1) Meta data about each ioctl is easily available, so we know what type
+ * of translation is needed.
+ * 2) The move of data between kernel and user space is only done in a single
+ * place in the kernel, so adding specific hooks in there is possible.
+ * 3) In the long term, it allows moving away from using ioctl as the
+ * user space API.
+ *
+ * So many comments and so few code
+ * --------------------------------
+ * That's a feature. Comments won't bloat the resulting kernel binary.
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/wireless.h> /* IOCTL user space API */
+#include <linux/if_ether.h>
+
+/***************************** VERSION *****************************/
+/*
+ * This constant is used to know which version of the driver API is
+ * available. Hopefully, this will be pretty stable and no changes
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+#define IW_HANDLER_VERSION 6
+
+/*
+ * Changes :
+ *
+ * V2 to V3
+ * --------
+ * - Move event definition in <linux/wireless.h>
+ * - Add Wireless Event support :
+ * o wireless_send_event() prototype
+ * o iwe_stream_add_event/point() inline functions
+ * V3 to V4
+ * --------
+ * - Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes
+ *
+ * V4 to V5
+ * --------
+ * - Add new spy support : struct iw_spy_data & prototypes
+ *
+ * V5 to V6
+ * --------
+ * - Change the way we get to spy_data method for added safety
+ * - Remove spy #ifdef, they are always on -> cleaner code
+ * - Add IW_DESCR_FLAG_NOMAX flag for very large requests
+ * - Start migrating get_wireless_stats to struct iw_handler_def
+ */
+
+/**************************** CONSTANTS ****************************/
+
+/* Enhanced spy support available */
+#define IW_WIRELESS_SPY
+#define IW_WIRELESS_THRSPY
+
+/* Special error message for the driver to indicate that we
+ * should do a commit after return from the iw_handler */
+#define EIWCOMMIT EINPROGRESS
+
+/* Flags available in struct iw_request_info */
+#define IW_REQUEST_FLAG_NONE 0x0000 /* No flag so far */
+
+/* Type of headers we know about (basically union iwreq_data) */
+#define IW_HEADER_TYPE_NULL 0 /* Not available */
+#define IW_HEADER_TYPE_CHAR 2 /* char [IFNAMSIZ] */
+#define IW_HEADER_TYPE_UINT 4 /* __u32 */
+#define IW_HEADER_TYPE_FREQ 5 /* struct iw_freq */
+#define IW_HEADER_TYPE_ADDR 6 /* struct sockaddr */
+#define IW_HEADER_TYPE_POINT 8 /* struct iw_point */
+#define IW_HEADER_TYPE_PARAM 9 /* struct iw_param */
+#define IW_HEADER_TYPE_QUAL 10 /* struct iw_quality */
+
+/* Handling flags */
+/* Most are not implemented. I just use them as a reminder of some
+ * cool features we might need one day ;-) */
+#define IW_DESCR_FLAG_NONE 0x0000 /* Obvious */
+/* Wrapper level flags */
+#define IW_DESCR_FLAG_DUMP 0x0001 /* Not part of the dump command */
+#define IW_DESCR_FLAG_EVENT 0x0002 /* Generate an event on SET */
+#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
+ /* SET : Omit payload from generated iwevent */
+#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
+/* Driver level flags */
+#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
+
+/****************************** TYPES ******************************/
+
+/* ----------------------- WIRELESS HANDLER ----------------------- */
+/*
+ * A wireless handler is just a standard function, that looks like the
+ * ioctl handler.
+ * We also define here what a handler list looks like... As the Wireless
+ * Extension space is quite dense, we use a simple array, which is faster
+ * (that's the perfect hash table ;-).
+ */
+
+/*
+ * Meta data about the request passed to the iw_handler.
+ * Most handlers can safely ignore what's in there.
+ * The 'cmd' field might come in handy if you want to use the same handler
+ * for multiple commands...
+ * This struct is also my long term insurance. I can add new fields here
+ * without breaking the prototype of iw_handler...
+ */
+struct iw_request_info
+{
+ __u16 cmd; /* Wireless Extension command */
+ __u16 flags; /* More to come ;-) */
+};
+
+struct net_device;
+
+/*
+ * This is how a function handling a Wireless Extension should look
+ * like (both get and set, standard and private).
+ */
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+/*
+ * This defines all the handlers that the driver exports.
+ * As you need only one per driver type, please use a static const
+ * shared by all driver instances... Same for the members...
+ * This will be linked from net_device in <linux/netdevice.h>
+ */
+struct iw_handler_def
+{
+ /* Number of handlers defined (more precisely, index of the
+ * last defined handler + 1) */
+ __u16 num_standard;
+ __u16 num_private;
+ /* Number of private arg description */
+ __u16 num_private_args;
+
+ /* Array of handlers for standard ioctls
+ * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
+ */
+ const iw_handler * standard;
+
+ /* Array of handlers for private ioctls
+ * Will call dev->wireless_handlers->private[ioctl - SIOCIWFIRSTPRIV]
+ */
+ const iw_handler * private;
+
+ /* Arguments of private handler. This one is just a list, so you
+ * can put it in any order you want and should not leave holes...
+ * We will automatically export that to user space... */
+ const struct iw_priv_args * private_args;
+
+ /* This field will be *removed* in the next version of WE */
+ long spy_offset; /* DO NOT USE */
+
+ /* New location of get_wireless_stats, to de-bloat struct net_device.
+ * The old pointer in struct net_device will be gradually phased
+ * out, and drivers are encouraged to use this one... */
+ struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
+};
+
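
To make the slot indexing above concrete, a hedged sketch (not part of this patch) of a driver declaring a single standard handler and the matching iw_handler_def. SIOCSIWNAME, SIOCGIWNAME and union iwreq_data come from <linux/wireless.h>; the handler body and all names are illustrative:

static int demo_get_name(struct net_device *dev,
                         struct iw_request_info *info,
                         union iwreq_data *wrqu, char *extra)
{
        strcpy(wrqu->name, "IEEE 802.11-DS");   /* fits in char name[IFNAMSIZ] */
        return 0;
}

/* Slot index is (ioctl - SIOCSIWNAME), as described above */
static const iw_handler demo_standard_handlers[] = {
        [SIOCGIWNAME - SIOCSIWNAME] = demo_get_name,
};

static const struct iw_handler_def demo_handler_def = {
        .num_standard   = ARRAY_SIZE(demo_standard_handlers),
        .standard       = demo_standard_handlers,
        /* at probe time: dev->wireless_handlers = &demo_handler_def; */
};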
+/* ---------------------- IOCTL DESCRIPTION ---------------------- */
+/*
+ * One of the main goals of the new interface is to deal entirely with
+ * user space/kernel space memory moves.
+ * For that, we need to know :
+ *	o if iwreq is a pointer or contains the full data
+ * o what is the size of the data to copy
+ *
+ * For private IOCTLs, we use the same rules as used by iwpriv and
+ * defined in struct iw_priv_args.
+ *
+ * For standard IOCTLs, things are quite different and we need to
+ * use the structures below. Actually, this struct is also more
+ * efficient, but that's another story...
+ */
+
+/*
+ * Describe how a standard IOCTL looks like.
+ */
+struct iw_ioctl_description
+{
+ __u8 header_type; /* NULL, iw_point or other */
+ __u8 token_type; /* Future */
+ __u16 token_size; /* Granularity of payload */
+ __u16 min_tokens; /* Min acceptable token number */
+ __u16 max_tokens; /* Max acceptable token number */
+ __u32 flags; /* Special handling of the request */
+};
+
+/* Need to think of short header translation table. Later. */
+
+/* --------------------- ENHANCED SPY SUPPORT --------------------- */
+/*
+ * In the old days, the driver was handling spy support all by itself.
+ * Now, the driver can delegate this task to Wireless Extensions.
+ * It needs to include this struct in its private part and use the
+ * standard spy iw_handler.
+ */
+
+/*
+ * Instance specific spy data, i.e. addresses spied and quality for them.
+ */
+struct iw_spy_data
+{
+ /* --- Standard spy support --- */
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+ /* --- Enhanced spy support (event) */
+ struct iw_quality spy_thr_low; /* Low threshold */
+ struct iw_quality spy_thr_high; /* High threshold */
+ u_char spy_thr_under[IW_MAX_SPY];
+};
+
+/* --------------------- DEVICE WIRELESS DATA --------------------- */
+/*
+ * This is all the wireless data specific to a device instance that
+ * is managed by the core of Wireless Extensions.
+ * We only keep pointers to those structures, so that a driver is free
+ * to share them between instances.
+ * This structure should be initialised before registering the device.
+ * Access to this data follows the same rules as any other struct net_device
+ * data (i.e. valid as long as the struct net_device exists, same locking rules).
+ */
+struct iw_public_data {
+ /* Driver enhanced spy support */
+ struct iw_spy_data * spy_data;
+};
+
+/**************************** PROTOTYPES ****************************/
+/*
+ * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
+ * Those may be called only within the kernel.
+ */
+
+/* First : functions strictly used inside the kernel */
+
+/* Handle /proc/net/wireless, called in net/core/dev.c */
+extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
+ int length);
+
+/* Handle IOCTLs, called in net/core/dev.c */
+extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd);
+
+/* Second : functions that may be called by driver modules */
+
+/* Send a single event to user space */
+extern void wireless_send_event(struct net_device * dev,
+ unsigned int cmd,
+ union iwreq_data * wrqu,
+ char * extra);
+
+/* We may need a function to send a stream of events to user space.
+ * More on that later... */
+
+/* Standard handler for SIOCSIWSPY */
+extern int iw_handler_set_spy(struct net_device * dev,
+ struct iw_request_info * info,
+ union iwreq_data * wrqu,
+ char * extra);
+/* Standard handler for SIOCGIWSPY */
+extern int iw_handler_get_spy(struct net_device * dev,
+ struct iw_request_info * info,
+ union iwreq_data * wrqu,
+ char * extra);
+/* Standard handler for SIOCSIWTHRSPY */
+extern int iw_handler_set_thrspy(struct net_device * dev,
+ struct iw_request_info *info,
+ union iwreq_data * wrqu,
+ char * extra);
+/* Standard handler for SIOCGIWTHRSPY */
+extern int iw_handler_get_thrspy(struct net_device * dev,
+ struct iw_request_info *info,
+ union iwreq_data * wrqu,
+ char * extra);
+/* Driver call to update spy records */
+extern void wireless_spy_update(struct net_device * dev,
+ unsigned char * address,
+ struct iw_quality * wstats);
+
+/************************* INLINE FUNCTIONS *************************/
+/*
+ * Functions that are so simple that it's more efficient to inline them
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add an Wireless Event to a stream of events.
+ */
+static inline char *
+iwe_stream_add_event(char * stream, /* Stream of events */
+ char * ends, /* End of stream */
+ struct iw_event *iwe, /* Payload */
+ int event_len) /* Real size of payload */
+{
+ /* Check if it's possible */
+ if((stream + event_len) < ends) {
+ iwe->len = event_len;
+ memcpy(stream, (char *) iwe, event_len);
+ stream += event_len;
+ }
+ return stream;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add a short Wireless Event containing a pointer to a
+ * stream of events.
+ */
+static inline char *
+iwe_stream_add_point(char * stream, /* Stream of events */
+ char * ends, /* End of stream */
+ struct iw_event *iwe, /* Payload */
+ char * extra)
+{
+ int event_len = IW_EV_POINT_LEN + iwe->u.data.length;
+ /* Check if it's possible */
+ if((stream + event_len) < ends) {
+ iwe->len = event_len;
+ memcpy(stream, (char *) iwe, IW_EV_POINT_LEN);
+ memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length);
+ stream += event_len;
+ }
+ return stream;
+}
+
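
A sketch (illustrative only) of how a scan-results handler might feed the stream helpers above. struct iw_event, SIOCGIWAP and IW_EV_ADDR_LEN come from <linux/wireless.h>, ARPHRD_ETHER from <linux/if_arp.h>; the function name and BSSID buffer are made up:

static char *demo_add_bssid(char *stream, char *ends, const __u8 *bssid)
{
        struct iw_event iwe;

        iwe.cmd = SIOCGIWAP;                    /* "access point MAC" event */
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);

        /* Silently skipped if it would not fit before 'ends' */
        return iwe_stream_add_event(stream, ends, &iwe, IW_EV_ADDR_LEN);
}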
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper to add a value to a Wireless Event in a stream of events.
+ * Be careful, this one is tricky to use properly :
+ * At the first run, you need to have (value = event + IW_EV_LCP_LEN).
+ */
+static inline char *
+iwe_stream_add_value(char * event, /* Event in the stream */
+ char * value, /* Value in event */
+ char * ends, /* End of stream */
+ struct iw_event *iwe, /* Payload */
+ int event_len) /* Real size of payload */
+{
+ /* Don't duplicate LCP */
+ event_len -= IW_EV_LCP_LEN;
+
+ /* Check if it's possible */
+ if((value + event_len) < ends) {
+ /* Add new value */
+ memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len);
+ value += event_len;
+ /* Patch LCP */
+ iwe->len = value - event;
+ memcpy(event, (char *) iwe, IW_EV_LCP_LEN);
+ }
+ return value;
+}
+
+#endif /* _IW_HANDLER_H */
diff --git a/include/net/lapb.h b/include/net/lapb.h
new file mode 100644
index 000000000000..96cb5ddaa9f1
--- /dev/null
+++ b/include/net/lapb.h
@@ -0,0 +1,152 @@
+#ifndef _LAPB_H
+#define _LAPB_H
+#include <linux/lapb.h>
+
+#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
+
+#define LAPB_ACK_PENDING_CONDITION 0x01
+#define LAPB_REJECT_CONDITION 0x02
+#define LAPB_PEER_RX_BUSY_CONDITION 0x04
+
+/* Control field templates */
+#define LAPB_I 0x00 /* Information frames */
+#define LAPB_S 0x01 /* Supervisory frames */
+#define LAPB_U 0x03 /* Unnumbered frames */
+
+#define LAPB_RR 0x01 /* Receiver ready */
+#define LAPB_RNR 0x05 /* Receiver not ready */
+#define LAPB_REJ 0x09 /* Reject */
+
+#define LAPB_SABM 0x2F /* Set Asynchronous Balanced Mode */
+#define LAPB_SABME 0x6F /* Set Asynchronous Balanced Mode Extended */
+#define LAPB_DISC 0x43 /* Disconnect */
+#define LAPB_DM 0x0F /* Disconnected mode */
+#define LAPB_UA 0x63 /* Unnumbered acknowledge */
+#define LAPB_FRMR 0x87 /* Frame reject */
+
+#define LAPB_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define LAPB_SPF 0x10 /* Poll/final bit for standard LAPB */
+#define LAPB_EPF 0x01 /* Poll/final bit for extended LAPB */
+
+#define LAPB_FRMR_W 0x01 /* Control field invalid */
+#define LAPB_FRMR_X 0x02 /* I field invalid */
+#define LAPB_FRMR_Y 0x04 /* I field too long */
+#define LAPB_FRMR_Z 0x08 /* Invalid N(R) */
+
+#define LAPB_POLLOFF 0
+#define LAPB_POLLON 1
+
+/* LAPB C-bit */
+#define LAPB_COMMAND 1
+#define LAPB_RESPONSE 2
+
+#define LAPB_ADDR_A 0x03
+#define LAPB_ADDR_B 0x01
+#define LAPB_ADDR_C 0x0F
+#define LAPB_ADDR_D 0x07
+
+/* Define Link State constants. */
+enum {
+ LAPB_STATE_0, /* Disconnected State */
+ LAPB_STATE_1, /* Awaiting Connection State */
+ LAPB_STATE_2, /* Awaiting Disconnection State */
+ LAPB_STATE_3, /* Data Transfer State */
+ LAPB_STATE_4 /* Frame Reject State */
+};
+
+#define LAPB_DEFAULT_MODE (LAPB_STANDARD | LAPB_SLP | LAPB_DTE)
+#define LAPB_DEFAULT_WINDOW 7 /* Window=7 */
+#define LAPB_DEFAULT_T1 (5 * HZ) /* T1=5s */
+#define LAPB_DEFAULT_T2 (1 * HZ) /* T2=1s */
+#define LAPB_DEFAULT_N2 20 /* N2=20 */
+
+#define LAPB_SMODULUS 8
+#define LAPB_EMODULUS 128
+
+/*
+ * Information about the current frame.
+ */
+struct lapb_frame {
+ unsigned short type; /* Parsed type */
+ unsigned short nr, ns; /* N(R), N(S) */
+ unsigned char cr; /* Command/Response */
+ unsigned char pf; /* Poll/Final */
+ unsigned char control[2]; /* Original control data*/
+};
+
+/*
+ * The per LAPB connection control structure.
+ */
+struct lapb_cb {
+ struct list_head node;
+ struct net_device *dev;
+
+ /* Link status fields */
+ unsigned int mode;
+ unsigned char state;
+ unsigned short vs, vr, va;
+ unsigned char condition;
+ unsigned short n2, n2count;
+ unsigned short t1, t2;
+ struct timer_list t1timer, t2timer;
+
+ /* Internal control information */
+ struct sk_buff_head write_queue;
+ struct sk_buff_head ack_queue;
+ unsigned char window;
+ struct lapb_register_struct callbacks;
+
+ /* FRMR control information */
+ struct lapb_frame frmr_data;
+ unsigned char frmr_type;
+
+ atomic_t refcnt;
+};
+
+/* lapb_iface.c */
+extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+extern void lapb_connect_indication(struct lapb_cb *lapb, int);
+extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+extern int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+extern int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+
+/* lapb_in.c */
+extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+
+/* lapb_out.c */
+extern void lapb_kick(struct lapb_cb *lapb);
+extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+extern void lapb_establish_data_link(struct lapb_cb *lapb);
+extern void lapb_enquiry_response(struct lapb_cb *lapb);
+extern void lapb_timeout_response(struct lapb_cb *lapb);
+extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+
+/* lapb_subr.c */
+extern void lapb_clear_queues(struct lapb_cb *lapb);
+extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+extern void lapb_requeue_frames(struct lapb_cb *lapb);
+extern int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+
+/* lapb_timer.c */
+extern void lapb_start_t1timer(struct lapb_cb *lapb);
+extern void lapb_start_t2timer(struct lapb_cb *lapb);
+extern void lapb_stop_t1timer(struct lapb_cb *lapb);
+extern void lapb_stop_t2timer(struct lapb_cb *lapb);
+extern int lapb_t1timer_running(struct lapb_cb *lapb);
+
+/*
+ * Debug levels.
+ * 0 = Off
+ * 1 = State Changes
+ * 2 = Packets I/O and State Changes
+ * 3 = Hex dumps, Packets I/O and State Changes.
+ */
+#define LAPB_DEBUG 0
+
+#endif
diff --git a/include/net/llc.h b/include/net/llc.h
new file mode 100644
index 000000000000..c9aed2a8b4e2
--- /dev/null
+++ b/include/net/llc.h
@@ -0,0 +1,99 @@
+#ifndef LLC_H
+#define LLC_H
+/*
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
+struct llc_addr {
+ unsigned char lsap;
+ unsigned char mac[IFHWADDRLEN];
+};
+
+#define LLC_SAP_STATE_INACTIVE 1
+#define LLC_SAP_STATE_ACTIVE 2
+
+/**
+ * struct llc_sap - Defines the SAP component
+ *
+ * @station - station this sap belongs to
+ * @state - sap state
+ * @p_bit - only lowest-order bit used
+ * @f_bit - only lowest-order bit used
+ * @laddr - SAP value in this 'lsap'
+ * @node - entry in station sap_list
+ * @sk_list - LLC sockets this one manages
+ */
+struct llc_sap {
+ unsigned char state;
+ unsigned char p_bit;
+ unsigned char f_bit;
+ int (*rcv_func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt);
+ struct llc_addr laddr;
+ struct list_head node;
+ struct {
+ rwlock_t lock;
+ struct hlist_head list;
+ } sk_list;
+};
+
+#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */
+#define LLC_DEST_SAP 1 /* Type 1 goes here */
+#define LLC_DEST_CONN 2 /* Type 2 goes here */
+
+extern struct list_head llc_sap_list;
+extern rwlock_t llc_sap_list_lock;
+extern unsigned char llc_station_mac_sa[ETH_ALEN];
+
+extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt);
+
+extern int llc_mac_hdr_init(struct sk_buff *skb,
+ unsigned char *sa, unsigned char *da);
+
+extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
+ struct sk_buff *skb));
+extern void llc_remove_pack(int type);
+
+extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+
+extern struct llc_sap *llc_sap_open(unsigned char lsap,
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt));
+extern void llc_sap_close(struct llc_sap *sap);
+
+extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+
+extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
+
+extern int llc_station_init(void);
+extern void llc_station_exit(void);
+
+#ifdef CONFIG_PROC_FS
+extern int llc_proc_init(void);
+extern void llc_proc_exit(void);
+#else
+#define llc_proc_init() (0)
+#define llc_proc_exit() do { } while(0)
+#endif /* CONFIG_PROC_FS */
+#endif /* LLC_H */
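
A minimal, hedged sketch (not part of this patch) of registering an LLC SAP through the interface above; the SAP value 0x42, the handler body and the error handling are purely illustrative:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/llc.h>

static struct llc_sap *demo_sap;

static int demo_llc_rcv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt)
{
        kfree_skb(skb);                         /* a real user would parse the PDU */
        return 0;
}

static int demo_llc_init(void)
{
        demo_sap = llc_sap_open(0x42, demo_llc_rcv);    /* claim LSAP 0x42 */
        return demo_sap ? 0 : -EBUSY;
}

static void demo_llc_exit(void)
{
        if (demo_sap)
                llc_sap_close(demo_sap);
}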
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
new file mode 100644
index 000000000000..df83f69d2de4
--- /dev/null
+++ b/include/net/llc_c_ac.h
@@ -0,0 +1,202 @@
+#ifndef LLC_C_AC_H
+#define LLC_C_AC_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* Connection component state transition actions */
+/*
+ * Connection state transition actions
+ * (Fb = F bit; Pb = P bit; Xb = X bit)
+ */
+#define LLC_CONN_AC_CLR_REMOTE_BUSY 1
+#define LLC_CONN_AC_CONN_IND 2
+#define LLC_CONN_AC_CONN_CONFIRM 3
+#define LLC_CONN_AC_DATA_IND 4
+#define LLC_CONN_AC_DISC_IND 5
+#define LLC_CONN_AC_RESET_IND 6
+#define LLC_CONN_AC_RESET_CONFIRM 7
+#define LLC_CONN_AC_REPORT_STATUS 8
+#define LLC_CONN_AC_CLR_REMOTE_BUSY_IF_Fb_EQ_1 9
+#define LLC_CONN_AC_STOP_REJ_TMR_IF_DATA_FLAG_EQ_2 10
+#define LLC_CONN_AC_SEND_DISC_CMD_Pb_SET_X 11
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_Pb 12
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_1 13
+#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_F_FLAG 14
+#define LLC_CONN_AC_SEND_FRMR_RSP_Fb_SET_X 15
+#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_0 16
+#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_Pb 17
+#define LLC_CONN_AC_SEND_I_CMD_Pb_SET_1 18
+#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1 19
+#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1_OR_SEND_RR 20
+#define LLC_CONN_AC_SEND_I_XXX_Xb_SET_0 21
+#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0 22
+#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0_OR_SEND_RR 23
+#define LLC_CONN_AC_RESEND_I_RSP_Fb_SET_1 24
+#define LLC_CONN_AC_SEND_REJ_CMD_Pb_SET_1 25
+#define LLC_CONN_AC_SEND_REJ_RSP_Fb_SET_1 26
+#define LLC_CONN_AC_SEND_REJ_XXX_Xb_SET_0 27
+#define LLC_CONN_AC_SEND_RNR_CMD_Pb_SET_1 28
+#define LLC_CONN_AC_SEND_RNR_RSP_Fb_SET_1 29
+#define LLC_CONN_AC_SEND_RNR_XXX_Xb_SET_0 30
+#define LLC_CONN_AC_SET_REMOTE_BUSY 31
+#define LLC_CONN_AC_OPTIONAL_SEND_RNR_XXX_Xb_SET_0 32
+#define LLC_CONN_AC_SEND_RR_CMD_Pb_SET_1 33
+#define LLC_CONN_AC_SEND_ACK_CMD_Pb_SET_1 34
+#define LLC_CONN_AC_SEND_RR_RSP_Fb_SET_1 35
+#define LLC_CONN_AC_SEND_ACK_RSP_Fb_SET_1 36
+#define LLC_CONN_AC_SEND_RR_XXX_Xb_SET_0 37
+#define LLC_CONN_AC_SEND_ACK_XXX_Xb_SET_0 38
+#define LLC_CONN_AC_SEND_SABME_CMD_Pb_SET_X 39
+#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_Pb 40
+#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_F_FLAG 41
+#define LLC_CONN_AC_S_FLAG_SET_0 42
+#define LLC_CONN_AC_S_FLAG_SET_1 43
+#define LLC_CONN_AC_START_P_TMR 44
+#define LLC_CONN_AC_START_ACK_TMR 45
+#define LLC_CONN_AC_START_REJ_TMR 46
+#define LLC_CONN_AC_START_ACK_TMR_IF_NOT_RUNNING 47
+#define LLC_CONN_AC_STOP_ACK_TMR 48
+#define LLC_CONN_AC_STOP_P_TMR 49
+#define LLC_CONN_AC_STOP_REJ_TMR 50
+#define LLC_CONN_AC_STOP_ALL_TMRS 51
+#define LLC_CONN_AC_STOP_OTHER_TMRS 52
+#define LLC_CONN_AC_UPDATE_Nr_RECEIVED 53
+#define LLC_CONN_AC_UPDATE_P_FLAG 54
+#define LLC_CONN_AC_DATA_FLAG_SET_2 55
+#define LLC_CONN_AC_DATA_FLAG_SET_0 56
+#define LLC_CONN_AC_DATA_FLAG_SET_1 57
+#define LLC_CONN_AC_DATA_FLAG_SET_1_IF_DATA_FLAG_EQ_0 58
+#define LLC_CONN_AC_P_FLAG_SET_0 59
+#define LLC_CONN_AC_P_FLAG_SET_P 60
+#define LLC_CONN_AC_REMOTE_BUSY_SET_0 61
+#define LLC_CONN_AC_RETRY_CNT_SET_0 62
+#define LLC_CONN_AC_RETRY_CNT_INC_BY_1 63
+#define LLC_CONN_AC_Vr_SET_0 64
+#define LLC_CONN_AC_Vr_INC_BY_1 65
+#define LLC_CONN_AC_Vs_SET_0 66
+#define LLC_CONN_AC_Vs_SET_Nr 67
+#define LLC_CONN_AC_F_FLAG_SET_P 68
+#define LLC_CONN_AC_STOP_SENDACK_TMR 70
+#define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
+
+typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+
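+/*
+ * Each LLC_CONN_AC_* index above names one of the action handlers declared
+ * below; the connection state machine typically runs a transition's array
+ * of these handlers in order, passing the socket and the triggering skb to
+ * each one (a sketch of the expected usage, not an additional interface).
+ */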
+extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
+ struct sk_buff *skb);
+extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
+extern u8 llc_circular_between(u8 a, u8 b, u8 c);
+extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
+extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+
+extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+
+extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+#endif /* LLC_C_AC_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
new file mode 100644
index 000000000000..23a409381fa9
--- /dev/null
+++ b/include/net/llc_c_ev.h
@@ -0,0 +1,269 @@
+#ifndef LLC_C_EV_H
+#define LLC_C_EV_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <net/sock.h>
+
+/* Connection component state transition event qualifiers */
+/* Types of events (possible values in 'ev->type') */
+#define LLC_CONN_EV_TYPE_SIMPLE 1
+#define LLC_CONN_EV_TYPE_CONDITION 2
+#define LLC_CONN_EV_TYPE_PRIM 3
+#define LLC_CONN_EV_TYPE_PDU 4 /* command/response PDU */
+#define LLC_CONN_EV_TYPE_ACK_TMR 5
+#define LLC_CONN_EV_TYPE_P_TMR 6
+#define LLC_CONN_EV_TYPE_REJ_TMR 7
+#define LLC_CONN_EV_TYPE_BUSY_TMR 8
+#define LLC_CONN_EV_TYPE_RPT_STATUS 9
+#define LLC_CONN_EV_TYPE_SENDACK_TMR 10
+
+#define NBR_CONN_EV 5
+/* Connection events which cause state transitions when fully qualified */
+
+#define LLC_CONN_EV_CONN_REQ 1
+#define LLC_CONN_EV_CONN_RESP 2
+#define LLC_CONN_EV_DATA_REQ 3
+#define LLC_CONN_EV_DISC_REQ 4
+#define LLC_CONN_EV_RESET_REQ 5
+#define LLC_CONN_EV_RESET_RESP 6
+#define LLC_CONN_EV_LOCAL_BUSY_DETECTED 7
+#define LLC_CONN_EV_LOCAL_BUSY_CLEARED 8
+#define LLC_CONN_EV_RX_BAD_PDU 9
+#define LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X 10
+#define LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X 11
+#define LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X 12
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X 13
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_UNEXPD_Ns 14
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns 15
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X 16
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns 17
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns 18
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_X 19
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X 20
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_X 21
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_X 22
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_X 23
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_X 24
+#define LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X 25
+#define LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X 26
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X 27
+#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X 28
+#define LLC_CONN_EV_RX_XXX_YYY 29
+#define LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr 30
+#define LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr 31
+#define LLC_CONN_EV_P_TMR_EXP 32
+#define LLC_CONN_EV_ACK_TMR_EXP 33
+#define LLC_CONN_EV_REJ_TMR_EXP 34
+#define LLC_CONN_EV_BUSY_TMR_EXP 35
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 36
+#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_0 37
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns 38
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns 39
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns 40
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns 41
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 42
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 43
+#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 44
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 45
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 46
+#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 47
+#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 48
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 49
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 50
+#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 51
+#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 52
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 53
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 54
+#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 55
+#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 56
+#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 57
+#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 58
+#define LLC_CONN_EV_TX_BUFF_FULL 59
+
+#define LLC_CONN_EV_INIT_P_F_CYCLE 100
+/*
+ * Connection event qualifiers; for some events a certain combination of
+ * these qualifiers must be TRUE before the event is recognized as valid
+ * for the current state; these constants act as indexes into the event
+ * qualifier function table
+ */
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_1 1
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_0 2
+#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_2 3
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_1 4
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_0 5
+#define LLC_CONN_EV_QFY_P_FLAG_EQ_Fbit 6
+#define LLC_CONN_EV_QFY_REMOTE_BUSY_EQ_0 7
+#define LLC_CONN_EV_QFY_RETRY_CNT_LT_N2 8
+#define LLC_CONN_EV_QFY_RETRY_CNT_GTE_N2 9
+#define LLC_CONN_EV_QFY_S_FLAG_EQ_1 10
+#define LLC_CONN_EV_QFY_S_FLAG_EQ_0 11
+#define LLC_CONN_EV_QFY_INIT_P_F_CYCLE 12
+
+struct llc_conn_state_ev {
+ u8 type;
+ u8 prim;
+ u8 prim_type;
+ u8 reason;
+ u8 status;
+ u8 ind_prim;
+ u8 cfm_prim;
+};
+
+static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
+{
+ return (struct llc_conn_state_ev *)skb->cb;
+}
+
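+/*
+ * Usage sketch (illustrative): the event description travels in the skb
+ * control buffer, so a caller would typically fill it in before handing
+ * the skb to the connection state machine, e.g.
+ *
+ *	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
+ *
+ *	ev->type = LLC_CONN_EV_TYPE_PDU;
+ *	llc_conn_state_process(sk, skb);
+ *
+ * (llc_conn_state_process() is declared in net/llc_conn.h.)
+ */
+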
+typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+
+extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_local_busy_detected(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+/* NOT_USED functions and their variations */
+extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+
+/* Available connection action qualifiers */
+extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
+ struct sk_buff *skb);
+extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
+ struct sk_buff *skb);
+
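+/*
+ * Returns true while queueing the skb would keep sk_rmem_alloc below
+ * sk_rcvbuf, i.e. while there is still receive-buffer space to accept
+ * the frame.
+ */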
+static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
+{
+ return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned)sk->sk_rcvbuf;
+}
+#endif /* LLC_C_EV_H */
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
new file mode 100644
index 000000000000..0e79cfba4b3b
--- /dev/null
+++ b/include/net/llc_c_st.h
@@ -0,0 +1,48 @@
+#ifndef LLC_C_ST_H
+#define LLC_C_ST_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* Connection component state management */
+/* connection states */
+#define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */
+
+#define LLC_CONN_STATE_ADM 1 /* disc, initial state */
+#define LLC_CONN_STATE_SETUP 2 /* disconnected state */
+#define LLC_CONN_STATE_NORMAL 3 /* connected state */
+#define LLC_CONN_STATE_BUSY 4 /* connected state */
+#define LLC_CONN_STATE_REJ 5 /* connected state */
+#define LLC_CONN_STATE_AWAIT 6 /* connected state */
+#define LLC_CONN_STATE_AWAIT_BUSY 7 /* connected state */
+#define LLC_CONN_STATE_AWAIT_REJ 8 /* connected state */
+#define LLC_CONN_STATE_D_CONN 9 /* disconnected state */
+#define LLC_CONN_STATE_RESET 10 /* disconnected state */
+#define LLC_CONN_STATE_ERROR 11 /* disconnected state */
+#define LLC_CONN_STATE_TEMP 12 /* disconnected state */
+
+#define NBR_CONN_STATES 12 /* size of state table */
+#define NO_STATE_CHANGE 100
+
+/* Connection state table structure */
+struct llc_conn_state_trans {
+ llc_conn_ev_t ev;
+ u8 next_state;
+ llc_conn_ev_qfyr_t *ev_qualifiers;
+ llc_conn_action_t *ev_actions;
+};
+
+struct llc_conn_state {
+ u8 current_state;
+ struct llc_conn_state_trans **transitions;
+};
+
+extern struct llc_conn_state llc_conn_state_table[];
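+
+/*
+ * Dispatch sketch (illustrative, not an additional interface): for the
+ * current connection state the state machine walks the transitions array,
+ * picks the first entry whose ev() handler and every ev_qualifiers[]
+ * function accept the incoming skb, runs each ev_actions[] function in
+ * turn and finally moves the connection to next_state.
+ */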
+#endif /* LLC_C_ST_H */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
new file mode 100644
index 000000000000..8ad3bc2c23d7
--- /dev/null
+++ b/include/net/llc_conn.h
@@ -0,0 +1,119 @@
+#ifndef LLC_CONN_H
+#define LLC_CONN_H
+/*
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ * 2001, 2002 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+#include <linux/timer.h>
+#include <net/llc_if.h>
+#include <net/sock.h>
+#include <linux/llc.h>
+
+#define LLC_EVENT 1
+#define LLC_PACKET 2
+
+#define LLC_P_TIME 2
+#define LLC_ACK_TIME 1
+#define LLC_REJ_TIME 3
+#define LLC_BUSY_TIME 3
+
+struct llc_timer {
+ struct timer_list timer;
+ u16 expire; /* timer expire time */
+};
+
+struct llc_sock {
+ /* struct sock must be the first member of llc_sock */
+ struct sock sk;
+ struct sockaddr_llc addr; /* address sock is bound to */
+ u8 state; /* state of connection */
+ struct llc_sap *sap; /* pointer to parent SAP */
+ struct llc_addr laddr; /* lsap/mac pair */
+ struct llc_addr daddr; /* dsap/mac pair */
+ struct net_device *dev; /* device to send to remote */
+ u8 retry_count; /* number of retries */
+ u8 ack_must_be_send;
+ u8 first_pdu_Ns;
+ u8 npta;
+ struct llc_timer ack_timer;
+ struct llc_timer pf_cycle_timer;
+ struct llc_timer rej_sent_timer;
+ struct llc_timer busy_state_timer; /* ind busy clr at remote LLC */
+ u8 vS; /* seq# next in-seq I-PDU tx'd*/
+ u8 vR; /* seq# next in-seq I-PDU rx'd*/
+ u32 n2; /* max nbr re-tx's for timeout*/
+ u32 n1; /* max nbr octets in I PDU */
+ u8 k; /* tx window size; max = 127 */
+ u8 rw; /* rx window size; max = 127 */
+ u8 p_flag; /* state flags */
+ u8 f_flag;
+ u8 s_flag;
+ u8 data_flag;
+ u8 remote_busy_flag;
+ u8 cause_flag;
+	struct sk_buff_head pdu_unack_q;	/* PDUs sent/waiting ack */
+ u16 link; /* network layer link number */
+ u8 X; /* a temporary variable */
+	u8		    ack_pf;	/* P/F bit to use in the next
+					   acknowledgement */
+	u8		    failed_data_req; /* set when a previous
+						llc_data_req_handler call
+						failed (tx_buffer_full or
+						unacceptable state) */
+ u8 dec_step;
+ u8 inc_cntr;
+ u8 dec_cntr;
+ u8 connect_step;
+ u8 last_nr; /* NR of last pdu received */
+	u32		    rx_pdu_hdr;	/* saved header of the last received
+					   pdu that caused an FRMR to be
+					   sent; used when resending the
+					   FRMR */
+};
+
+static inline struct llc_sock *llc_sk(const struct sock *sk)
+{
+ return (struct llc_sock *)sk;
+}
+
+static __inline__ void llc_set_backlog_type(struct sk_buff *skb, char type)
+{
+ skb->cb[sizeof(skb->cb) - 1] = type;
+}
+
+static __inline__ char llc_backlog_type(struct sk_buff *skb)
+{
+ return skb->cb[sizeof(skb->cb) - 1];
+}
+
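+/*
+ * Note: when the socket is owned by a user context, incoming frames and
+ * events are queued on the socket backlog; the tag stored in the last
+ * byte of skb->cb (LLC_PACKET or LLC_EVENT) tells the backlog handler
+ * how to process the skb once the socket is released.
+ */
+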
+extern struct sock *llc_sk_alloc(int family, int priority, struct proto *prot);
+extern void llc_sk_free(struct sock *sk);
+
+extern void llc_sk_reset(struct sock *sk);
+
+/* Access to a connection */
+extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
+ u8 first_p_bit);
+extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
+ u8 first_f_bit);
+extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
+ u16 *how_many_unacked);
+extern struct sock *llc_lookup_established(struct llc_sap *sap,
+ struct llc_addr *daddr,
+ struct llc_addr *laddr);
+extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+
+extern u8 llc_data_accept_state(u8 state);
+extern void llc_build_offset_table(void);
+extern int llc_release_sockets(struct llc_sap *sap);
+#endif /* LLC_CONN_H */
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
new file mode 100644
index 000000000000..090eaa0d71f9
--- /dev/null
+++ b/include/net/llc_if.h
@@ -0,0 +1,101 @@
+#ifndef LLC_IF_H
+#define LLC_IF_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* Defines LLC interface to network layer */
+/* Available primitives */
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/llc.h>
+#include <net/llc.h>
+
+#define LLC_DATAUNIT_PRIM 1
+#define LLC_CONN_PRIM 2
+#define LLC_DATA_PRIM 3
+#define LLC_DISC_PRIM 4
+#define LLC_RESET_PRIM 5
+#define LLC_FLOWCONTROL_PRIM 6 /* Not supported at this time */
+#define LLC_DISABLE_PRIM 7
+#define LLC_XID_PRIM 8
+#define LLC_TEST_PRIM 9
+#define LLC_SAP_ACTIVATION 10
+#define LLC_SAP_DEACTIVATION 11
+
+#define LLC_NBR_PRIMITIVES 11
+
+#define LLC_IND 1
+#define LLC_CONFIRM 2
+
+/* Primitive type */
+#define LLC_PRIM_TYPE_REQ 1
+#define LLC_PRIM_TYPE_IND 2
+#define LLC_PRIM_TYPE_RESP 3
+#define LLC_PRIM_TYPE_CONFIRM 4
+
+/* Reset reasons, remote entity or local LLC */
+#define LLC_RESET_REASON_REMOTE 1
+#define LLC_RESET_REASON_LOCAL 2
+
+/* Disconnect reasons */
+#define LLC_DISC_REASON_RX_DM_RSP_PDU 0
+#define LLC_DISC_REASON_RX_DISC_CMD_PDU 1
+#define LLC_DISC_REASON_ACK_TMR_EXP 2
+
+/* Confirm reasons */
+#define LLC_STATUS_CONN 0 /* connect confirm & reset confirm */
+#define LLC_STATUS_DISC 1 /* connect confirm & reset confirm */
+#define LLC_STATUS_FAILED 2 /* connect confirm & reset confirm */
+#define LLC_STATUS_IMPOSSIBLE 3 /* connect confirm */
+#define LLC_STATUS_RECEIVED 4 /* data conn */
+#define LLC_STATUS_REMOTE_BUSY 5 /* data conn */
+#define LLC_STATUS_REFUSE 6 /* data conn */
+#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
+#define LLC_STATUS_RESET_DONE 8 /* */
+
+extern u8 llc_mac_null_var[IFHWADDRLEN];
+
+/**
+ * llc_mac_null - determines if an address is the null mac address
+ * @mac: Mac address to test.
+ *
+ * Determines if a given address is the null mac address. Returns 0 if the
+ * address is not a null mac, 1 if it is.
+ */
+static __inline__ int llc_mac_null(u8 *mac)
+{
+ return !memcmp(mac, llc_mac_null_var, IFHWADDRLEN);
+}
+
+static __inline__ int llc_addrany(struct llc_addr *addr)
+{
+ return llc_mac_null(addr->mac) && !addr->lsap;
+}
+
+/**
+ * llc_mac_match - determines if two mac addresses are the same
+ * @mac1: First mac address to compare.
+ * @mac2: Second mac address to compare.
+ *
+ * Determines if two given mac addresses are the same. Returns 0 if the
+ * addresses differ, 1 if all IFHWADDRLEN bytes match.
+ */
+static __inline__ int llc_mac_match(u8 *mac1, u8 *mac2)
+{
+ return !memcmp(mac1, mac2, IFHWADDRLEN);
+}
+
+extern int llc_establish_connection(struct sock *sk, u8 *lmac,
+ u8 *dmac, u8 dsap);
+extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+extern int llc_send_disc(struct sock *sk);
+#endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
new file mode 100644
index 000000000000..f45c37d89cf7
--- /dev/null
+++ b/include/net/llc_pdu.h
@@ -0,0 +1,436 @@
+#ifndef LLC_PDU_H
+#define LLC_PDU_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_tr.h>
+
+/* Lengths of frame formats */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* Known SAP addresses */
+#define LLC_GLOBAL_SAP 0xFF
+#define LLC_NULL_SAP 0x00 /* not network-layer visible */
+#define LLC_MGMT_INDIV 0x02 /* station LLC mgmt indiv addr */
+#define LLC_MGMT_GRP 0x03 /* station LLC mgmt group addr */
+#define LLC_RDE_SAP 0xA6 /* route ... */
+
+/* SAP field bit masks */
+#define LLC_ISO_RESERVED_SAP 0x02
+#define LLC_SAP_GROUP_DSAP 0x01
+#define LLC_SAP_RESP_SSAP 0x01
+
+/* Group/individual DSAP indicator in DSAP field */
+#define LLC_PDU_GROUP_DSAP_MASK 0x01
+#define LLC_PDU_IS_GROUP_DSAP(pdu) \
+	((pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 1 : 0)
+#define LLC_PDU_IS_INDIV_DSAP(pdu) \
+	((pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 0 : 1)
+
+/* Command/response PDU indicator in SSAP field */
+#define LLC_PDU_CMD_RSP_MASK 0x01
+#define LLC_PDU_CMD 0
+#define LLC_PDU_RSP 1
+#define LLC_PDU_IS_CMD(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 0 : 1)
+#define LLC_PDU_IS_RSP(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 1 : 0)
+
+/* Get PDU type from 2 lowest-order bits of control field first byte */
+#define LLC_PDU_TYPE_I_MASK 0x01 /* 16-bit control field */
+#define LLC_PDU_TYPE_S_MASK 0x03
+#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
+#define LLC_PDU_TYPE_MASK 0x03
+
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+
+#define LLC_PDU_TYPE_IS_I(pdu) \
+ ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
+
+#define LLC_PDU_TYPE_IS_U(pdu) \
+ (((pdu->ctrl_1 & LLC_PDU_TYPE_U_MASK) == LLC_PDU_TYPE_U) ? 1 : 0)
+
+#define LLC_PDU_TYPE_IS_S(pdu) \
+ (((pdu->ctrl_1 & LLC_PDU_TYPE_S_MASK) == LLC_PDU_TYPE_S) ? 1 : 0)
+
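+/*
+ * Worked example (illustrative): the two low-order bits of the first
+ * control byte select the format: xxxxxxx0 is an I PDU, xxxxxx01 an
+ * S PDU and xxxxxx11 a U PDU. This is why the I test only masks bit 0
+ * while the S and U tests compare both low-order bits.
+ */
+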
+/* U-format PDU control field masks */
+#define LLC_U_PF_BIT_MASK 0x10 /* P/F bit mask */
+#define LLC_U_PF_IS_1(pdu) ((pdu->ctrl_1 & LLC_U_PF_BIT_MASK) ? 1 : 0)
+#define LLC_U_PF_IS_0(pdu) ((!(pdu->ctrl_1 & LLC_U_PF_BIT_MASK)) ? 1 : 0)
+
+#define LLC_U_PDU_CMD_MASK 0xEC /* cmd/rsp mask */
+#define LLC_U_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK)
+#define LLC_U_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK)
+
+#define LLC_1_PDU_CMD_UI 0x00 /* Type 1 cmds/rsps */
+#define LLC_1_PDU_CMD_XID 0xAC
+#define LLC_1_PDU_CMD_TEST 0xE0
+
+#define LLC_2_PDU_CMD_SABME 0x6C /* Type 2 cmds/rsps */
+#define LLC_2_PDU_CMD_DISC 0x40
+#define LLC_2_PDU_RSP_UA 0x60
+#define LLC_2_PDU_RSP_DM 0x0C
+#define LLC_2_PDU_RSP_FRMR 0x84
+
+/* Type 1 operations */
+
+/* XID information field bit masks */
+
+/* LLC format identifier (byte 1) */
+#define LLC_XID_FMT_ID 0x81 /* first byte must be this */
+
+/* LLC types/classes identifier (byte 2) */
+#define LLC_XID_CLASS_ZEROS_MASK 0xE0 /* these must be zeros */
+#define LLC_XID_CLASS_MASK 0x1F /* AND with byte to get below */
+
+#define LLC_XID_NULL_CLASS_1 0x01 /* if NULL LSAP...use these */
+#define LLC_XID_NULL_CLASS_2 0x03
+#define LLC_XID_NULL_CLASS_3 0x05
+#define LLC_XID_NULL_CLASS_4 0x07
+
+#define LLC_XID_NNULL_TYPE_1 0x01 /* if non-NULL LSAP...use these */
+#define LLC_XID_NNULL_TYPE_2 0x02
+#define LLC_XID_NNULL_TYPE_3 0x04
+#define LLC_XID_NNULL_TYPE_1_2 0x03
+#define LLC_XID_NNULL_TYPE_1_3 0x05
+#define LLC_XID_NNULL_TYPE_2_3 0x06
+#define LLC_XID_NNULL_ALL 0x07
+
+/* Sender Receive Window (byte 3) */
+#define LLC_XID_RW_MASK 0xFE /* AND with value to get below */
+
+#define LLC_XID_MIN_RW 0x02 /* lowest-order bit always zero */
+
+/* Type 2 operations */
+
+#define LLC_2_SEQ_NBR_MODULO ((u8) 128)
+
+/* I-PDU masks ('ctrl' is I-PDU control word) */
+#define LLC_I_GET_NS(pdu) (u8)((pdu->ctrl_1 & 0xFE) >> 1)
+#define LLC_I_GET_NR(pdu) (u8)((pdu->ctrl_2 & 0xFE) >> 1)
+
+#define LLC_I_PF_BIT_MASK 0x01
+
+#define LLC_I_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_I_PF_BIT_MASK)) ? 1 : 0)
+#define LLC_I_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_I_PF_BIT_MASK) ? 1 : 0)
+
+/* S-PDU supervisory commands and responses */
+
+#define LLC_S_PDU_CMD_MASK 0x0C
+#define LLC_S_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK)
+#define LLC_S_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK)
+
+#define LLC_2_PDU_CMD_RR 0x00 /* rx ready cmd */
+#define LLC_2_PDU_RSP_RR 0x00 /* rx ready rsp */
+#define LLC_2_PDU_CMD_REJ 0x08 /* reject PDU cmd */
+#define LLC_2_PDU_RSP_REJ 0x08 /* reject PDU rsp */
+#define LLC_2_PDU_CMD_RNR 0x04 /* rx not ready cmd */
+#define LLC_2_PDU_RSP_RNR 0x04 /* rx not ready rsp */
+
+#define LLC_S_PF_BIT_MASK 0x01
+#define LLC_S_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_S_PF_BIT_MASK)) ? 1 : 0)
+#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
+
+#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
+#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
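+
+/*
+ * Example (illustrative): sequence numbers are kept modulo 128, so with
+ * vr == 126, PDU_GET_NEXT_Vr(vr) yields 127, and with vr == 127 it wraps
+ * to 0 (note the macro also increments its argument).
+ */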
+
+/* FRMR information field macros */
+
+#define FRMR_INFO_LENGTH 5 /* 5 bytes of information */
+
+/*
+ * info is pointer to FRMR info field structure; 'rej_ctrl' is byte pointer
+ * (if U-PDU) or word pointer to rejected PDU control field
+ */
+#define FRMR_INFO_SET_REJ_CNTRL(info,rej_ctrl) \
+ info->rej_pdu_ctrl = ((*((u8 *) rej_ctrl) & \
+ LLC_PDU_TYPE_U) != LLC_PDU_TYPE_U ? \
+ (u16)*((u16 *) rej_ctrl) : \
+ (((u16) *((u8 *) rej_ctrl)) & 0x00FF))
+
+/*
+ * Info is pointer to FRMR info field structure; 'vs' is a byte containing
+ * send state variable value in low-order 7 bits (ensure the lowest-order
+ * bit remains zero (0))
+ */
+#define FRMR_INFO_SET_Vs(info,vs) (info->curr_ssv = (((u8) vs) << 1))
+#define FRMR_INFO_SET_Vr(info,vr) (info->curr_rsv = (((u8) vr) << 1))
+
+/*
+ * Info is pointer to FRMR info field structure; 'cr' is a byte containing
+ * the C/R bit value in the low-order bit
+ */
+#define FRMR_INFO_SET_C_R_BIT(info, cr) (info->curr_rsv |= (((u8) cr) & 0x01))
+
+/*
+ * In the remaining five macros, 'info' is pointer to FRMR info field
+ * structure; 'ind' is a byte containing the bit value to set in its
+ * lowest-order bit
+ */
+#define FRMR_INFO_SET_INVALID_PDU_CTRL_IND(info, ind) \
+ (info->ind_bits = ((info->ind_bits & 0xFE) | (((u8) ind) & 0x01)))
+
+#define FRMR_INFO_SET_INVALID_PDU_INFO_IND(info, ind) \
+ (info->ind_bits = ( (info->ind_bits & 0xFD) | (((u8) ind) & 0x02)))
+
+#define FRMR_INFO_SET_PDU_INFO_2LONG_IND(info, ind) \
+ (info->ind_bits = ( (info->ind_bits & 0xFB) | (((u8) ind) & 0x04)))
+
+#define FRMR_INFO_SET_PDU_INVALID_Nr_IND(info, ind) \
+ (info->ind_bits = ( (info->ind_bits & 0xF7) | (((u8) ind) & 0x08)))
+
+#define FRMR_INFO_SET_PDU_INVALID_Ns_IND(info, ind) \
+ (info->ind_bits = ( (info->ind_bits & 0xEF) | (((u8) ind) & 0x10)))
+
+/* Sequence-numbered PDU format (4 bytes in length) */
+struct llc_pdu_sn {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+ u8 ctrl_2;
+};
+
+static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
+{
+ return (struct llc_pdu_sn *)skb->nh.raw;
+}
+
+/* Un-numbered PDU format (3 bytes in length) */
+struct llc_pdu_un {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+};
+
+static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
+{
+ return (struct llc_pdu_un *)skb->nh.raw;
+}
+
+static inline void *llc_set_pdu_hdr(struct sk_buff *skb, void *ptr)
+{
+ return skb->nh.raw = ptr;
+}
+
+/**
+ * llc_pdu_header_init - initializes pdu header
+ * @skb: input skb in which the header is built.
+ * @type: type of PDU (U, I or S).
+ * @ssap: source sap.
+ * @dsap: destination sap.
+ * @cr: command/response bit (0 or 1).
+ *
+ * This function sets the DSAP, SSAP and command/response bit in the LLC header.
+ */
+static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+ u8 ssap, u8 dsap, u8 cr)
+{
+ const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ struct llc_pdu_un *pdu = llc_set_pdu_hdr(skb, skb_push(skb, hlen));
+ pdu->dsap = dsap;
+ pdu->ssap = ssap;
+ pdu->ssap |= cr;
+}
+
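+/*
+ * Usage sketch (illustrative; ssap/dsap stand for the local and destination
+ * SAPs and headroom handling is up to the caller): a transmit path would
+ * typically do something like
+ *
+ *	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ssap, dsap, LLC_PDU_CMD);
+ *	llc_pdu_init_as_ui_cmd(skb);
+ *
+ * before filling in the information field and handing the skb to the
+ * device layer.
+ */
+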
+/**
+ * llc_pdu_decode_sa - extracts the source address (MAC) of the input frame
+ * @skb: input skb from which the source address is extracted.
+ * @sa: pointer to the source address (6 byte array).
+ *
+ * This function extracts the source address (MAC) of the input frame.
+ */
+static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+{
+ if (skb->protocol == ntohs(ETH_P_802_2))
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+ memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
+}
+
+/**
+ * llc_pdu_decode_da - extracts dest address of input frame
+ * @skb: input skb from which the destination address is extracted.
+ * @da: pointer to the destination address (6 byte array).
+ *
+ * This function extracts the destination address (MAC) of the input frame.
+ */
+static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+{
+ if (skb->protocol == ntohs(ETH_P_802_2))
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+ memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN);
+}
+
+/**
+ * llc_pdu_decode_ssap - extracts source SAP of input frame
+ * @skb: input skb from which the source SAP is extracted.
+ * @ssap: source SAP (output argument).
+ *
+ * This function extracts the source SAP of the input frame. The low-order
+ * bit of the SSAP is the command/response bit.
+ */
+static inline void llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap)
+{
+ *ssap = llc_pdu_un_hdr(skb)->ssap & 0xFE;
+}
+
+/**
+ * llc_pdu_decode_dsap - extracts dest SAP of input frame
+ * @skb: input skb from which the destination SAP is extracted.
+ * @dsap: destination SAP (output argument).
+ *
+ * This function extracts the destination SAP of the input frame. The
+ * low-order bit of the DSAP designates an individual or group SAP.
+ */
+static inline void llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap)
+{
+ *dsap = llc_pdu_un_hdr(skb)->dsap & 0xFE;
+}
+
+/**
+ * llc_pdu_init_as_ui_cmd - sets LLC header as UI PDU
+ * @skb: input skb in which the header is built.
+ *
+ * This function sets the third byte of the LLC header to mark the PDU
+ * as a UI PDU.
+ */
+static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_UI;
+}
+
+/**
+ * llc_pdu_init_as_test_cmd - sets PDU as TEST
+ * @skb: Address of the skb to build
+ *
+ * Sets a PDU as TEST
+ */
+static inline void llc_pdu_init_as_test_cmd(struct sk_buff *skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+}
+
+/**
+ * llc_pdu_init_as_test_rsp - build TEST response PDU
+ * @skb: Address of the skb to build
+ * @ev_skb: The received TEST command PDU frame
+ *
+ * Builds a pdu frame as a TEST response.
+ */
+static inline void llc_pdu_init_as_test_rsp(struct sk_buff *skb,
+ struct sk_buff *ev_skb)
+{
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+ if (ev_skb->protocol == ntohs(ETH_P_802_2)) {
+ struct llc_pdu_un *ev_pdu = llc_pdu_un_hdr(ev_skb);
+ int dsize;
+
+ dsize = ntohs(eth_hdr(ev_skb)->h_proto) - 3;
+ memcpy(((u8 *)pdu) + 3, ((u8 *)ev_pdu) + 3, dsize);
+ skb_put(skb, dsize);
+ }
+}
+
+/* LLC Type 1 XID command/response information fields format */
+struct llc_xid_info {
+	u8 fmt_id;	/* always 0x81 for LLC */
+ u8 type; /* different if NULL/non-NULL LSAP */
+ u8 rw; /* sender receive window */
+};
+
+/**
+ * llc_pdu_init_as_xid_cmd - sets bytes 3 through 6 of LLC header as XID
+ * @skb: input skb in which the header is built.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
+ *
+ * This function sets the third, fourth, fifth and sixth bytes of the LLC
+ * header as an XID PDU.
+ */
+static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
+ u8 svcs_supported, u8 rx_window)
+{
+ struct llc_xid_info *xid_info;
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_XID;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+ xid_info = (struct llc_xid_info *)(((u8 *)&pdu->ctrl_1) + 1);
+ xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
+ xid_info->type = svcs_supported;
+ xid_info->rw = rx_window << 1; /* size of receive window */
+ skb_put(skb, 3);
+}
+
+/**
+ * llc_pdu_init_as_xid_rsp - builds XID response PDU
+ * @skb: Address of the skb to build
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
+ *
+ * Builds a pdu frame as an XID response.
+ */
+static inline void llc_pdu_init_as_xid_rsp(struct sk_buff *skb,
+ u8 svcs_supported, u8 rx_window)
+{
+ struct llc_xid_info *xid_info;
+ struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+
+ pdu->ctrl_1 = LLC_PDU_TYPE_U;
+ pdu->ctrl_1 |= LLC_1_PDU_CMD_XID;
+ pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+
+ xid_info = (struct llc_xid_info *)(((u8 *)&pdu->ctrl_1) + 1);
+ xid_info->fmt_id = LLC_XID_FMT_ID;
+ xid_info->type = svcs_supported;
+ xid_info->rw = rx_window << 1;
+ skb_put(skb, 3);
+}
+
+/* LLC Type 2 FRMR response information field format */
+struct llc_frmr_info {
+ u16 rej_pdu_ctrl; /* bits 1-8 if U-PDU */
+ u8 curr_ssv; /* current send state variable val */
+ u8 curr_rsv; /* current receive state variable */
+ u8 ind_bits; /* indicator bits set with macro */
+};
+
+extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
+ struct llc_pdu_sn *prev_pdu,
+ u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+#endif /* LLC_PDU_H */
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
new file mode 100644
index 000000000000..37a3bbd02394
--- /dev/null
+++ b/include/net/llc_s_ac.h
@@ -0,0 +1,39 @@
+#ifndef LLC_S_AC_H
+#define LLC_S_AC_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+/* SAP component actions */
+#define SAP_ACT_UNITDATA_IND 1
+#define SAP_ACT_SEND_UI 2
+#define SAP_ACT_SEND_XID_C 3
+#define SAP_ACT_SEND_XID_R 4
+#define SAP_ACT_SEND_TEST_C 5
+#define SAP_ACT_SEND_TEST_R 6
+#define SAP_ACT_REPORT_STATUS 7
+#define SAP_ACT_XID_IND 8
+#define SAP_ACT_TEST_IND 9
+
+/* All action functions must look like this */
+typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+
+extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
+ struct sk_buff *skb);
+extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_report_status(struct llc_sap *sap,
+ struct sk_buff *skb);
+extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+#endif /* LLC_S_AC_H */
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
new file mode 100644
index 000000000000..e3acb9329e4a
--- /dev/null
+++ b/include/net/llc_s_ev.h
@@ -0,0 +1,67 @@
+#ifndef LLC_S_EV_H
+#define LLC_S_EV_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#include <linux/skbuff.h>
+
+/* Defines SAP component events */
+/* Types of events (possible values in 'ev->type') */
+#define LLC_SAP_EV_TYPE_SIMPLE 1
+#define LLC_SAP_EV_TYPE_CONDITION 2
+#define LLC_SAP_EV_TYPE_PRIM 3
+#define LLC_SAP_EV_TYPE_PDU 4 /* command/response PDU */
+#define LLC_SAP_EV_TYPE_ACK_TMR 5
+#define LLC_SAP_EV_TYPE_RPT_STATUS 6
+
+#define LLC_SAP_EV_ACTIVATION_REQ 1
+#define LLC_SAP_EV_RX_UI 2
+#define LLC_SAP_EV_UNITDATA_REQ 3
+#define LLC_SAP_EV_XID_REQ 4
+#define LLC_SAP_EV_RX_XID_C 5
+#define LLC_SAP_EV_RX_XID_R 6
+#define LLC_SAP_EV_TEST_REQ 7
+#define LLC_SAP_EV_RX_TEST_C 8
+#define LLC_SAP_EV_RX_TEST_R 9
+#define LLC_SAP_EV_DEACTIVATION_REQ 10
+
+struct llc_sap_state_ev {
+ u8 prim;
+ u8 prim_type;
+ u8 type;
+ u8 reason;
+ u8 ind_cfm_flag;
+ struct llc_addr saddr;
+ struct llc_addr daddr;
+};
+
+static __inline__ struct llc_sap_state_ev *llc_sap_ev(struct sk_buff *skb)
+{
+ return (struct llc_sap_state_ev *)skb->cb;
+}
+
+struct llc_sap;
+
+typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
+
+extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
+ struct sk_buff *skb);
+#endif /* LLC_S_EV_H */
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
new file mode 100644
index 000000000000..567c681f1f3e
--- /dev/null
+++ b/include/net/llc_s_st.h
@@ -0,0 +1,32 @@
+#ifndef LLC_S_ST_H
+#define LLC_S_ST_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+
+#define LLC_NR_SAP_STATES 2 /* size of state table */
+
+/* structures and types */
+/* SAP state table structure */
+struct llc_sap_state_trans {
+ llc_sap_ev_t ev;
+ u8 next_state;
+ llc_sap_action_t *ev_actions;
+};
+
+struct llc_sap_state {
+ u8 curr_state;
+ struct llc_sap_state_trans **transitions;
+};
+
+/* only access to SAP state table */
+extern struct llc_sap_state llc_sap_state_table[LLC_NR_SAP_STATES];
+#endif /* LLC_S_ST_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
new file mode 100644
index 000000000000..353baaa627f3
--- /dev/null
+++ b/include/net/llc_sap.h
@@ -0,0 +1,29 @@
+#ifndef LLC_SAP_H
+#define LLC_SAP_H
+/*
+ * Copyright (c) 1997 by Procom Technology,Inc.
+ * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+struct llc_sap;
+struct sk_buff;
+
+extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+extern void llc_save_primitive(struct sk_buff* skb, unsigned char prim);
+extern struct sk_buff *llc_alloc_frame(void);
+
+extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
+ struct sk_buff *skb,
+ unsigned char *dmac,
+ unsigned char dsap);
+extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
+ struct sk_buff *skb,
+ unsigned char *dmac,
+ unsigned char dsap);
+#endif /* LLC_SAP_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
new file mode 100644
index 000000000000..f85d6e4b7442
--- /dev/null
+++ b/include/net/ndisc.h
@@ -0,0 +1,135 @@
+#ifndef _NDISC_H
+#define _NDISC_H
+
+/*
+ * ICMP codes for neighbour discovery messages
+ */
+
+#define NDISC_ROUTER_SOLICITATION 133
+#define NDISC_ROUTER_ADVERTISEMENT 134
+#define NDISC_NEIGHBOUR_SOLICITATION 135
+#define NDISC_NEIGHBOUR_ADVERTISEMENT 136
+#define NDISC_REDIRECT 137
+
+/*
+ * ndisc options
+ */
+
+enum {
+ __ND_OPT_PREFIX_INFO_END = 0,
+ ND_OPT_SOURCE_LL_ADDR = 1, /* RFC2461 */
+ ND_OPT_TARGET_LL_ADDR = 2, /* RFC2461 */
+ ND_OPT_PREFIX_INFO = 3, /* RFC2461 */
+ ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */
+ ND_OPT_MTU = 5, /* RFC2461 */
+ __ND_OPT_MAX
+};
+
+#define MAX_RTR_SOLICITATION_DELAY HZ
+
+#define ND_REACHABLE_TIME (30*HZ)
+#define ND_RETRANS_TIMER HZ
+
+#define ND_MIN_RANDOM_FACTOR (1/2)
+#define ND_MAX_RANDOM_FACTOR (3/2)
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/icmpv6.h>
+#include <net/neighbour.h>
+#include <asm/atomic.h>
+
+extern struct neigh_table nd_tbl;
+
+struct nd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ __u8 opt[0];
+};
+
+struct rs_msg {
+ struct icmp6hdr icmph;
+ __u8 opt[0];
+};
+
+struct ra_msg {
+ struct icmp6hdr icmph;
+ __u32 reachable_time;
+ __u32 retrans_timer;
+};
+
+struct nd_opt_hdr {
+ __u8 nd_opt_type;
+ __u8 nd_opt_len;
+} __attribute__((__packed__));
+
+
+extern int ndisc_init(struct net_proto_family *ops);
+
+extern void ndisc_cleanup(void);
+
+extern int ndisc_rcv(struct sk_buff *skb);
+
+extern void ndisc_send_ns(struct net_device *dev,
+ struct neighbour *neigh,
+ struct in6_addr *solicit,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+
+extern void ndisc_send_rs(struct net_device *dev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr);
+
+extern void ndisc_forwarding_on(void);
+extern void ndisc_forwarding_off(void);
+
+extern void ndisc_send_redirect(struct sk_buff *skb,
+ struct neighbour *neigh,
+ struct in6_addr *target);
+
+extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir);
+
+
+struct rt6_info * dflt_rt_lookup(void);
+
+/*
+ * IGMP
+ */
+extern int igmp6_init(struct net_proto_family *ops);
+
+extern void igmp6_cleanup(void);
+
+extern int igmp6_event_query(struct sk_buff *skb);
+
+extern int igmp6_event_report(struct sk_buff *skb);
+
+#ifdef CONFIG_SYSCTL
+extern int ndisc_ifinfo_sysctl_change(ctl_table *ctl,
+ int write,
+ struct file * filp,
+ void __user *buffer,
+ size_t *lenp,
+ loff_t *ppos);
+#endif
+
+extern void inet6_ifinfo_notify(int event,
+ struct inet6_dev *idev);
+
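+/*
+ * Looks up the neighbour cache entry for @addr on @dev in the IPv6
+ * neighbour table, creating it if it does not exist yet (the final
+ * argument to __neigh_lookup() asks for creation); returns NULL when
+ * no device is given.
+ */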
+static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, struct in6_addr *addr)
+{
+
+ if (dev)
+ return __neigh_lookup(&nd_tbl, addr, dev, 1);
+
+ return NULL;
+}
+
+
+#endif /* __KERNEL__ */
+
+
+#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
new file mode 100644
index 000000000000..4f33bbc21e7f
--- /dev/null
+++ b/include/net/neighbour.h
@@ -0,0 +1,370 @@
+#ifndef _NET_NEIGHBOUR_H
+#define _NET_NEIGHBOUR_H
+
+/*
+ * Generic neighbour manipulation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:
+ *
+ * Harald Welte: <laforge@gnumonks.org>
+ * - Add neighbour cache statistics like rtstat
+ */
+
+/* The following flags & states are exported to user space,
+   so they should eventually be moved to the include/linux/ directory.
+ */
+
+/*
+ * Neighbor Cache Entry Flags
+ */
+
+#define NTF_PROXY 0x08 /* == ATF_PUBL */
+#define NTF_ROUTER 0x80
+
+/*
+ * Neighbor Cache Entry States.
+ */
+
+#define NUD_INCOMPLETE 0x01
+#define NUD_REACHABLE 0x02
+#define NUD_STALE 0x04
+#define NUD_DELAY 0x08
+#define NUD_PROBE 0x10
+#define NUD_FAILED 0x20
+
+/* Dummy states */
+#define NUD_NOARP 0x40
+#define NUD_PERMANENT 0x80
+#define NUD_NONE 0x00
+
+/* NUD_NOARP & NUD_PERMANENT are pseudostates: they never change
+   and perform no address resolution or NUD.
+   NUD_PERMANENT entries also cannot be deleted by garbage collectors.
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+
+#include <linux/err.h>
+#include <linux/sysctl.h>
+
+#define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
+#define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
+#define NUD_CONNECTED (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
+
+struct neighbour;
+
+struct neigh_parms
+{
+ struct neigh_parms *next;
+ int (*neigh_setup)(struct neighbour *);
+ struct neigh_table *tbl;
+ int entries;
+ void *priv;
+
+ void *sysctl_table;
+
+ int dead;
+ atomic_t refcnt;
+ struct rcu_head rcu_head;
+
+ int base_reachable_time;
+ int retrans_time;
+ int gc_staletime;
+ int reachable_time;
+ int delay_probe_time;
+
+ int queue_len;
+ int ucast_probes;
+ int app_probes;
+ int mcast_probes;
+ int anycast_delay;
+ int proxy_delay;
+ int proxy_qlen;
+ int locktime;
+};
+
+struct neigh_statistics
+{
+ unsigned long allocs; /* number of allocated neighs */
+ unsigned long destroys; /* number of destroyed neighs */
+ unsigned long hash_grows; /* number of hash resizes */
+
+	unsigned long res_failed;	/* number of failed resolutions */
+
+ unsigned long lookups; /* number of lookups */
+ unsigned long hits; /* number of hits (among lookups) */
+
+	unsigned long rcv_probes_mcast;	/* number of received mcast probes (ipv6) */
+	unsigned long rcv_probes_ucast; /* number of received ucast probes (ipv6) */
+
+ unsigned long periodic_gc_runs; /* number of periodic GC runs */
+ unsigned long forced_gc_runs; /* number of forced GC runs */
+};
+
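+/*
+ * Bumps a per-CPU counter in the table's neigh_statistics; preemption is
+ * disabled around the update so that smp_processor_id() and the increment
+ * refer to the same CPU's statistics block.
+ */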
+#define NEIGH_CACHE_STAT_INC(tbl, field) \
+ do { \
+ preempt_disable(); \
+ (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
+ preempt_enable(); \
+ } while (0)
+
+struct neighbour
+{
+ struct neighbour *next;
+ struct neigh_table *tbl;
+ struct neigh_parms *parms;
+ struct net_device *dev;
+ unsigned long used;
+ unsigned long confirmed;
+ unsigned long updated;
+ __u8 flags;
+ __u8 nud_state;
+ __u8 type;
+ __u8 dead;
+ atomic_t probes;
+ rwlock_t lock;
+ unsigned char ha[(MAX_ADDR_LEN+sizeof(unsigned long)-1)&~(sizeof(unsigned long)-1)];
+ struct hh_cache *hh;
+ atomic_t refcnt;
+ int (*output)(struct sk_buff *skb);
+ struct sk_buff_head arp_queue;
+ struct timer_list timer;
+ struct neigh_ops *ops;
+ u8 primary_key[0];
+};
+
+struct neigh_ops
+{
+ int family;
+ void (*destructor)(struct neighbour *);
+ void (*solicit)(struct neighbour *, struct sk_buff*);
+ void (*error_report)(struct neighbour *, struct sk_buff*);
+ int (*output)(struct sk_buff*);
+ int (*connected_output)(struct sk_buff*);
+ int (*hh_output)(struct sk_buff*);
+ int (*queue_xmit)(struct sk_buff*);
+};
+
+struct pneigh_entry
+{
+ struct pneigh_entry *next;
+ struct net_device *dev;
+ u8 key[0];
+};
+
+/*
+ * neighbour table manipulation
+ */
+
+
+struct neigh_table
+{
+ struct neigh_table *next;
+ int family;
+ int entry_size;
+ int key_len;
+ __u32 (*hash)(const void *pkey, const struct net_device *);
+ int (*constructor)(struct neighbour *);
+ int (*pconstructor)(struct pneigh_entry *);
+ void (*pdestructor)(struct pneigh_entry *);
+ void (*proxy_redo)(struct sk_buff *skb);
+ char *id;
+ struct neigh_parms parms;
+	/* HACK. gc_* should follow parms without a gap! */
+ int gc_interval;
+ int gc_thresh1;
+ int gc_thresh2;
+ int gc_thresh3;
+ unsigned long last_flush;
+ struct timer_list gc_timer;
+ struct timer_list proxy_timer;
+ struct sk_buff_head proxy_queue;
+ atomic_t entries;
+ rwlock_t lock;
+ unsigned long last_rand;
+ struct neigh_parms *parms_list;
+ kmem_cache_t *kmem_cachep;
+ struct neigh_statistics *stats;
+ struct neighbour **hash_buckets;
+ unsigned int hash_mask;
+ __u32 hash_rnd;
+ unsigned int hash_chain_gc;
+ struct pneigh_entry **phash_buckets;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *pde;
+#endif
+};
+
+/* flags for neigh_update() */
+#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
+#define NEIGH_UPDATE_F_ISROUTER 0x40000000
+#define NEIGH_UPDATE_F_ADMIN 0x80000000
+
+extern void neigh_table_init(struct neigh_table *tbl);
+extern int neigh_table_clear(struct neigh_table *tbl);
+extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev);
+extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
+ const void *pkey);
+extern struct neighbour * neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev);
+extern void neigh_destroy(struct neighbour *neigh);
+extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ u32 flags);
+extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+extern int neigh_resolve_output(struct sk_buff *skb);
+extern int neigh_connected_output(struct sk_buff *skb);
+extern int neigh_compat_output(struct sk_buff *skb);
+extern struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+ u8 *lladdr, void *saddr,
+ struct net_device *dev);
+
+extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
+extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+extern void neigh_parms_destroy(struct neigh_parms *parms);
+extern unsigned long neigh_rand_reach_time(unsigned long base);
+
+extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb);
+extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *key, struct net_device *dev, int creat);
+extern int pneigh_delete(struct neigh_table *tbl, const void *key, struct net_device *dev);
+
+struct netlink_callback;
+struct nlmsghdr;
+extern int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
+extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+extern void neigh_app_ns(struct neighbour *n);
+
+extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
+extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
+extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
+
+struct neigh_seq_state {
+ struct neigh_table *tbl;
+ void *(*neigh_sub_iter)(struct neigh_seq_state *state,
+ struct neighbour *n, loff_t *pos);
+ unsigned int bucket;
+ unsigned int flags;
+#define NEIGH_SEQ_NEIGH_ONLY 0x00000001
+#define NEIGH_SEQ_IS_PNEIGH 0x00000002
+#define NEIGH_SEQ_SKIP_NOARP 0x00000004
+};
+extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
+extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+extern void neigh_seq_stop(struct seq_file *, void *);
+
+extern int neigh_sysctl_register(struct net_device *dev,
+ struct neigh_parms *p,
+ int p_id, int pdev_id,
+ char *p_name,
+ proc_handler *proc_handler,
+ ctl_handler *strategy);
+extern void neigh_sysctl_unregister(struct neigh_parms *p);
+
+static inline void __neigh_parms_put(struct neigh_parms *parms)
+{
+ atomic_dec(&parms->refcnt);
+}
+
+static inline void neigh_parms_put(struct neigh_parms *parms)
+{
+ if (atomic_dec_and_test(&parms->refcnt))
+ neigh_parms_destroy(parms);
+}
+
+static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
+{
+ atomic_inc(&parms->refcnt);
+ return parms;
+}
+
+/*
+ * Neighbour references
+ */
+
+static inline void neigh_release(struct neighbour *neigh)
+{
+ if (atomic_dec_and_test(&neigh->refcnt))
+ neigh_destroy(neigh);
+}
+
+static inline struct neighbour * neigh_clone(struct neighbour *neigh)
+{
+ if (neigh)
+ atomic_inc(&neigh->refcnt);
+ return neigh;
+}
+
+#define neigh_hold(n) atomic_inc(&(n)->refcnt)
+
+static inline void neigh_confirm(struct neighbour *neigh)
+{
+ if (neigh)
+ neigh->confirmed = jiffies;
+}
+
+static inline int neigh_is_connected(struct neighbour *neigh)
+{
+ return neigh->nud_state&NUD_CONNECTED;
+}
+
+static inline int neigh_is_valid(struct neighbour *neigh)
+{
+ return neigh->nud_state&NUD_VALID;
+}
+
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ neigh->used = jiffies;
+ if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
+ return __neigh_event_send(neigh, skb);
+ return 0;
+}
+
+static inline struct neighbour *
+__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
+{
+ struct neighbour *n = neigh_lookup(tbl, pkey, dev);
+
+ if (n || !creat)
+ return n;
+
+ n = neigh_create(tbl, pkey, dev);
+ return IS_ERR(n) ? NULL : n;
+}
+
+static inline struct neighbour *
+__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ struct neighbour *n = neigh_lookup(tbl, pkey, dev);
+
+ if (n)
+ return n;
+
+ return neigh_create(tbl, pkey, dev);
+}
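+
+/*
+ * Illustrative usage sketch (not part of the original header): a caller
+ * resolving a next hop typically looks the entry up (creating it on
+ * demand), kicks neighbour state with neigh_event_send() and drops its
+ * reference when done.  "arp_tbl" and "nexthop" below are only one
+ * concrete example of a table and key.
+ *
+ *	struct neighbour *n = __neigh_lookup_errno(&arp_tbl, &nexthop, dev);
+ *
+ *	if (IS_ERR(n))
+ *		return PTR_ERR(n);
+ *	neigh_event_send(n, skb);	/+ start resolution if still needed +/
+ *	neigh_release(n);		/+ drop the lookup/create reference +/
+ */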
+
+#define LOCALLY_ENQUEUED -2
+
+#endif
+#endif
+
+
diff --git a/include/net/netrom.h b/include/net/netrom.h
new file mode 100644
index 000000000000..45f2c7616d8b
--- /dev/null
+++ b/include/net/netrom.h
@@ -0,0 +1,242 @@
+/*
+ * Declarations of NET/ROM type objects.
+ *
+ * Jonathan Naylor G4KLX 9/4/95
+ */
+
+#ifndef _NETROM_H
+#define _NETROM_H
+#include <linux/netrom.h>
+#include <linux/list.h>
+#include <net/sock.h>
+
+#define NR_NETWORK_LEN 15
+#define NR_TRANSPORT_LEN 5
+
+#define NR_PROTO_IP 0x0C
+
+#define NR_PROTOEXT 0x00
+#define NR_CONNREQ 0x01
+#define NR_CONNACK 0x02
+#define NR_DISCREQ 0x03
+#define NR_DISCACK 0x04
+#define NR_INFO 0x05
+#define NR_INFOACK 0x06
+
+#define NR_CHOKE_FLAG 0x80
+#define NR_NAK_FLAG 0x40
+#define NR_MORE_FLAG 0x20
+
+/* Define Link State constants. */
+enum {
+ NR_STATE_0,
+ NR_STATE_1,
+ NR_STATE_2,
+ NR_STATE_3
+};
+
+#define NR_COND_ACK_PENDING 0x01
+#define NR_COND_REJECT 0x02
+#define NR_COND_PEER_RX_BUSY 0x04
+#define NR_COND_OWN_RX_BUSY 0x08
+
+#define NR_DEFAULT_T1 (120 * HZ) /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 (5 * HZ) /* Response delay - 5 seconds */
+#define NR_DEFAULT_N2 3 /* Number of Retries - 3 */
+#define NR_DEFAULT_T4 (180 * HZ) /* Busy Delay - 180 seconds */
+#define NR_DEFAULT_IDLE (0 * 60 * HZ) /* No Activity Timeout - none */
+#define NR_DEFAULT_WINDOW 4 /* Default Window Size - 4 */
+#define NR_DEFAULT_OBS 6 /* Default Obsolescence Count - 6 */
+#define NR_DEFAULT_QUAL 10 /* Default Neighbour Quality - 10 */
+#define NR_DEFAULT_TTL 16 /* Default Time To Live - 16 */
+#define NR_DEFAULT_ROUTING 1 /* Is routing enabled ? */
+#define NR_DEFAULT_FAILS 2 /* Link fails until route fails */
+
+#define NR_MODULUS 256
+#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */
+#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */
+
+struct nr_sock {
+ struct sock sock;
+ ax25_address user_addr, source_addr, dest_addr;
+ struct net_device *device;
+ unsigned char my_index, my_id;
+ unsigned char your_index, your_id;
+ unsigned char state, condition, bpqext, window;
+ unsigned short vs, vr, va, vl;
+ unsigned char n2, n2count;
+ unsigned long t1, t2, t4, idle;
+ unsigned short fraglen;
+ struct timer_list t1timer;
+ struct timer_list t2timer;
+ struct timer_list t4timer;
+ struct timer_list idletimer;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head frag_queue;
+};
+
+#define nr_sk(sk) ((struct nr_sock *)(sk))
+
+struct nr_neigh {
+ struct hlist_node neigh_node;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct net_device *dev;
+ unsigned char quality;
+ unsigned char locked;
+ unsigned short count;
+ unsigned int number;
+ unsigned char failed;
+ atomic_t refcount;
+};
+
+struct nr_route {
+ unsigned char quality;
+ unsigned char obs_count;
+ struct nr_neigh *neighbour;
+};
+
+struct nr_node {
+ struct hlist_node node_node;
+ ax25_address callsign;
+ char mnemonic[7];
+ unsigned char which;
+ unsigned char count;
+ struct nr_route routes[3];
+ atomic_t refcount;
+ spinlock_t node_lock;
+};
+
+/*********************************************************************
+ * nr_node & nr_neigh lists, refcounting and locking
+ *********************************************************************/
+
+#define nr_node_hold(__nr_node) \
+ atomic_inc(&((__nr_node)->refcount))
+
+static __inline__ void nr_node_put(struct nr_node *nr_node)
+{
+ if (atomic_dec_and_test(&nr_node->refcount)) {
+ kfree(nr_node);
+ }
+}
+
+#define nr_neigh_hold(__nr_neigh) \
+ atomic_inc(&((__nr_neigh)->refcount))
+
+static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
+{
+ if (atomic_dec_and_test(&nr_neigh->refcount)) {
+ if (nr_neigh->digipeat != NULL)
+ kfree(nr_neigh->digipeat);
+ kfree(nr_neigh);
+ }
+}
+
+/* nr_node_lock and nr_node_unlock also hold/put the node's refcounter.
+ */
+static __inline__ void nr_node_lock(struct nr_node *nr_node)
+{
+ nr_node_hold(nr_node);
+ spin_lock_bh(&nr_node->node_lock);
+}
+
+static __inline__ void nr_node_unlock(struct nr_node *nr_node)
+{
+ spin_unlock_bh(&nr_node->node_lock);
+ nr_node_put(nr_node);
+}
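+
+/*
+ * Illustrative sketch (not in the original header): since nr_node_lock()
+ * also takes a reference, a caller inspecting a node's routes can simply
+ * bracket the access with lock/unlock and needs no extra hold/put.
+ * "use()" is a hypothetical consumer:
+ *
+ *	nr_node_lock(nr_node);
+ *	for (i = 0; i < nr_node->count; i++)
+ *		use(nr_node->routes[i].neighbour);
+ *	nr_node_unlock(nr_node);
+ */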
+
+#define nr_neigh_for_each(__nr_neigh, node, list) \
+ hlist_for_each_entry(__nr_neigh, node, list, neigh_node)
+
+#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \
+ hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node)
+
+#define nr_node_for_each(__nr_node, node, list) \
+ hlist_for_each_entry(__nr_node, node, list, node_node)
+
+#define nr_node_for_each_safe(__nr_node, node, node2, list) \
+ hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node)
+
+
+/*********************************************************************/
+
+/* af_netrom.c */
+extern int sysctl_netrom_default_path_quality;
+extern int sysctl_netrom_obsolescence_count_initialiser;
+extern int sysctl_netrom_network_ttl_initialiser;
+extern int sysctl_netrom_transport_timeout;
+extern int sysctl_netrom_transport_maximum_tries;
+extern int sysctl_netrom_transport_acknowledge_delay;
+extern int sysctl_netrom_transport_busy_delay;
+extern int sysctl_netrom_transport_requested_window_size;
+extern int sysctl_netrom_transport_no_activity_timeout;
+extern int sysctl_netrom_routing_control;
+extern int sysctl_netrom_link_fails_count;
+extern int nr_rx_frame(struct sk_buff *, struct net_device *);
+extern void nr_destroy_socket(struct sock *);
+
+/* nr_dev.c */
+extern int nr_rx_ip(struct sk_buff *, struct net_device *);
+extern void nr_setup(struct net_device *);
+
+/* nr_in.c */
+extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* nr_loopback.c */
+extern void nr_loopback_init(void);
+extern void nr_loopback_clear(void);
+extern int nr_loopback_queue(struct sk_buff *);
+
+/* nr_out.c */
+extern void nr_output(struct sock *, struct sk_buff *);
+extern void nr_send_nak_frame(struct sock *);
+extern void nr_kick(struct sock *);
+extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
+extern void nr_establish_data_link(struct sock *);
+extern void nr_enquiry_response(struct sock *);
+extern void nr_check_iframes_acked(struct sock *, unsigned short);
+
+/* nr_route.c */
+extern void nr_rt_device_down(struct net_device *);
+extern struct net_device *nr_dev_first(void);
+extern struct net_device *nr_dev_get(ax25_address *);
+extern int nr_rt_ioctl(unsigned int, void __user *);
+extern void nr_link_failed(ax25_cb *, int);
+extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+extern struct file_operations nr_nodes_fops;
+extern struct file_operations nr_neigh_fops;
+extern void nr_rt_free(void);
+
+/* nr_subr.c */
+extern void nr_clear_queues(struct sock *);
+extern void nr_frames_acked(struct sock *, unsigned short);
+extern void nr_requeue_frames(struct sock *);
+extern int nr_validate_nr(struct sock *, unsigned short);
+extern int nr_in_rx_window(struct sock *, unsigned short);
+extern void nr_write_internal(struct sock *, int);
+extern void nr_transmit_refusal(struct sk_buff *, int);
+extern void nr_disconnect(struct sock *, int);
+
+/* nr_timer.c */
+extern void nr_init_timers(struct sock *sk);
+extern void nr_start_heartbeat(struct sock *);
+extern void nr_start_t1timer(struct sock *);
+extern void nr_start_t2timer(struct sock *);
+extern void nr_start_t4timer(struct sock *);
+extern void nr_start_idletimer(struct sock *);
+extern void nr_stop_heartbeat(struct sock *);
+extern void nr_stop_t1timer(struct sock *);
+extern void nr_stop_t2timer(struct sock *);
+extern void nr_stop_t4timer(struct sock *);
+extern void nr_stop_idletimer(struct sock *);
+extern int nr_t1timer_running(struct sock *);
+
+/* sysctl_net_netrom.c */
+extern void nr_register_sysctl(void);
+extern void nr_unregister_sysctl(void);
+
+#endif
diff --git a/include/net/p8022.h b/include/net/p8022.h
new file mode 100644
index 000000000000..3c99a86c3581
--- /dev/null
+++ b/include/net/p8022.h
@@ -0,0 +1,10 @@
+#ifndef _NET_P8022_H
+#define _NET_P8022_H
+extern struct datalink_proto *
+ register_8022_client(unsigned char type,
+ int (*func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt));
+extern void unregister_8022_client(struct datalink_proto *proto);
+
+#endif
diff --git a/include/net/pkt_act.h b/include/net/pkt_act.h
new file mode 100644
index 000000000000..bd08964b72c0
--- /dev/null
+++ b/include/net/pkt_act.h
@@ -0,0 +1,275 @@
+#ifndef __NET_PKT_ACT_H
+#define __NET_PKT_ACT_H
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+
+#define tca_st(val) (struct tcf_##val *)
+#define PRIV(a,name) ( tca_st(name) (a)->priv)
+
+#if 0 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+static __inline__ unsigned
+tcf_hash(u32 index)
+{
+ return index & MY_TAB_MASK;
+}
+
+/* probably move this from being inline
+ * and put into act_generic
+*/
+static inline void
+tcf_hash_destroy(struct tcf_st *p)
+{
+ unsigned h = tcf_hash(p->index);
+ struct tcf_st **p1p;
+
+ for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
+ if (*p1p == p) {
+ write_lock_bh(&tcf_t_lock);
+ *p1p = p->next;
+ write_unlock_bh(&tcf_t_lock);
+#ifdef CONFIG_NET_ESTIMATOR
+ gen_kill_estimator(&p->bstats, &p->rate_est);
+#endif
+ kfree(p);
+ return;
+ }
+ }
+ BUG_TRAP(0);
+}
+
+static inline int
+tcf_hash_release(struct tcf_st *p, int bind )
+{
+ int ret = 0;
+ if (p) {
+ if (bind) {
+ p->bindcnt--;
+ }
+ p->refcnt--;
+ if(p->bindcnt <=0 && p->refcnt <= 0) {
+ tcf_hash_destroy(p);
+ ret = 1;
+ }
+ }
+ return ret;
+}
+
+static __inline__ int
+tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tc_action *a)
+{
+ struct tcf_st *p;
+ int err =0, index = -1,i= 0, s_i = 0, n_i = 0;
+ struct rtattr *r ;
+
+ read_lock(&tcf_t_lock);
+
+ s_i = cb->args[0];
+
+ for (i = 0; i < MY_TAB_SIZE; i++) {
+ p = tcf_ht[tcf_hash(i)];
+
+ for (; p; p = p->next) {
+ index++;
+ if (index < s_i)
+ continue;
+ a->priv = p;
+ a->order = n_i;
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ err = tcf_action_dump_1(skb, a, 0, 0);
+ if (0 > err) {
+ index--;
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+ }
+ r->rta_len = skb->tail - (u8*)r;
+ n_i++;
+ if (n_i >= TCA_ACT_MAX_PRIO) {
+ goto done;
+ }
+ }
+ }
+done:
+ read_unlock(&tcf_t_lock);
+ if (n_i)
+ cb->args[0] += n_i;
+ return n_i;
+
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+}
+
+static __inline__ int
+tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
+{
+ struct tcf_st *p, *s_p;
+ struct rtattr *r ;
+ int i= 0, n_i = 0;
+
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
+ for (i = 0; i < MY_TAB_SIZE; i++) {
+ p = tcf_ht[tcf_hash(i)];
+
+ while (p != NULL) {
+ s_p = p->next;
+ if (ACT_P_DELETED == tcf_hash_release(p, 0)) {
+ module_put(a->ops->owner);
+ }
+ n_i++;
+ p = s_p;
+ }
+ }
+ RTA_PUT(skb, TCA_FCNT, 4, &n_i);
+ r->rta_len = skb->tail - (u8*)r;
+
+ return n_i;
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ return -EINVAL;
+}
+
+static __inline__ int
+tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
+ struct tc_action *a)
+{
+ if (type == RTM_DELACTION) {
+ return tcf_del_walker(skb,a);
+ } else if (type == RTM_GETACTION) {
+ return tcf_dump_walker(skb,cb,a);
+ } else {
+ printk("tcf_generic_walker: unknown action %d\n",type);
+ return -EINVAL;
+ }
+}
+
+static __inline__ struct tcf_st *
+tcf_hash_lookup(u32 index)
+{
+ struct tcf_st *p;
+
+ read_lock(&tcf_t_lock);
+ for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
+ if (p->index == index)
+ break;
+ }
+ read_unlock(&tcf_t_lock);
+ return p;
+}
+
+static __inline__ u32
+tcf_hash_new_index(void)
+{
+ do {
+ if (++idx_gen == 0)
+ idx_gen = 1;
+ } while (tcf_hash_lookup(idx_gen));
+
+ return idx_gen;
+}
+
+
+static inline int
+tcf_hash_search(struct tc_action *a, u32 index)
+{
+ struct tcf_st *p = tcf_hash_lookup(index);
+
+ if (p != NULL) {
+ a->priv = p;
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_NET_ACT_INIT
+static inline struct tcf_st *
+tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind)
+{
+ struct tcf_st *p = NULL;
+ if (index && (p = tcf_hash_lookup(index)) != NULL) {
+ if (bind) {
+ p->bindcnt++;
+ p->refcnt++;
+ }
+ a->priv = p;
+ }
+ return p;
+}
+
+static inline struct tcf_st *
+tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
+{
+ struct tcf_st *p = NULL;
+
+ p = kmalloc(size, GFP_KERNEL);
+ if (p == NULL)
+ return p;
+
+ memset(p, 0, size);
+ p->refcnt = 1;
+
+ if (bind) {
+ p->bindcnt = 1;
+ }
+
+ spin_lock_init(&p->lock);
+ p->stats_lock = &p->lock;
+ p->index = index ? : tcf_hash_new_index();
+ p->tm.install = jiffies;
+ p->tm.lastuse = jiffies;
+#ifdef CONFIG_NET_ESTIMATOR
+ if (est)
+ gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
+#endif
+ a->priv = (void *) p;
+ return p;
+}
+
+static inline void tcf_hash_insert(struct tcf_st *p)
+{
+ unsigned h = tcf_hash(p->index);
+
+ write_lock_bh(&tcf_t_lock);
+ p->next = tcf_ht[h];
+ tcf_ht[h] = p;
+ write_unlock_bh(&tcf_t_lock);
+}
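+
+/*
+ * Illustrative sketch (not part of the original header) of how an action's
+ * init path is expected to use the helpers above: reuse an existing entry
+ * when the index is already known, otherwise allocate and hash a new one.
+ * "struct tcf_mything" and the surrounding parameter names are hypothetical.
+ *
+ *	struct tcf_mything *p;
+ *
+ *	p = (struct tcf_mything *)tcf_hash_check(index, a, ovr, bind);
+ *	if (p == NULL) {
+ *		p = (struct tcf_mything *)tcf_hash_create(index, est, a,
+ *							  sizeof(*p), ovr, bind);
+ *		if (p == NULL)
+ *			return -ENOMEM;
+ *		... fill in the action specific fields ...
+ *		tcf_hash_insert((struct tcf_st *)p);
+ *	}
+ */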
+
+#endif
+
+#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
new file mode 100644
index 000000000000..4abda6aec05a
--- /dev/null
+++ b/include/net/pkt_cls.h
@@ -0,0 +1,366 @@
+#ifndef __NET_PKT_CLS_H
+#define __NET_PKT_CLS_H
+
+#include <linux/pkt_cls.h>
+#include <net/sch_generic.h>
+#include <net/act_api.h>
+
+/* Basic packet classifier frontend definitions. */
+
+struct tcf_walker
+{
+ int stop;
+ int skip;
+ int count;
+ int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
+};
+
+extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+
+static inline unsigned long
+__cls_set_class(unsigned long *clp, unsigned long cl)
+{
+ unsigned long old_cl;
+
+ old_cl = *clp;
+ *clp = cl;
+ return old_cl;
+}
+
+static inline unsigned long
+cls_set_class(struct tcf_proto *tp, unsigned long *clp,
+ unsigned long cl)
+{
+ unsigned long old_cl;
+
+ tcf_tree_lock(tp);
+ old_cl = __cls_set_class(clp, cl);
+ tcf_tree_unlock(tp);
+
+ return old_cl;
+}
+
+static inline void
+tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
+{
+ unsigned long cl;
+
+ cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
+ cl = cls_set_class(tp, &r->class, cl);
+ if (cl)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+}
+
+static inline void
+tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
+{
+ unsigned long cl;
+
+ if ((cl = __cls_set_class(&r->class, 0)) != 0)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+}
+
+struct tcf_exts
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tc_action *action;
+#elif defined CONFIG_NET_CLS_POLICE
+ struct tcf_police *police;
+#endif
+};
+
+/* Map to export classifier specific extension TLV types to the
+ * generic extensions API. Unsupported extensions must be set to 0.
+ */
+struct tcf_ext_map
+{
+ int action;
+ int police;
+};
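+
+/*
+ * Illustrative sketch (not in the original header): a classifier exports
+ * its extension TLV types through a static map, e.g. (the TCA_MYCLS_*
+ * attribute names are hypothetical):
+ *
+ *	static struct tcf_ext_map mycls_ext_map = {
+ *		.action = TCA_MYCLS_ACT,
+ *		.police = TCA_MYCLS_POLICE,
+ *	};
+ */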
+
+/**
+ * tcf_exts_is_predicative - check if a predicative extension is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns 1 if a predicative extension is present, i.e. an extension which
+ * might cause further actions and thus overrule the regular tcf_result.
+ */
+static inline int
+tcf_exts_is_predicative(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return !!exts->action;
+#elif defined CONFIG_NET_CLS_POLICE
+ return !!exts->police;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * tcf_exts_is_available - check if at least one extension is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns 1 if at least one extension is present.
+ */
+static inline int
+tcf_exts_is_available(struct tcf_exts *exts)
+{
+ /* All non-predicative extensions must be added here. */
+ return tcf_exts_is_predicative(exts);
+}
+
+/**
+ * tcf_exts_exec - execute tc filter extensions
+ * @skb: socket buffer
+ * @exts: tc filter extensions handle
+ * @res: desired result
+ *
+ * Executes all configured extensions. Returns 0 on a normal execution,
+ * a negative number if the filter must be considered unmatched or
+ * a positive action code (TC_ACT_*) which must be returned to the
+ * underlying layer.
+ */
+static inline int
+tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
+ struct tcf_result *res)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (exts->action)
+ return tcf_action_exec(skb, exts->action, res);
+#elif defined CONFIG_NET_CLS_POLICE
+ if (exts->police)
+ return tcf_police(skb, exts->police);
+#endif
+
+ return 0;
+}
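+
+/*
+ * Illustrative sketch (not in the original header): a classifier's
+ * ->classify() typically runs its extensions once a filter has matched
+ * and propagates any predicative result.  "f", "head" and
+ * my_filter_match() are hypothetical names:
+ *
+ *	for (f = head; f; f = f->next) {
+ *		if (my_filter_match(skb, f)) {
+ *			*res = f->res;
+ *			r = tcf_exts_exec(skb, &f->exts, res);
+ *			if (r < 0)
+ *				continue;	/+ treat filter as unmatched +/
+ *			return r;
+ *		}
+ *	}
+ *	return -1;
+ */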
+
+extern int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
+ struct rtattr *rate_tlv, struct tcf_exts *exts,
+ struct tcf_ext_map *map);
+extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+ struct tcf_exts *src);
+extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
+ struct tcf_ext_map *map);
+extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
+ struct tcf_ext_map *map);
+
+/**
+ * struct tcf_pkt_info - packet information
+ */
+struct tcf_pkt_info
+{
+ unsigned char * ptr;
+ int nexthdr;
+};
+
+#ifdef CONFIG_NET_EMATCH
+
+struct tcf_ematch_ops;
+
+/**
+ * struct tcf_ematch - extended match (ematch)
+ *
+ * @matchid: identifier to allow userspace to reidentify a match
+ * @flags: flags specifying attributes and the relation to other matches
+ * @ops: the operations lookup table of the corresponding ematch module
+ * @datalen: length of the ematch specific configuration data
+ * @data: ematch specific data
+ */
+struct tcf_ematch
+{
+ struct tcf_ematch_ops * ops;
+ unsigned long data;
+ unsigned int datalen;
+ u16 matchid;
+ u16 flags;
+};
+
+static inline int tcf_em_is_container(struct tcf_ematch *em)
+{
+ return !em->ops;
+}
+
+static inline int tcf_em_is_simple(struct tcf_ematch *em)
+{
+ return em->flags & TCF_EM_SIMPLE;
+}
+
+static inline int tcf_em_is_inverted(struct tcf_ematch *em)
+{
+ return em->flags & TCF_EM_INVERT;
+}
+
+static inline int tcf_em_last_match(struct tcf_ematch *em)
+{
+ return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
+}
+
+static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
+{
+ if (tcf_em_last_match(em))
+ return 1;
+
+ if (result == 0 && em->flags & TCF_EM_REL_AND)
+ return 1;
+
+ if (result != 0 && em->flags & TCF_EM_REL_OR)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * struct tcf_ematch_tree - ematch tree handle
+ *
+ * @hdr: ematch tree header supplied by userspace
+ * @matches: array of ematches
+ */
+struct tcf_ematch_tree
+{
+ struct tcf_ematch_tree_hdr hdr;
+ struct tcf_ematch * matches;
+
+};
+
+/**
+ * struct tcf_ematch_ops - ematch module operations
+ *
+ * @kind: identifier (kind) of this ematch module
+ * @datalen: length of expected configuration data (optional)
+ * @change: called during validation (optional)
+ * @match: called during ematch tree evaluation, must return 1/0
+ * @destroy: called during destruction (optional)
+ * @dump: called during dumping process (optional)
+ * @owner: owner, must be set to THIS_MODULE
+ * @link: link to previous/next ematch module (internal use)
+ */
+struct tcf_ematch_ops
+{
+ int kind;
+ int datalen;
+ int (*change)(struct tcf_proto *, void *,
+ int, struct tcf_ematch *);
+ int (*match)(struct sk_buff *, struct tcf_ematch *,
+ struct tcf_pkt_info *);
+ void (*destroy)(struct tcf_proto *,
+ struct tcf_ematch *);
+ int (*dump)(struct sk_buff *, struct tcf_ematch *);
+ struct module *owner;
+ struct list_head link;
+};
+
+extern int tcf_em_register(struct tcf_ematch_ops *);
+extern int tcf_em_unregister(struct tcf_ematch_ops *);
+extern int tcf_em_tree_validate(struct tcf_proto *, struct rtattr *,
+ struct tcf_ematch_tree *);
+extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+ struct tcf_pkt_info *);
+
+/**
+ * tcf_em_tree_change - replace ematch tree of a running classifier
+ *
+ * @tp: classifier kind handle
+ * @dst: destination ematch tree variable
+ * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
+ *
+ * This function replaces the ematch tree in @dst with the ematch
+ * tree in @src. The classifier in charge of the ematch tree may be
+ * running.
+ */
+static inline void tcf_em_tree_change(struct tcf_proto *tp,
+ struct tcf_ematch_tree *dst,
+ struct tcf_ematch_tree *src)
+{
+ tcf_tree_lock(tp);
+ memcpy(dst, src, sizeof(*dst));
+ tcf_tree_unlock(tp);
+}
+
+/**
+ * tcf_em_tree_match - evaluate an ematch tree
+ *
+ * @skb: socket buffer of the packet in question
+ * @tree: ematch tree to be used for evaluation
+ * @info: packet information examined by classifier
+ *
+ * This function matches @skb against the ematch tree in @tree by going
+ * through all ematches respecting their logic relations returning
+ * as soon as the result is obvious.
+ *
+ * Returns 1 if the ematch tree as a whole matches, if no ematches are
+ * configured, or if ematch is not enabled in the kernel; otherwise 0 is
+ * returned.
+ */
+static inline int tcf_em_tree_match(struct sk_buff *skb,
+ struct tcf_ematch_tree *tree,
+ struct tcf_pkt_info *info)
+{
+ if (tree->hdr.nmatches)
+ return __tcf_em_tree_match(skb, tree, info);
+ else
+ return 1;
+}
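+
+/*
+ * Illustrative sketch (not in the original header): a classifier keeping
+ * an ematch tree per filter would evaluate it before accepting a match.
+ * "f" is a hypothetical filter with an "ematches" member:
+ *
+ *	struct tcf_pkt_info info = {
+ *		.ptr	 = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK),
+ *		.nexthdr = 0,
+ *	};
+ *
+ *	if (!tcf_em_tree_match(skb, &f->ematches, &info))
+ *		continue;	/+ ematches say no, try the next filter +/
+ */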
+
+#else /* CONFIG_NET_EMATCH */
+
+struct tcf_ematch_tree
+{
+};
+
+#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
+#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0)
+#define tcf_em_tree_dump(skb, t, tlv) (0)
+#define tcf_em_tree_change(tp, dst, src) do { } while(0)
+#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
+
+#endif /* CONFIG_NET_EMATCH */
+
+static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
+{
+ switch (layer) {
+ case TCF_LAYER_LINK:
+ return skb->data;
+ case TCF_LAYER_NETWORK:
+ return skb->nh.raw;
+ case TCF_LAYER_TRANSPORT:
+ return skb->h.raw;
+ }
+
+ return NULL;
+}
+
+static inline int tcf_valid_offset(struct sk_buff *skb, unsigned char *ptr,
+ int len)
+{
+ return unlikely((ptr + len) < skb->tail && ptr > skb->head);
+}
+
+#ifdef CONFIG_NET_CLS_IND
+static inline int
+tcf_change_indev(struct tcf_proto *tp, char *indev, struct rtattr *indev_tlv)
+{
+ if (rtattr_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
+ return -EINVAL;
+ return 0;
+}
+
+static inline int
+tcf_match_indev(struct sk_buff *skb, char *indev)
+{
+ if (0 != indev[0]) {
+ if (NULL == skb->input_dev)
+ return 0;
+ else if (0 != strcmp(indev, skb->input_dev->name))
+ return 0;
+ }
+
+ return 1;
+}
+#endif /* CONFIG_NET_CLS_IND */
+
+#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
new file mode 100644
index 000000000000..87496e3aa330
--- /dev/null
+++ b/include/net/pkt_sched.h
@@ -0,0 +1,249 @@
+#ifndef __NET_PKT_SCHED_H
+#define __NET_PKT_SCHED_H
+
+#include <net/sch_generic.h>
+
+struct qdisc_walker
+{
+ int stop;
+ int skip;
+ int count;
+ int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
+};
+
+extern rwlock_t qdisc_tree_lock;
+
+#define QDISC_ALIGN 32
+#define QDISC_ALIGN_CONST (QDISC_ALIGN - 1)
+
+static inline void *qdisc_priv(struct Qdisc *q)
+{
+ return (char *)q + ((sizeof(struct Qdisc) + QDISC_ALIGN_CONST)
+ & ~QDISC_ALIGN_CONST);
+}
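+
+/*
+ * Illustrative sketch (not in the original header): a queueing discipline
+ * declares its private state size via Qdisc_ops.priv_size and then reaches
+ * that state through qdisc_priv() from its callbacks.  "struct myfifo_data"
+ * and the ops name are hypothetical:
+ *
+ *	struct myfifo_data {
+ *		u32	limit;
+ *	};
+ *
+ *	static int myfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ *	{
+ *		struct myfifo_data *q = qdisc_priv(sch);
+ *		...
+ *	}
+ *
+ *	static struct Qdisc_ops myfifo_qdisc_ops = {
+ *		.id		= "myfifo",
+ *		.priv_size	= sizeof(struct myfifo_data),
+ *		.enqueue	= myfifo_enqueue,
+ *		...
+ *	};
+ */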
+
+/*
+ Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
+
+ Normal IP packet size ~ 512byte, hence:
+
+ 0.5Kbyte/1Mbyte/sec = 0.5msec, so that we need 50usec timer for
+ 10Mbit ethernet.
+
+ 10msec resolution -> <50Kbit/sec.
+
+   The result: the [34]86 is not a good choice for a QoS router :-(
+
+   Things are not so bad, because we may use an artificial
+   clock evaluated by integration of network data flow
+   in the most critical places.
+
+   Note: we do not use fastgettimeofday.
+   The reason is that, when it is not the same thing as
+   gettimeofday, it returns an invalid timestamp which is
+   not updated when net_bh is active.
+ */
+
+/* General note about internal clock.
+
+ Any clock source returns time intervals, measured in units
+ close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
+   microseconds, otherwise something close but different, chosen to minimize
+   arithmetic cost. The usec/internal-units ratio, in numerator/denominator
+   form, may be read from /proc/net/psched.
+ */
+
+
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+
+typedef struct timeval psched_time_t;
+typedef long psched_tdiff_t;
+
+#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
+#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
+#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
+
+#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+
+typedef u64 psched_time_t;
+typedef long psched_tdiff_t;
+
+#ifdef CONFIG_NET_SCH_CLK_JIFFIES
+
+#if HZ < 96
+#define PSCHED_JSCALE 14
+#elif HZ >= 96 && HZ < 192
+#define PSCHED_JSCALE 13
+#elif HZ >= 192 && HZ < 384
+#define PSCHED_JSCALE 12
+#elif HZ >= 384 && HZ < 768
+#define PSCHED_JSCALE 11
+#elif HZ >= 768
+#define PSCHED_JSCALE 10
+#endif
+
+#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
+#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
+#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
+
+#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
+#ifdef CONFIG_NET_SCH_CLK_CPU
+#include <asm/timex.h>
+
+extern psched_tdiff_t psched_clock_per_hz;
+extern int psched_clock_scale;
+extern psched_time_t psched_time_base;
+extern cycles_t psched_time_mark;
+
+#define PSCHED_GET_TIME(stamp) \
+do { \
+ cycles_t cur = get_cycles(); \
+ if (sizeof(cycles_t) == sizeof(u32)) { \
+ if (cur <= psched_time_mark) \
+ psched_time_base += 0x100000000ULL; \
+ psched_time_mark = cur; \
+ (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
+ } else { \
+ (stamp) = cur>>psched_clock_scale; \
+ } \
+} while (0)
+#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
+#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
+
+#endif /* CONFIG_NET_SCH_CLK_CPU */
+
+#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#define PSCHED_TDIFF(tv1, tv2) \
+({ \
+ int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
+ int __delta = (tv1).tv_usec - (tv2).tv_usec; \
+ if (__delta_sec) { \
+ switch (__delta_sec) { \
+ default: \
+ __delta = 0; \
+ case 2: \
+ __delta += 1000000; \
+ case 1: \
+ __delta += 1000000; \
+ } \
+ } \
+ __delta; \
+})
+
+static inline int
+psched_tod_diff(int delta_sec, int bound)
+{
+ int delta;
+
+ if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1)
+ return bound;
+ delta = delta_sec * 1000000;
+ if (delta > bound)
+ delta = bound;
+ return delta;
+}
+
+#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
+({ \
+ int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
+ int __delta = (tv1).tv_usec - (tv2).tv_usec; \
+ switch (__delta_sec) { \
+ default: \
+ __delta = psched_tod_diff(__delta_sec, bound); break; \
+ case 2: \
+ __delta += 1000000; \
+ case 1: \
+ __delta += 1000000; \
+ case 0: ; \
+ } \
+ __delta; \
+})
+
+#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
+ (tv1).tv_sec <= (tv2).tv_sec) || \
+ (tv1).tv_sec < (tv2).tv_sec)
+
+#define PSCHED_TADD2(tv, delta, tv_res) \
+({ \
+ int __delta = (tv).tv_usec + (delta); \
+ (tv_res).tv_sec = (tv).tv_sec; \
+ if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \
+ (tv_res).tv_usec = __delta; \
+})
+
+#define PSCHED_TADD(tv, delta) \
+({ \
+ (tv).tv_usec += (delta); \
+ if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \
+ (tv).tv_usec -= 1000000; } \
+})
+
+/* Set/check that a time is in the "past perfect";
+   this depends on the concrete representation of system time.
+ */
+
+#define PSCHED_SET_PASTPERFECT(t) ((t).tv_sec = 0)
+#define PSCHED_IS_PASTPERFECT(t) ((t).tv_sec == 0)
+
+#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
+
+#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+
+#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
+#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
+ min_t(long long, (tv1) - (tv2), bound)
+
+
+#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
+#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
+#define PSCHED_TADD(tv, delta) ((tv) += (delta))
+#define PSCHED_SET_PASTPERFECT(t) ((t) = 0)
+#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
+#define PSCHED_AUDIT_TDIFF(t)
+
+#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
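+
+/*
+ * Illustrative sketch (not in the original header): regardless of the
+ * clock source selected above, schedulers use the PSCHED_* macros the
+ * same way, e.g. to bound how long a packet must still wait and arm a
+ * watchdog.  "q", "wakeup_time" and "max_delay" are hypothetical
+ * per-qdisc state:
+ *
+ *	psched_time_t now;
+ *	long delay;
+ *
+ *	PSCHED_GET_TIME(now);
+ *	delay = PSCHED_TDIFF_SAFE(wakeup_time, now, max_delay);
+ *	mod_timer(&q->wd_timer, jiffies + PSCHED_US2JIFFIE(delay));
+ */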
+
+extern struct Qdisc noop_qdisc;
+extern struct Qdisc_ops noop_qdisc_ops;
+extern struct Qdisc_ops pfifo_qdisc_ops;
+extern struct Qdisc_ops bfifo_qdisc_ops;
+
+extern int register_qdisc(struct Qdisc_ops *qops);
+extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
+extern void dev_init_scheduler(struct net_device *dev);
+extern void dev_shutdown(struct net_device *dev);
+extern void dev_activate(struct net_device *dev);
+extern void dev_deactivate(struct net_device *dev);
+extern void qdisc_reset(struct Qdisc *qdisc);
+extern void qdisc_destroy(struct Qdisc *qdisc);
+extern struct Qdisc * qdisc_create_dflt(struct net_device *dev,
+ struct Qdisc_ops *ops);
+extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct rtattr *tab);
+extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
+
+extern int qdisc_restart(struct net_device *dev);
+
+static inline void qdisc_run(struct net_device *dev)
+{
+ while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
+ /* NOTHING */;
+}
+
+extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
+ struct tcf_result *res);
+
+/* Calculate the maximal packet size seen by the hard_start_xmit
+   routine of this device.
+ */
+static inline unsigned psched_mtu(struct net_device *dev)
+{
+ unsigned mtu = dev->mtu;
+ return dev->hard_header ? mtu + dev->hard_header_len : mtu;
+}
+
+#endif
diff --git a/include/net/protocol.h b/include/net/protocol.h
new file mode 100644
index 000000000000..357691f6a45f
--- /dev/null
+++ b/include/net/protocol.h
@@ -0,0 +1,99 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ * Alan Cox : Cleaned up, and sorted types.
+ * Pedro Roque : inet6 protocols
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#include <linux/config.h>
+#include <linux/in6.h>
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#include <linux/ipv6.h>
+#endif
+
+#define MAX_INET_PROTOS 256 /* Must be a power of 2 */
+
+
+/* This is used to register protocols. */
+struct net_protocol {
+ int (*handler)(struct sk_buff *skb);
+ void (*err_handler)(struct sk_buff *skb, u32 info);
+ int no_policy;
+};
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+struct inet6_protocol
+{
+ int (*handler)(struct sk_buff **skb, unsigned int *nhoffp);
+
+ void (*err_handler)(struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ int type, int code, int offset,
+ __u32 info);
+ unsigned int flags; /* INET6_PROTO_xxx */
+};
+
+#define INET6_PROTO_NOPOLICY 0x1
+#define INET6_PROTO_FINAL 0x2
+#endif
+
+/* This is used to register socket interfaces for IP protocols. */
+struct inet_protosw {
+ struct list_head list;
+
+ /* These two fields form the lookup key. */
+ unsigned short type; /* This is the 2nd argument to socket(2). */
+ int protocol; /* This is the L4 protocol number. */
+
+ struct proto *prot;
+ struct proto_ops *ops;
+
+ int capability; /* Which (if any) capability do
+ * we need to use this socket
+ * interface?
+ */
+ char no_check; /* checksum on rcv/xmit/none? */
+ unsigned char flags; /* See INET_PROTOSW_* below. */
+};
+#define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */
+#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
+
+extern struct net_protocol *inet_protocol_base;
+extern struct net_protocol *inet_protos[MAX_INET_PROTOS];
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+extern struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+#endif
+
+extern int inet_add_protocol(struct net_protocol *prot, unsigned char num);
+extern int inet_del_protocol(struct net_protocol *prot, unsigned char num);
+extern void inet_register_protosw(struct inet_protosw *p);
+extern void inet_unregister_protosw(struct inet_protosw *p);
+
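+/*
+ * Illustrative sketch (not in the original header): a transport protocol
+ * registers a packet handler for its IP protocol number with
+ * inet_add_protocol().  The my_* names and IPPROTO_MYPROTO are
+ * hypothetical:
+ *
+ *	static struct net_protocol my_protocol = {
+ *		.handler	= my_rcv,
+ *		.err_handler	= my_err,
+ *		.no_policy	= 1,
+ *	};
+ *
+ *	if (inet_add_protocol(&my_protocol, IPPROTO_MYPROTO) < 0)
+ *		printk(KERN_CRIT "my proto: cannot add protocol\n");
+ */
+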
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+extern int inet6_add_protocol(struct inet6_protocol *prot, unsigned char num);
+extern int inet6_del_protocol(struct inet6_protocol *prot, unsigned char num);
+extern void inet6_register_protosw(struct inet_protosw *p);
+extern void inet6_unregister_protosw(struct inet_protosw *p);
+#endif
+
+#endif /* _PROTOCOL_H */
diff --git a/include/net/psnap.h b/include/net/psnap.h
new file mode 100644
index 000000000000..9c94e8f98b36
--- /dev/null
+++ b/include/net/psnap.h
@@ -0,0 +1,7 @@
+#ifndef _NET_PSNAP_H
+#define _NET_PSNAP_H
+
+extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *));
+extern void unregister_snap_client(struct datalink_proto *proto);
+
+#endif
diff --git a/include/net/raw.h b/include/net/raw.h
new file mode 100644
index 000000000000..1c411c45587a
--- /dev/null
+++ b/include/net/raw.h
@@ -0,0 +1,42 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+
+extern struct proto raw_prot;
+
+
+extern void raw_err(struct sock *, struct sk_buff *, u32 info);
+extern int raw_rcv(struct sock *, struct sk_buff *);
+
+/* Note: v4 ICMP wants to get at this stuff, if you change the
+ * hashing mechanism, make sure you update icmp.c as well.
+ */
+#define RAWV4_HTABLE_SIZE MAX_INET_PROTOS
+extern struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE];
+
+extern rwlock_t raw_v4_lock;
+
+
+extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
+ unsigned long raddr, unsigned long laddr,
+ int dif);
+
+extern void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash);
+
+#endif /* _RAW_H */
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
new file mode 100644
index 000000000000..23fd9a6a221a
--- /dev/null
+++ b/include/net/rawv6.h
@@ -0,0 +1,27 @@
+#ifndef _NET_RAWV6_H
+#define _NET_RAWV6_H
+
+#ifdef __KERNEL__
+
+#define RAWV6_HTABLE_SIZE MAX_INET_PROTOS
+extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
+extern rwlock_t raw_v6_lock;
+
+extern void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr);
+
+extern struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
+ struct in6_addr *loc_addr, struct in6_addr *rmt_addr);
+
+extern int rawv6_rcv(struct sock *sk,
+ struct sk_buff *skb);
+
+
+extern void rawv6_err(struct sock *sk,
+ struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ int type, int code,
+ int offset, u32 info);
+
+#endif
+
+#endif
diff --git a/include/net/rose.h b/include/net/rose.h
new file mode 100644
index 000000000000..3249b979605a
--- /dev/null
+++ b/include/net/rose.h
@@ -0,0 +1,235 @@
+/*
+ * Declarations of Rose type objects.
+ *
+ * Jonathan Naylor G4KLX 25/8/96
+ */
+
+#ifndef _ROSE_H
+#define _ROSE_H
+
+#include <linux/rose.h>
+#include <net/sock.h>
+
+#define ROSE_ADDR_LEN 5
+
+#define ROSE_MIN_LEN 3
+
+#define ROSE_GFI 0x10
+#define ROSE_Q_BIT 0x80
+#define ROSE_D_BIT 0x40
+#define ROSE_M_BIT 0x10
+
+#define ROSE_CALL_REQUEST 0x0B
+#define ROSE_CALL_ACCEPTED 0x0F
+#define ROSE_CLEAR_REQUEST 0x13
+#define ROSE_CLEAR_CONFIRMATION 0x17
+#define ROSE_DATA 0x00
+#define ROSE_INTERRUPT 0x23
+#define ROSE_INTERRUPT_CONFIRMATION 0x27
+#define ROSE_RR 0x01
+#define ROSE_RNR 0x05
+#define ROSE_REJ 0x09
+#define ROSE_RESET_REQUEST 0x1B
+#define ROSE_RESET_CONFIRMATION 0x1F
+#define ROSE_REGISTRATION_REQUEST 0xF3
+#define ROSE_REGISTRATION_CONFIRMATION 0xF7
+#define ROSE_RESTART_REQUEST 0xFB
+#define ROSE_RESTART_CONFIRMATION 0xFF
+#define ROSE_DIAGNOSTIC 0xF1
+#define ROSE_ILLEGAL 0xFD
+
+/* Define Link State constants. */
+
+enum {
+ ROSE_STATE_0, /* Ready */
+ ROSE_STATE_1, /* Awaiting Call Accepted */
+ ROSE_STATE_2, /* Awaiting Clear Confirmation */
+ ROSE_STATE_3, /* Data Transfer */
+ ROSE_STATE_4, /* Awaiting Reset Confirmation */
+ ROSE_STATE_5 /* Deferred Call Acceptance */
+};
+
+#define ROSE_DEFAULT_T0 (180 * HZ) /* Default T10 T20 value */
+#define ROSE_DEFAULT_T1 (200 * HZ) /* Default T11 T21 value */
+#define ROSE_DEFAULT_T2 (180 * HZ) /* Default T12 T22 value */
+#define ROSE_DEFAULT_T3 (180 * HZ) /* Default T13 T23 value */
+#define ROSE_DEFAULT_HB (5 * HZ) /* Default Holdback value */
+#define ROSE_DEFAULT_IDLE (0 * 60 * HZ) /* No Activity Timeout - none */
+#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
+#define ROSE_DEFAULT_FAIL_TIMEOUT (120 * HZ) /* Time until link considered usable */
+#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
+#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window size */
+
+#define ROSE_MODULUS 8
+#define ROSE_MAX_PACKET_SIZE 251 /* Maximum packet size */
+
+#define ROSE_COND_ACK_PENDING 0x01
+#define ROSE_COND_PEER_RX_BUSY 0x02
+#define ROSE_COND_OWN_RX_BUSY 0x04
+
+#define FAC_NATIONAL 0x00
+#define FAC_CCITT 0x0F
+
+#define FAC_NATIONAL_RAND 0x7F
+#define FAC_NATIONAL_FLAGS 0x3F
+#define FAC_NATIONAL_DEST_DIGI 0xE9
+#define FAC_NATIONAL_SRC_DIGI 0xEB
+#define FAC_NATIONAL_FAIL_CALL 0xED
+#define FAC_NATIONAL_FAIL_ADD 0xEE
+#define FAC_NATIONAL_DIGIS 0xEF
+
+#define FAC_CCITT_DEST_NSAP 0xC9
+#define FAC_CCITT_SRC_NSAP 0xCB
+
+struct rose_neigh {
+ struct rose_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct net_device *dev;
+ unsigned short count;
+ unsigned short use;
+ unsigned int number;
+ char restarted;
+ char dce_mode;
+ char loopback;
+ struct sk_buff_head queue;
+ struct timer_list t0timer;
+ struct timer_list ftimer;
+};
+
+struct rose_node {
+ struct rose_node *next;
+ rose_address address;
+ unsigned short mask;
+ unsigned char count;
+ char loopback;
+ struct rose_neigh *neighbour[3];
+};
+
+struct rose_route {
+ struct rose_route *next;
+ unsigned int lci1, lci2;
+ rose_address src_addr, dest_addr;
+ ax25_address src_call, dest_call;
+ struct rose_neigh *neigh1, *neigh2;
+ unsigned int rand;
+};
+
+struct rose_sock {
+ struct sock sock;
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ struct rose_neigh *neighbour;
+ struct net_device *device;
+ unsigned int lci, rand;
+ unsigned char state, condition, qbitincl, defer;
+ unsigned char cause, diagnostic;
+ unsigned short vs, vr, va, vl;
+ unsigned long t1, t2, t3, hb, idle;
+#ifdef M_BIT
+ unsigned short fraglen;
+ struct sk_buff_head frag_queue;
+#endif
+ struct sk_buff_head ack_queue;
+ struct rose_facilities_struct facilities;
+ struct timer_list timer;
+ struct timer_list idletimer;
+};
+
+#define rose_sk(sk) ((struct rose_sock *)(sk))
+
+/* af_rose.c */
+extern ax25_address rose_callsign;
+extern int sysctl_rose_restart_request_timeout;
+extern int sysctl_rose_call_request_timeout;
+extern int sysctl_rose_reset_request_timeout;
+extern int sysctl_rose_clear_request_timeout;
+extern int sysctl_rose_no_activity_timeout;
+extern int sysctl_rose_ack_hold_back_timeout;
+extern int sysctl_rose_routing_control;
+extern int sysctl_rose_link_fail_timeout;
+extern int sysctl_rose_maximum_vcs;
+extern int sysctl_rose_window_size;
+extern int rosecmp(rose_address *, rose_address *);
+extern int rosecmpm(rose_address *, rose_address *, unsigned short);
+extern const char *rose2asc(const rose_address *);
+extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+extern void rose_kill_by_neigh(struct rose_neigh *);
+extern unsigned int rose_new_lci(struct rose_neigh *);
+extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
+extern void rose_destroy_socket(struct sock *);
+
+/* rose_dev.c */
+extern void rose_setup(struct net_device *);
+
+/* rose_in.c */
+extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* rose_link.c */
+extern void rose_start_ftimer(struct rose_neigh *);
+extern void rose_stop_ftimer(struct rose_neigh *);
+extern void rose_stop_t0timer(struct rose_neigh *);
+extern int rose_ftimer_running(struct rose_neigh *);
+extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
+extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
+extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+
+/* rose_loopback.c */
+extern void rose_loopback_init(void);
+extern void rose_loopback_clear(void);
+extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+
+/* rose_out.c */
+extern void rose_kick(struct sock *);
+extern void rose_enquiry_response(struct sock *);
+
+/* rose_route.c */
+extern struct rose_neigh *rose_loopback_neigh;
+extern struct file_operations rose_neigh_fops;
+extern struct file_operations rose_nodes_fops;
+extern struct file_operations rose_routes_fops;
+
+extern int rose_add_loopback_neigh(void);
+extern int rose_add_loopback_node(rose_address *);
+extern void rose_del_loopback_node(rose_address *);
+extern void rose_rt_device_down(struct net_device *);
+extern void rose_link_device_down(struct net_device *);
+extern struct net_device *rose_dev_first(void);
+extern struct net_device *rose_dev_get(rose_address *);
+extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *);
+extern int rose_rt_ioctl(unsigned int, void __user *);
+extern void rose_link_failed(ax25_cb *, int);
+extern int rose_route_frame(struct sk_buff *, ax25_cb *);
+extern void rose_rt_free(void);
+
+/* rose_subr.c */
+extern void rose_clear_queues(struct sock *);
+extern void rose_frames_acked(struct sock *, unsigned short);
+extern void rose_requeue_frames(struct sock *);
+extern int rose_validate_nr(struct sock *, unsigned short);
+extern void rose_write_internal(struct sock *, int);
+extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
+extern void rose_disconnect(struct sock *, int, int, int);
+
+/* rose_timer.c */
+extern void rose_start_heartbeat(struct sock *);
+extern void rose_start_t1timer(struct sock *);
+extern void rose_start_t2timer(struct sock *);
+extern void rose_start_t3timer(struct sock *);
+extern void rose_start_hbtimer(struct sock *);
+extern void rose_start_idletimer(struct sock *);
+extern void rose_stop_heartbeat(struct sock *);
+extern void rose_stop_timer(struct sock *);
+extern void rose_stop_idletimer(struct sock *);
+
+/* sysctl_net_rose.c */
+extern void rose_register_sysctl(void);
+extern void rose_unregister_sysctl(void);
+
+#endif
diff --git a/include/net/route.h b/include/net/route.h
new file mode 100644
index 000000000000..22da7579d5de
--- /dev/null
+++ b/include/net/route.h
@@ -0,0 +1,205 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ * Alexey Kuznetsov: Major changes for new routing code.
+ * Mike McLagan : Routing by source
+ * Robert Olsson : Added rt_cache statistics
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+#include <linux/config.h>
+#include <net/dst.h>
+#include <net/inetpeer.h>
+#include <net/flow.h>
+#include <linux/in_route.h>
+#include <linux/rtnetlink.h>
+#include <linux/route.h>
+#include <linux/ip.h>
+#include <linux/cache.h>
+
+#ifndef __KERNEL__
+#warning This file is not supposed to be used outside of kernel.
+#endif
+
+#define RTO_ONLINK 0x01
+
+#define RTO_CONN 0
+/* RTO_CONN is not used (it is an alias for 0), but it is preserved so as
+ * not to break modules that refer to it. */
+
+#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+
+struct fib_nh;
+struct inet_peer;
+struct rtable
+{
+ union
+ {
+ struct dst_entry dst;
+ struct rtable *rt_next;
+ } u;
+
+ struct in_device *idev;
+
+ unsigned rt_flags;
+ __u16 rt_type;
+ __u16 rt_multipath_alg;
+
+ __u32 rt_dst; /* Path destination */
+ __u32 rt_src; /* Path source */
+ int rt_iif;
+
+ /* Info on neighbour */
+ __u32 rt_gateway;
+
+ /* Cache lookup keys */
+ struct flowi fl;
+
+ /* Miscellaneous cached information */
+ __u32 rt_spec_dst; /* RFC1122 specific destination */
+ struct inet_peer *peer; /* long-living peer info */
+};
+
+struct ip_rt_acct
+{
+ __u32 o_bytes;
+ __u32 o_packets;
+ __u32 i_bytes;
+ __u32 i_packets;
+};
+
+struct rt_cache_stat
+{
+ unsigned int in_hit;
+ unsigned int in_slow_tot;
+ unsigned int in_slow_mc;
+ unsigned int in_no_route;
+ unsigned int in_brd;
+ unsigned int in_martian_dst;
+ unsigned int in_martian_src;
+ unsigned int out_hit;
+ unsigned int out_slow_tot;
+ unsigned int out_slow_mc;
+ unsigned int gc_total;
+ unsigned int gc_ignored;
+ unsigned int gc_goal_miss;
+ unsigned int gc_dst_overflow;
+ unsigned int in_hlist_search;
+ unsigned int out_hlist_search;
+};
+
+extern struct rt_cache_stat *rt_cache_stat;
+#define RT_CACHE_STAT_INC(field) \
+ (per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
+
+extern struct ip_rt_acct *ip_rt_acct;
+
+struct in_device;
+extern int ip_rt_init(void);
+extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw,
+ u32 src, u8 tos, struct net_device *dev);
+extern void ip_rt_advice(struct rtable **rp, int advice);
+extern void rt_cache_flush(int how);
+extern int __ip_route_output_key(struct rtable **, const struct flowi *flp);
+extern int ip_route_output_key(struct rtable **, struct flowi *flp);
+extern int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
+extern int ip_route_input(struct sk_buff*, u32 dst, u32 src, u8 tos, struct net_device *devin);
+extern unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu);
+extern void ip_rt_send_redirect(struct sk_buff *skb);
+
+extern unsigned inet_addr_type(u32 addr);
+extern void ip_rt_multicast_event(struct in_device *);
+extern int ip_rt_ioctl(unsigned int cmd, void __user *arg);
+extern void ip_rt_get_source(u8 *src, struct rtable *rt);
+extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
+
+static inline void ip_rt_put(struct rtable * rt)
+{
+ if (rt)
+ dst_release(&rt->u.dst);
+}
+
+#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
+
+extern __u8 ip_tos2prio[16];
+
+static inline char rt_tos2priority(u8 tos)
+{
+ return ip_tos2prio[IPTOS_TOS(tos)>>1];
+}
+
+static inline int ip_route_connect(struct rtable **rp, u32 dst,
+ u32 src, u32 tos, int oif, u8 protocol,
+ u16 sport, u16 dport, struct sock *sk)
+{
+ struct flowi fl = { .oif = oif,
+ .nl_u = { .ip4_u = { .daddr = dst,
+ .saddr = src,
+ .tos = tos } },
+ .proto = protocol,
+ .uli_u = { .ports =
+ { .sport = sport,
+ .dport = dport } } };
+
+ int err;
+ if (!dst || !src) {
+ err = __ip_route_output_key(rp, &fl);
+ if (err)
+ return err;
+ fl.fl4_dst = (*rp)->rt_dst;
+ fl.fl4_src = (*rp)->rt_src;
+ ip_rt_put(*rp);
+ *rp = NULL;
+ }
+ return ip_route_output_flow(rp, &fl, sk, 0);
+}
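+
+/*
+ * Illustrative sketch (not in the original header): a connect() path fills
+ * in the addresses and ports it already knows and lets ip_route_connect()
+ * resolve the rest; on success the route carries the chosen source address.
+ * "daddr", "saddr", "sport" and "dport" are hypothetical local variables:
+ *
+ *	struct rtable *rt;
+ *	int err;
+ *
+ *	err = ip_route_connect(&rt, daddr, saddr, RT_CONN_FLAGS(sk),
+ *			       sk->sk_bound_dev_if, IPPROTO_TCP,
+ *			       sport, dport, sk);
+ *	if (err)
+ *		return err;
+ *	saddr = rt->rt_src;	/+ source address selected by routing +/
+ */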
+
+static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport,
+ struct sock *sk)
+{
+ if (sport != (*rp)->fl.fl_ip_sport ||
+ dport != (*rp)->fl.fl_ip_dport) {
+ struct flowi fl;
+
+ memcpy(&fl, &(*rp)->fl, sizeof(fl));
+ fl.fl_ip_sport = sport;
+ fl.fl_ip_dport = dport;
+#if defined(CONFIG_IP_ROUTE_MULTIPATH_CACHED)
+ fl.flags |= FLOWI_FLAG_MULTIPATHOLDROUTE;
+#endif
+ ip_rt_put(*rp);
+ *rp = NULL;
+ return ip_route_output_flow(rp, &fl, sk, 0);
+ }
+ return 0;
+}
+
+extern void rt_bind_peer(struct rtable *rt, int create);
+
+static inline struct inet_peer *rt_get_peer(struct rtable *rt)
+{
+ if (rt->peer)
+ return rt->peer;
+
+ rt_bind_peer(rt, 0);
+ return rt->peer;
+}
+
+#endif /* _ROUTE_H */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
new file mode 100644
index 000000000000..c57504b3b518
--- /dev/null
+++ b/include/net/sch_generic.h
@@ -0,0 +1,175 @@
+#ifndef __NET_SCHED_GENERIC_H
+#define __NET_SCHED_GENERIC_H
+
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/pkt_cls.h>
+#include <net/gen_stats.h>
+
+struct Qdisc_ops;
+struct qdisc_walker;
+struct tcf_walker;
+struct module;
+
+struct qdisc_rate_table
+{
+ struct tc_ratespec rate;
+ u32 data[256];
+ struct qdisc_rate_table *next;
+ int refcnt;
+};
+
+struct Qdisc
+{
+ int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
+ struct sk_buff * (*dequeue)(struct Qdisc *dev);
+ unsigned flags;
+#define TCQ_F_BUILTIN 1
+#define TCQ_F_THROTTLED 2
+#define TCQ_F_INGRESS 4
+ int padded;
+ struct Qdisc_ops *ops;
+ u32 handle;
+ u32 parent;
+ atomic_t refcnt;
+ struct sk_buff_head q;
+ struct net_device *dev;
+ struct list_head list;
+
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
+ spinlock_t *stats_lock;
+ struct rcu_head q_rcu;
+ int (*reshape_fail)(struct sk_buff *skb,
+ struct Qdisc *q);
+
+ /* This field is deprecated, but it is still used by CBQ
+ * and it will live until a better solution is invented.
+ */
+ struct Qdisc *__parent;
+};
+
+struct Qdisc_class_ops
+{
+ /* Child qdisc manipulation */
+ int (*graft)(struct Qdisc *, unsigned long cl,
+ struct Qdisc *, struct Qdisc **);
+ struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
+
+ /* Class manipulation routines */
+ unsigned long (*get)(struct Qdisc *, u32 classid);
+ void (*put)(struct Qdisc *, unsigned long);
+ int (*change)(struct Qdisc *, u32, u32,
+ struct rtattr **, unsigned long *);
+ int (*delete)(struct Qdisc *, unsigned long);
+ void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
+
+ /* Filter manipulation */
+ struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
+ u32 classid);
+ void (*unbind_tcf)(struct Qdisc *, unsigned long);
+
+ /* rtnetlink specific */
+ int (*dump)(struct Qdisc *, unsigned long,
+ struct sk_buff *skb, struct tcmsg*);
+ int (*dump_stats)(struct Qdisc *, unsigned long,
+ struct gnet_dump *);
+};
+
+struct Qdisc_ops
+{
+ struct Qdisc_ops *next;
+ struct Qdisc_class_ops *cl_ops;
+ char id[IFNAMSIZ];
+ int priv_size;
+
+ int (*enqueue)(struct sk_buff *, struct Qdisc *);
+ struct sk_buff * (*dequeue)(struct Qdisc *);
+ int (*requeue)(struct sk_buff *, struct Qdisc *);
+ unsigned int (*drop)(struct Qdisc *);
+
+ int (*init)(struct Qdisc *, struct rtattr *arg);
+ void (*reset)(struct Qdisc *);
+ void (*destroy)(struct Qdisc *);
+ int (*change)(struct Qdisc *, struct rtattr *arg);
+
+ int (*dump)(struct Qdisc *, struct sk_buff *);
+ int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+
+ struct module *owner;
+};
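+
+/* A minimal sketch (illustrative only, not a registered discipline): a
+ * trivial FIFO could implement the enqueue/dequeue callbacks declared in
+ * Qdisc_ops using the skb queue embedded in struct Qdisc above.
+ */
+static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ __skb_queue_tail(&sch->q, skb);
+ sch->qstats.backlog += skb->len;
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return NET_XMIT_SUCCESS;
+}
+
+static inline struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ if (skb)
+  sch->qstats.backlog -= skb->len;
+ return skb;
+}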
+
+
+struct tcf_result
+{
+ unsigned long class;
+ u32 classid;
+};
+
+struct tcf_proto_ops
+{
+ struct tcf_proto_ops *next;
+ char kind[IFNAMSIZ];
+
+ int (*classify)(struct sk_buff*, struct tcf_proto*,
+ struct tcf_result *);
+ int (*init)(struct tcf_proto*);
+ void (*destroy)(struct tcf_proto*);
+
+ unsigned long (*get)(struct tcf_proto*, u32 handle);
+ void (*put)(struct tcf_proto*, unsigned long);
+ int (*change)(struct tcf_proto*, unsigned long,
+ u32 handle, struct rtattr **,
+ unsigned long *);
+ int (*delete)(struct tcf_proto*, unsigned long);
+ void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+
+ /* rtnetlink specific */
+ int (*dump)(struct tcf_proto*, unsigned long,
+ struct sk_buff *skb, struct tcmsg*);
+
+ struct module *owner;
+};
+
+struct tcf_proto
+{
+ /* Fast access part */
+ struct tcf_proto *next;
+ void *root;
+ int (*classify)(struct sk_buff*, struct tcf_proto*,
+ struct tcf_result *);
+ u32 protocol;
+
+ /* All the rest */
+ u32 prio;
+ u32 classid;
+ struct Qdisc *q;
+ void *data;
+ struct tcf_proto_ops *ops;
+};
+
+
+extern void qdisc_lock_tree(struct net_device *dev);
+extern void qdisc_unlock_tree(struct net_device *dev);
+
+#define sch_tree_lock(q) qdisc_lock_tree((q)->dev)
+#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev)
+#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev)
+#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev)
+
+static inline void
+tcf_destroy(struct tcf_proto *tp)
+{
+ tp->ops->destroy(tp);
+ module_put(tp->ops->owner);
+ kfree(tp);
+}
+
+#endif
diff --git a/include/net/scm.h b/include/net/scm.h
new file mode 100644
index 000000000000..c3fa3d5ab606
--- /dev/null
+++ b/include/net/scm.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_NET_SCM_H
+#define __LINUX_NET_SCM_H
+
+#include <linux/limits.h>
+#include <linux/net.h>
+
+/* Well, we should have at least one descriptor open
+ * to accept passed FDs 8)
+ */
+#define SCM_MAX_FD (OPEN_MAX-1)
+
+struct scm_fp_list
+{
+ int count;
+ struct file *fp[SCM_MAX_FD];
+};
+
+struct scm_cookie
+{
+ struct ucred creds; /* Skb credentials */
+ struct scm_fp_list *fp; /* Passed files */
+ unsigned long seq; /* Connection seqno */
+};
+
+extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+extern void __scm_destroy(struct scm_cookie *scm);
+extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+
+static __inline__ void scm_destroy(struct scm_cookie *scm)
+{
+ if (scm && scm->fp)
+ __scm_destroy(scm);
+}
+
+static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm)
+{
+ memset(scm, 0, sizeof(*scm));
+ scm->creds.uid = current->uid;
+ scm->creds.gid = current->gid;
+ scm->creds.pid = current->tgid;
+ if (msg->msg_controllen <= 0)
+ return 0;
+ return __scm_send(sock, msg, scm);
+}
+
+static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!msg->msg_control)
+ {
+ if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
+ msg->msg_flags |= MSG_CTRUNC;
+ scm_destroy(scm);
+ return;
+ }
+
+ if (test_bit(SOCK_PASSCRED, &sock->flags))
+ put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+
+ if (!scm->fp)
+ return;
+
+ scm_detach_fds(msg, scm);
+}
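+
+/* A minimal usage sketch (hypothetical sendmsg path, for illustration only):
+ * gather credentials and any passed files from the control messages, do the
+ * actual transmit, then drop the references held in the cookie.
+ */
+static __inline__ int example_scm_sendmsg(struct socket *sock, struct msghdr *msg)
+{
+ struct scm_cookie scm;
+ int err = scm_send(sock, msg, &scm);
+
+ if (err < 0)
+  return err;
+ /* ... queue the data, attaching scm.fp / scm.creds as needed ... */
+ scm_destroy(&scm);
+ return 0;
+}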
+
+
+#endif /* __LINUX_NET_SCM_H */
+
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
new file mode 100644
index 000000000000..ebc5282e6d58
--- /dev/null
+++ b/include/net/sctp/command.h
@@ -0,0 +1,211 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (C) 1999-2001 Cisco, Motorola
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * These are the definitions needed for the command object.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * the SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to one of the
+ * following email addresses:
+ *
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+
+#ifndef __net_sctp_command_h__
+#define __net_sctp_command_h__
+
+#include <net/sctp/constants.h>
+#include <net/sctp/structs.h>
+
+
+typedef enum {
+ SCTP_CMD_NOP = 0, /* Do nothing. */
+ SCTP_CMD_NEW_ASOC, /* Register a new association. */
+ SCTP_CMD_DELETE_TCB, /* Delete the current association. */
+ SCTP_CMD_NEW_STATE, /* Enter a new state. */
+ SCTP_CMD_REPORT_TSN, /* Record the arrival of a TSN. */
+ SCTP_CMD_GEN_SACK, /* Send a Selective ACK (maybe). */
+ SCTP_CMD_PROCESS_SACK, /* Process an inbound SACK. */
+ SCTP_CMD_GEN_INIT_ACK, /* Generate an INIT ACK chunk. */
+ SCTP_CMD_PEER_INIT, /* Process an INIT from the peer. */
+ SCTP_CMD_GEN_COOKIE_ECHO, /* Generate a COOKIE ECHO chunk. */
+ SCTP_CMD_CHUNK_ULP, /* Send a chunk to the sockets layer. */
+ SCTP_CMD_EVENT_ULP, /* Send a notification to the sockets layer. */
+ SCTP_CMD_REPLY, /* Send a chunk to our peer. */
+ SCTP_CMD_SEND_PKT, /* Send a full packet to our peer. */
+ SCTP_CMD_RETRAN, /* Mark a transport for retransmission. */
+ SCTP_CMD_ECN_CE, /* Do delayed CE processing. */
+ SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
+ SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
+ SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
+ SCTP_CMD_TIMER_STOP, /* Stop a timer. */
+ SCTP_CMD_COUNTER_RESET, /* Reset a counter. */
+ SCTP_CMD_COUNTER_INC, /* Increment a counter. */
+ SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
+ SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
+ SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
+ SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
+ SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
+ SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
+ SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer. */
+ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_TRANSPORT_RESET, /* Reset the status of a transport. */
+ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
+ SCTP_CMD_REPORT_ERROR, /* Pass this error back out of the sm. */
+ SCTP_CMD_REPORT_BAD_TAG, /* Verification tags didn't match. */
+ SCTP_CMD_PROCESS_CTSN, /* Side effect from shutdown. */
+ SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */
+ SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */
+ SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */
+ SCTP_CMD_UPDATE_ASSOC, /* Update association information. */
+ SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
+ SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
+ SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
+ SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
+ SCTP_CMD_RENEGE, /* Renege data on an association. */
+ SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
+ SCTP_CMD_PROCESS_OPERR, /* Process an ERROR chunk. */
+ SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
+ SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
+ SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
+ SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
+ SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
+ SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
+ SCTP_CMD_LAST
+} sctp_verb_t;
+
+#define SCTP_CMD_MAX (SCTP_CMD_LAST - 1)
+#define SCTP_CMD_NUM_VERBS (SCTP_CMD_MAX + 1)
+
+/* How many commands can you put in an sctp_cmd_seq_t?
+ * This is a rather arbitrary number, ideally derived from a careful
+ * analysis of the state functions, but in reality just taken from
+ * thin air in the hopes that we don't trigger a kernel panic.
+ */
+#define SCTP_MAX_NUM_COMMANDS 14
+
+typedef union {
+ __s32 i32;
+ __u32 u32;
+ __u16 u16;
+ __u8 u8;
+ int error;
+ sctp_state_t state;
+ sctp_event_timeout_t to;
+ sctp_counter_t counter;
+ void *ptr;
+ struct sctp_chunk *chunk;
+ struct sctp_association *asoc;
+ struct sctp_transport *transport;
+ struct sctp_bind_addr *bp;
+ sctp_init_chunk_t *init;
+ struct sctp_ulpevent *ulpevent;
+ struct sctp_packet *packet;
+ sctp_sackhdr_t *sackh;
+} sctp_arg_t;
+
+/* We are simulating ML type constructors here.
+ *
+ * SCTP_ARG_CONSTRUCTOR(NAME, TYPE, ELT) builds a function called
+ * SCTP_NAME() which takes an argument of type TYPE and returns an
+ * sctp_arg_t. It does this by inserting the sole argument into the
+ * ELT union element of a local sctp_arg_t.
+ *
+ * E.g., SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) builds SCTP_I32(arg),
+ * which takes an __s32 and returns an sctp_arg_t containing the
+ * __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg.
+ */
+static inline sctp_arg_t SCTP_NULL(void)
+{
+ sctp_arg_t retval; retval.ptr = NULL; return retval;
+}
+static inline sctp_arg_t SCTP_NOFORCE(void)
+{
+ sctp_arg_t retval; retval.i32 = 0; return retval;
+}
+static inline sctp_arg_t SCTP_FORCE(void)
+{
+ sctp_arg_t retval; retval.i32 = 1; return retval;
+}
+
+#define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
+static inline sctp_arg_t \
+SCTP_## name (type arg) \
+{ sctp_arg_t retval; retval.elt = arg; return retval; }
+
+SCTP_ARG_CONSTRUCTOR(I32, __s32, i32)
+SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
+SCTP_ARG_CONSTRUCTOR(U16, __u16, u16)
+SCTP_ARG_CONSTRUCTOR(U8, __u8, u8)
+SCTP_ARG_CONSTRUCTOR(ERROR, int, error)
+SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state)
+SCTP_ARG_CONSTRUCTOR(COUNTER, sctp_counter_t, counter)
+SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to)
+SCTP_ARG_CONSTRUCTOR(PTR, void *, ptr)
+SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
+SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
+SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
+SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp)
+SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init)
+SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
+SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
+SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
+
+typedef struct {
+ sctp_arg_t obj;
+ sctp_verb_t verb;
+} sctp_cmd_t;
+
+typedef struct {
+ sctp_cmd_t cmds[SCTP_MAX_NUM_COMMANDS];
+ __u8 next_free_slot;
+ __u8 next_cmd;
+} sctp_cmd_seq_t;
+
+
+/* Initialize a block of memory as a command sequence.
+ * Return 0 if the initialization fails.
+ */
+int sctp_init_cmd_seq(sctp_cmd_seq_t *seq);
+
+/* Add a command to an sctp_cmd_seq_t.
+ * Return 0 if the command sequence is full.
+ *
+ * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
+ * to wrap data which goes in the obj argument.
+ */
+int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj);
+
+/* Return the next command structure in an sctp_cmd_seq.
+ * Return NULL at the end of the sequence.
+ */
+sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq);
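+
+/* A minimal usage sketch (illustrative only): a state function queues side
+ * effects by wrapping each argument with one of the SCTP_* constructors
+ * defined above; 0 means the sequence could not take the command.
+ */
+static inline int example_queue_side_effects(sctp_cmd_seq_t *seq,
+         struct sctp_chunk *repl)
+{
+ if (!sctp_init_cmd_seq(seq))
+  return 0;
+ if (!sctp_add_cmd(seq, SCTP_CMD_NEW_STATE,
+     SCTP_STATE(SCTP_STATE_ESTABLISHED)))
+  return 0;
+ return sctp_add_cmd(seq, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+}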
+
+#endif /* __net_sctp_command_h__ */
+
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
new file mode 100644
index 000000000000..2b76c0f4babc
--- /dev/null
+++ b/include/net/sctp/constants.h
@@ -0,0 +1,432 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Randall Stewart <randall@stewart.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@motorola.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Sridhar Samudrala <samudrala@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __sctp_constants_h__
+#define __sctp_constants_h__
+
+#include <linux/tcp.h> /* For TCP states used in sctp_sock_state_t */
+#include <linux/sctp.h>
+#include <linux/ipv6.h> /* For ipv6hdr. */
+#include <net/sctp/user.h>
+
+/* Value used for stream negotiation. */
+enum { SCTP_MAX_STREAM = 0xffff };
+enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
+enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
+
+/* Since CIDs are sparse, we need all four of the following
+ * symbols. CIDs are dense through SCTP_CID_BASE_MAX.
+ */
+#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE
+#define SCTP_CID_MAX SCTP_CID_ASCONF_ACK
+
+#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
+#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + 2)
+
+#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
+
+#define SCTP_NUM_PRSCTP_CHUNK_TYPES 1
+
+/* These are the different flavours of event. */
+typedef enum {
+
+ SCTP_EVENT_T_CHUNK = 1,
+ SCTP_EVENT_T_TIMEOUT,
+ SCTP_EVENT_T_OTHER,
+ SCTP_EVENT_T_PRIMITIVE
+
+} sctp_event_t;
+
+#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE
+#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1)
+
+/* As a convenience for the state machine, we append SCTP_EVENT_* and
+ * SCTP_ULP_* to the list of possible chunks.
+ */
+
+typedef enum {
+ SCTP_EVENT_TIMEOUT_NONE = 0,
+ SCTP_EVENT_TIMEOUT_T1_COOKIE,
+ SCTP_EVENT_TIMEOUT_T1_INIT,
+ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
+ SCTP_EVENT_TIMEOUT_T3_RTX,
+ SCTP_EVENT_TIMEOUT_T4_RTO,
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
+ SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ SCTP_EVENT_TIMEOUT_SACK,
+ SCTP_EVENT_TIMEOUT_AUTOCLOSE,
+} sctp_event_timeout_t;
+
+#define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE
+#define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1)
+
+typedef enum {
+ SCTP_EVENT_NO_PENDING_TSN = 0,
+ SCTP_EVENT_ICMP_PROTO_UNREACH,
+} sctp_event_other_t;
+
+#define SCTP_EVENT_OTHER_MAX SCTP_EVENT_ICMP_PROTO_UNREACH
+#define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1)
+
+/* These are primitive requests from the ULP. */
+typedef enum {
+ SCTP_PRIMITIVE_ASSOCIATE = 0,
+ SCTP_PRIMITIVE_SHUTDOWN,
+ SCTP_PRIMITIVE_ABORT,
+ SCTP_PRIMITIVE_SEND,
+ SCTP_PRIMITIVE_REQUESTHEARTBEAT,
+ SCTP_PRIMITIVE_ASCONF,
+} sctp_event_primitive_t;
+
+#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_ASCONF
+#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
+
+/* We define here a utility type for manipulating subtypes.
+ * The subtype constructors all work like this:
+ *
+ * sctp_subtype_t foo = SCTP_ST_CHUNK(SCTP_CID_INIT);
+ */
+
+typedef union {
+ sctp_cid_t chunk;
+ sctp_event_timeout_t timeout;
+ sctp_event_other_t other;
+ sctp_event_primitive_t primitive;
+} sctp_subtype_t;
+
+#define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
+static inline sctp_subtype_t \
+SCTP_ST_## _name (_type _arg) \
+{ sctp_subtype_t _retval; _retval._elt = _arg; return _retval; }
+
+SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, sctp_cid_t, chunk)
+SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, sctp_event_timeout_t, timeout)
+SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other)
+SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
+
+
+#define sctp_chunk_is_control(a) (a->chunk_hdr->type != SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+
+/* Calculate the actual data size in a data chunk */
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
+ - (unsigned long)(c->chunk_hdr)\
+ - sizeof(sctp_data_chunk_t)))
+
+#define SCTP_MAX_ERROR_CAUSE SCTP_ERROR_NONEXIST_IP
+#define SCTP_NUM_ERROR_CAUSE 10
+
+/* Internal error codes */
+typedef enum {
+
+ SCTP_IERROR_NO_ERROR = 0,
+ SCTP_IERROR_BASE = 1000,
+ SCTP_IERROR_NO_COOKIE,
+ SCTP_IERROR_BAD_SIG,
+ SCTP_IERROR_STALE_COOKIE,
+ SCTP_IERROR_NOMEM,
+ SCTP_IERROR_MALFORMED,
+ SCTP_IERROR_BAD_TAG,
+ SCTP_IERROR_BIG_GAP,
+ SCTP_IERROR_DUP_TSN,
+ SCTP_IERROR_HIGH_TSN,
+ SCTP_IERROR_IGNORE_TSN,
+ SCTP_IERROR_NO_DATA,
+ SCTP_IERROR_BAD_STREAM,
+ SCTP_IERROR_BAD_PORTS,
+
+} sctp_ierror_t;
+
+
+
+/* SCTP state defines for internal state machine */
+typedef enum {
+
+ SCTP_STATE_EMPTY = 0,
+ SCTP_STATE_CLOSED = 1,
+ SCTP_STATE_COOKIE_WAIT = 2,
+ SCTP_STATE_COOKIE_ECHOED = 3,
+ SCTP_STATE_ESTABLISHED = 4,
+ SCTP_STATE_SHUTDOWN_PENDING = 5,
+ SCTP_STATE_SHUTDOWN_SENT = 6,
+ SCTP_STATE_SHUTDOWN_RECEIVED = 7,
+ SCTP_STATE_SHUTDOWN_ACK_SENT = 8,
+
+} sctp_state_t;
+
+#define SCTP_STATE_MAX SCTP_STATE_SHUTDOWN_ACK_SENT
+#define SCTP_STATE_NUM_STATES (SCTP_STATE_MAX + 1)
+
+/* These are values for sk->sk_state.
+ * For a UDP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations and can initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it is a peeled off
+ * socket with one association.
+ * For a TCP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations, but cannot initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
+ * association.
+ */
+typedef enum {
+ SCTP_SS_CLOSED = TCP_CLOSE,
+ SCTP_SS_LISTENING = TCP_LISTEN,
+ SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
+ SCTP_SS_ESTABLISHED = TCP_ESTABLISHED,
+ SCTP_SS_DISCONNECTING = TCP_CLOSING,
+} sctp_sock_state_t;
+
+/* These functions map various type to printable names. */
+const char *sctp_cname(const sctp_subtype_t); /* chunk types */
+const char *sctp_oname(const sctp_subtype_t); /* other events */
+const char *sctp_tname(const sctp_subtype_t); /* timeouts */
+const char *sctp_pname(const sctp_subtype_t); /* primitives */
+
+/* This is a table of printable names of sctp_state_t's. */
+extern const char *sctp_state_tbl[], *sctp_evttype_tbl[], *sctp_status_tbl[];
+
+/* Maximum chunk length considering padding requirements. */
+enum { SCTP_MAX_CHUNK_LEN = ((1<<16) - sizeof(__u32)) };
+
+/* Encourage Cookie-Echo bundling by pre-fragmenting chunks a little
+ * harder (until reaching ESTABLISHED state).
+ */
+enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
+
+/* Guess at how big to make the TSN mapping array.
+ * We guarantee that we can handle at least this big a gap between the
+ * cumulative ACK and the highest TSN. In practice, we can often
+ * handle up to twice this value.
+ *
+ * NEVER make this more than 32767 (2^15-1). The Gap Ack Blocks in a
+ * SACK (see section 3.3.4) are only 16 bits, so 2*SCTP_TSN_MAP_SIZE
+ * must be less than 65535 (2^16 - 1), or we will have overflow
+ * problems creating SACK's.
+ */
+#define SCTP_TSN_MAP_SIZE 2048
+#define SCTP_TSN_MAX_GAP 65535
+
+/* We will not record more than this many duplicate TSNs between two
+ * SACKs. The minimum PMTU is 576. Remove all the headers and there
+ * is enough room for 131 duplicate reports. Round down to the
+ * nearest power of 2.
+ */
+enum { SCTP_MIN_PMTU = 576 };
+enum { SCTP_MAX_DUP_TSNS = 16 };
+enum { SCTP_MAX_GABS = 16 };
+
+typedef enum {
+ SCTP_COUNTER_INIT_ERROR,
+} sctp_counter_t;
+
+/* How many counters does an association need? */
+#define SCTP_NUMBER_COUNTERS 5
+
+/* Here we define the default timers. */
+
+/* cookie timer def = 3 seconds */
+#define SCTP_DEFAULT_TIMEOUT_T1_COOKIE (3 * HZ)
+
+/* init timer def = 3 seconds */
+#define SCTP_DEFAULT_TIMEOUT_T1_INIT (3 * HZ)
+
+/* shutdown timer def = 300 ms */
+#define SCTP_DEFAULT_TIMEOUT_T2_SHUTDOWN ((300 * HZ) / 1000)
+
+/* 0 seconds + RTO */
+#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (10 * HZ)
+
+/* recv (SACK) timer def = 200 ms (in jiffies) */
+#define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000)
+#define SCTP_DEFAULT_TIMEOUT_SACK_MAX ((500 * HZ) / 1000) /* 500 ms */
+
+/* RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8
+ * RTO.Beta - 1/4
+ */
+#define SCTP_RTO_INITIAL (3 * HZ)
+#define SCTP_RTO_MIN (1 * HZ)
+#define SCTP_RTO_MAX (60 * HZ)
+
+#define SCTP_RTO_ALPHA 3 /* 1/8 when converted to right shifts. */
+#define SCTP_RTO_BETA 2 /* 1/4 when converted to right shifts. */
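+
+/* For example, scaling an RTT error term by RTO.Alpha is done with a shift:
+ * (err >> SCTP_RTO_ALPHA) == err/8, so no division is needed on the RTO path.
+ */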
+
+/* Maximum number of new data packets that can be sent in a burst. */
+#define SCTP_MAX_BURST 4
+
+#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */
+
+#define SCTP_DEF_MAX_INIT 6
+#define SCTP_DEF_MAX_SEND 10
+
+#define SCTP_DEFAULT_COOKIE_LIFE_SEC 60 /* seconds */
+#define SCTP_DEFAULT_COOKIE_LIFE_USEC 0 /* microseconds */
+
+#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */
+#define SCTP_DEFAULT_MAXWINDOW 65535 /* default rwnd size */
+#define SCTP_DEFAULT_MAXSEGMENT 1500 /* MTU size; this is the limit
+ * to which we will raise the P-MTU.
+ */
+#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
+#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */
+#define SCTP_HOW_LONG_COOKIE_LIVE 3600 /* How many seconds the current
+ * secret will live.
+ */
+#define SCTP_SECRET_SIZE 32 /* Number of octets in 256 bits. */
+
+#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */
+
+#define SCTP_COOKIE_MULTIPLE 32 /* Pad out our cookie to make our hash
+ * functions simpler to write.
+ */
+
+#if defined (CONFIG_SCTP_HMAC_MD5)
+#define SCTP_COOKIE_HMAC_ALG "md5"
+#elif defined (CONFIG_SCTP_HMAC_SHA1)
+#define SCTP_COOKIE_HMAC_ALG "sha1"
+#else
+#define SCTP_COOKIE_HMAC_ALG NULL
+#endif
+
+/* These return values describe the success or failure of a number of
+ * routines which form the lower interface to SCTP_outqueue.
+ */
+typedef enum {
+ SCTP_XMIT_OK,
+ SCTP_XMIT_PMTU_FULL,
+ SCTP_XMIT_RWND_FULL,
+ SCTP_XMIT_NAGLE_DELAY,
+} sctp_xmit_t;
+
+/* These are the commands for manipulating transports. */
+typedef enum {
+ SCTP_TRANSPORT_UP,
+ SCTP_TRANSPORT_DOWN,
+} sctp_transport_cmd_t;
+
+/* These are the address scopes defined mainly for IPv4 addresses
+ * based on draft of SCTP IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
+ * These scopes are hopefully generic enough to be used on scoping both
+ * IPv4 and IPv6 addresses in SCTP.
+ * At this point, the IPv6 scopes will be mapped to these internal scopes
+ * as much as possible.
+ */
+typedef enum {
+ SCTP_SCOPE_GLOBAL, /* IPv4 global addresses */
+ SCTP_SCOPE_PRIVATE, /* IPv4 private addresses */
+ SCTP_SCOPE_LINK, /* IPv4 link local address */
+ SCTP_SCOPE_LOOPBACK, /* IPv4 loopback address */
+ SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */
+} sctp_scope_t;
+
+/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+ * 192.88.99.0/24.
+ * Also, per RFC 2960 Section 8.4, non-unicast addresses are not considered
+ * valid SCTP addresses.
+ */
+#define IS_IPV4_UNUSABLE_ADDRESS(a) \
+ ((INADDR_BROADCAST == *a) || \
+ (MULTICAST(*a)) || \
+ (((unsigned char *)(a))[0] == 0) || \
+ ((((unsigned char *)(a))[0] == 198) && \
+ (((unsigned char *)(a))[1] == 18) && \
+ (((unsigned char *)(a))[2] == 0)) || \
+ ((((unsigned char *)(a))[0] == 192) && \
+ (((unsigned char *)(a))[1] == 88) && \
+ (((unsigned char *)(a))[2] == 99)))
+
+/* IPv4 Link-local addresses: 169.254.0.0/16. */
+#define IS_IPV4_LINK_ADDRESS(a) \
+ ((((unsigned char *)(a))[0] == 169) && \
+ (((unsigned char *)(a))[1] == 254))
+
+/* RFC 1918 "Address Allocation for Private Internets" defines the IPv4
+ * private address space as the following:
+ *
+ * 10.0.0.0 - 10.255.255.255 (10/8 prefix)
+ * 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
+ * 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
+ */
+#define IS_IPV4_PRIVATE_ADDRESS(a) \
+ ((((unsigned char *)(a))[0] == 10) || \
+ ((((unsigned char *)(a))[0] == 172) && \
+ (((unsigned char *)(a))[1] >= 16) && \
+ (((unsigned char *)(a))[1] < 32)) || \
+ ((((unsigned char *)(a))[0] == 192) && \
+ (((unsigned char *)(a))[1] == 168)))
+
+/* Flags used for the bind address copy functions. */
+#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+ peer */
+#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+ peer */
+
+/* Reasons to retransmit. */
+typedef enum {
+ SCTP_RTXR_T3_RTX,
+ SCTP_RTXR_FAST_RTX,
+ SCTP_RTXR_PMTUD,
+} sctp_retransmit_reason_t;
+
+/* Reasons to lower cwnd. */
+typedef enum {
+ SCTP_LOWER_CWND_T3_RTX,
+ SCTP_LOWER_CWND_FAST_RTX,
+ SCTP_LOWER_CWND_ECNE,
+ SCTP_LOWER_CWND_INACTIVE,
+} sctp_lower_cwnd_t;
+
+#endif /* __sctp_constants_h__ */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
new file mode 100644
index 000000000000..960abfa48d68
--- /dev/null
+++ b/include/net/sctp/sctp.h
@@ -0,0 +1,620 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * The base lksctp header.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __net_sctp_h__
+#define __net_sctp_h__
+
+/* Header Strategy.
+ * Start getting some control over the header file dependencies:
+ * includes
+ * constants
+ * structs
+ * prototypes
+ * macros, externs, and inlines
+ *
+ * Move test_frame specific items out of the kernel headers
+ * and into the test frame headers. This is not perfect in any sense
+ * and will continue to evolve.
+ */
+
+
+#include <linux/config.h>
+
+#ifdef TEST_FRAME
+#undef CONFIG_PROC_FS
+#undef CONFIG_SCTP_DBG_OBJCNT
+#undef CONFIG_SYSCTL
+#endif /* TEST_FRAME */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/idr.h>
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#endif
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/sctp/structs.h>
+#include <net/sctp/constants.h>
+
+
+/* Set SCTP_DEBUG flag via config if not already set. */
+#ifndef SCTP_DEBUG
+#ifdef CONFIG_SCTP_DBG_MSG
+#define SCTP_DEBUG 1
+#else
+#define SCTP_DEBUG 0
+#endif /* CONFIG_SCTP_DBG_MSG */
+#endif /* SCTP_DEBUG */
+
+#ifdef CONFIG_IP_SCTP_MODULE
+#define SCTP_PROTOSW_FLAG 0
+#else /* static! */
+#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
+#endif
+
+
+/* Certain internal static functions need to be exported when
+ * compiled into the test frame.
+ */
+#ifndef SCTP_STATIC
+#define SCTP_STATIC static
+#endif
+
+/*
+ * Function declarations.
+ */
+
+/*
+ * sctp/protocol.c
+ */
+extern struct sock *sctp_get_ctl_sock(void);
+extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
+ sctp_scope_t, int gfp, int flags);
+extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
+
+/*
+ * sctp/socket.c
+ */
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int sctp_inet_listen(struct socket *sock, int backlog);
+void sctp_write_space(struct sock *sk);
+unsigned int sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+
+/*
+ * sctp/primitive.c
+ */
+int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+
+/*
+ * sctp/crc32c.c
+ */
+__u32 sctp_start_cksum(__u8 *ptr, __u16 count);
+__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
+__u32 sctp_end_cksum(__u32 cksum);
+__u32 sctp_update_copy_cksum(__u8 *, __u8 *, __u16 count, __u32 cksum);
+
+/*
+ * sctp/input.c
+ */
+int sctp_rcv(struct sk_buff *skb);
+void sctp_v4_err(struct sk_buff *skb, u32 info);
+void sctp_hash_established(struct sctp_association *);
+void sctp_unhash_established(struct sctp_association *);
+void sctp_hash_endpoint(struct sctp_endpoint *);
+void sctp_unhash_endpoint(struct sctp_endpoint *);
+struct sock *sctp_err_lookup(int family, struct sk_buff *,
+ struct sctphdr *, struct sctp_endpoint **,
+ struct sctp_association **,
+ struct sctp_transport **);
+void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+ struct sctp_association *);
+void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+ struct sctp_transport *t, __u32 pmtu);
+void sctp_icmp_proto_unreachable(struct sock *sk,
+ struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ struct sctp_transport *t);
+
+/*
+ * Section: Macros, externs, and inlines
+ */
+
+
+#ifdef TEST_FRAME
+#include <test_frame.h>
+#else
+
+/* spin lock wrappers. */
+#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
+#define sctp_spin_unlock_irqrestore(lock, flags) \
+ spin_unlock_irqrestore(lock, flags)
+#define sctp_local_bh_disable() local_bh_disable()
+#define sctp_local_bh_enable() local_bh_enable()
+#define sctp_spin_lock(lock) spin_lock(lock)
+#define sctp_spin_unlock(lock) spin_unlock(lock)
+#define sctp_write_lock(lock) write_lock(lock)
+#define sctp_write_unlock(lock) write_unlock(lock)
+#define sctp_read_lock(lock) read_lock(lock)
+#define sctp_read_unlock(lock) read_unlock(lock)
+
+/* sock lock wrappers. */
+#define sctp_lock_sock(sk) lock_sock(sk)
+#define sctp_release_sock(sk) release_sock(sk)
+#define sctp_bh_lock_sock(sk) bh_lock_sock(sk)
+#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
+#define SCTP_SOCK_SLEEP_PRE(sk) SOCK_SLEEP_PRE(sk)
+#define SCTP_SOCK_SLEEP_POST(sk) SOCK_SLEEP_POST(sk)
+
+/* SCTP SNMP MIB stats handlers */
+DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
+#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
+#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
+#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field)
+
+#endif /* !TEST_FRAME */
+
+
+/* Print debugging messages. */
+#if SCTP_DEBUG
+extern int sctp_debug_flag;
+#define SCTP_DEBUG_PRINTK(whatever...) \
+ ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever)))
+#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; }
+#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; }
+
+#define SCTP_ASSERT(expr, str, func) \
+ if (!(expr)) { \
+ SCTP_DEBUG_PRINTK("Assertion Failed: %s(%s) at %s:%s:%d\n", \
+ str, (#expr), __FILE__, __FUNCTION__, __LINE__); \
+ func; \
+ }
+
+#else /* SCTP_DEBUG */
+
+#define SCTP_DEBUG_PRINTK(whatever...)
+#define SCTP_ENABLE_DEBUG
+#define SCTP_DISABLE_DEBUG
+#define SCTP_ASSERT(expr, str, func)
+
+#endif /* SCTP_DEBUG */
+
+
+/*
+ * Macros for keeping a global count of object allocations.
+ */
+#ifdef CONFIG_SCTP_DBG_OBJCNT
+
+extern atomic_t sctp_dbg_objcnt_sock;
+extern atomic_t sctp_dbg_objcnt_ep;
+extern atomic_t sctp_dbg_objcnt_assoc;
+extern atomic_t sctp_dbg_objcnt_transport;
+extern atomic_t sctp_dbg_objcnt_chunk;
+extern atomic_t sctp_dbg_objcnt_bind_addr;
+extern atomic_t sctp_dbg_objcnt_bind_bucket;
+extern atomic_t sctp_dbg_objcnt_addr;
+extern atomic_t sctp_dbg_objcnt_ssnmap;
+extern atomic_t sctp_dbg_objcnt_datamsg;
+
+/* Macros to atomically increment/decrement objcnt counters. */
+#define SCTP_DBG_OBJCNT_INC(name) \
+atomic_inc(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT_DEC(name) \
+atomic_dec(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT(name) \
+atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
+
+/* Macro to help create new entries in the global array of
+ * objcnt counters.
+ */
+#define SCTP_DBG_OBJCNT_ENTRY(name) \
+{.label= #name, .counter= &sctp_dbg_objcnt_## name}
+
+void sctp_dbg_objcnt_init(void);
+void sctp_dbg_objcnt_exit(void);
+
+#else
+
+#define SCTP_DBG_OBJCNT_INC(name)
+#define SCTP_DBG_OBJCNT_DEC(name)
+
+static inline void sctp_dbg_objcnt_init(void) { return; }
+static inline void sctp_dbg_objcnt_exit(void) { return; }
+
+#endif /* CONFIG_SCTP_DBG_OBJCNT */
+
+#if defined CONFIG_SYSCTL
+void sctp_sysctl_register(void);
+void sctp_sysctl_unregister(void);
+#else
+static inline void sctp_sysctl_register(void) { return; }
+static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context) {
+ return -ENOSYS;
+}
+#endif
+
+/* Size of Supported Address Parameter for 'x' address types. */
+#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+int sctp_v6_init(void);
+void sctp_v6_exit(void);
+
+#else /* #ifdef defined(CONFIG_IPV6) */
+
+static inline int sctp_v6_init(void) { return 0; }
+static inline void sctp_v6_exit(void) { return; }
+
+#endif /* #if defined(CONFIG_IPV6) */
+
+/* Some wrappers, in case crypto is not available. */
+#if defined (CONFIG_CRYPTO_HMAC)
+#define sctp_crypto_alloc_tfm crypto_alloc_tfm
+#define sctp_crypto_free_tfm crypto_free_tfm
+#define sctp_crypto_hmac crypto_hmac
+#else
+#define sctp_crypto_alloc_tfm(x...) NULL
+#define sctp_crypto_free_tfm(x...)
+#define sctp_crypto_hmac(x...)
+#endif
+
+
+/* Map an association to an assoc_id. */
+static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
+{
+ return (asoc?asoc->assoc_id:0);
+}
+
+/* Look up the association by its id. */
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+
+
+/* A macro to walk a list of skbs. */
+#define sctp_skb_for_each(pos, head, tmp) \
+for (pos = (head)->next;\
+ tmp = (pos)->next, pos != ((struct sk_buff *)(head));\
+ pos = tmp)
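+
+/* A minimal usage sketch (illustrative only): walk a socket buffer queue
+ * with the macro above; 'tmp' keeps the walk safe if 'pos' gets unlinked.
+ */
+static inline int example_count_queued_skbs(struct sk_buff_head *head)
+{
+ struct sk_buff *pos, *tmp;
+ int count = 0;
+
+ sctp_skb_for_each(pos, head, tmp)
+  count++;
+ return count;
+}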
+
+
+/* A helper to append an entire skb list (list) to another (head). */
+static inline void sctp_skb_list_tail(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ unsigned long flags;
+
+ sctp_spin_lock_irqsave(&head->lock, flags);
+ sctp_spin_lock(&list->lock);
+
+ list_splice((struct list_head *)list, (struct list_head *)head->prev);
+
+ head->qlen += list->qlen;
+ list->qlen = 0;
+
+ sctp_spin_unlock(&list->lock);
+ sctp_spin_unlock_irqrestore(&head->lock, flags);
+}
+
+/**
+ * sctp_list_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct list_head *sctp_list_dequeue(struct list_head *list)
+{
+ struct list_head *result = NULL;
+
+ if (list->next != list) {
+ result = list->next;
+ list->next = result->next;
+ list->next->prev = list;
+ INIT_LIST_HEAD(result);
+ }
+ return result;
+}
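+
+/* A minimal usage sketch (illustrative only): detach entries one at a time;
+ * a real caller would use list_entry()/container_of() on the returned node.
+ */
+static inline int example_drain_list(struct list_head *queue)
+{
+ struct list_head *node;
+ int drained = 0;
+
+ while ((node = sctp_list_dequeue(queue)) != NULL)
+  drained++;
+ return drained;
+}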
+
+/* Tests if the list has one and only one entry. */
+static inline int sctp_list_single_entry(struct list_head *head)
+{
+ return ((head->next != head) && (head->next == head->prev));
+}
+
+/* Calculate the size (in bytes) occupied by the data of an iovec. */
+static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
+{
+ size_t retval = 0;
+
+ for (; iovlen > 0; --iovlen) {
+ retval += iov->iov_len;
+ iov++;
+ }
+
+ return retval;
+}
+
+/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
+static inline __s32 sctp_jitter(__u32 rto)
+{
+ static __u32 sctp_rand;
+ __s32 ret;
+
+ /* Avoid divide by zero. */
+ if (!rto)
+ rto = 1;
+
+ sctp_rand += jiffies;
+ sctp_rand ^= (sctp_rand << 12);
+ sctp_rand ^= (sctp_rand >> 20);
+
+ /* Choose random number from 0 to rto, then move to -50% ~ +50%
+ * of rto.
+ */
+ ret = sctp_rand % rto - (rto >> 1);
+ return ret;
+}
+
+/* Break down data chunks at this point. */
+static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu)
+{
+ int frag = pmtu;
+
+ frag -= sp->pf->af->net_header_len;
+ frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+
+ if (sp->user_frag)
+ frag = min_t(int, frag, sp->user_frag);
+
+ frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);
+
+ return frag;
+}
+
+/* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
+ * there is room for a param header too.
+ */
+#define sctp_walk_params(pos, chunk, member)\
+_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)
+
+#define _sctp_walk_params(pos, chunk, end, member)\
+for (pos.v = chunk->member;\
+ pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
+ pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
+ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+ pos.v += WORD_ROUND(ntohs(pos.p->length)))
+
+#define sctp_walk_errors(err, chunk_hdr)\
+_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+
+#define _sctp_walk_errors(err, chunk_hdr, end)\
+for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+ sizeof(sctp_chunkhdr_t));\
+ (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
+ (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
+ ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
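+
+/* A minimal usage sketch (illustrative only): scan the causes carried in an
+ * ERROR/ABORT chunk; 'cause' is expected in the same (network) byte order as
+ * it appears on the wire.
+ */
+static inline int example_has_error_cause(sctp_chunkhdr_t *ch, __u16 cause)
+{
+ sctp_errhdr_t *err;
+
+ sctp_walk_errors(err, ch)
+  if (err->cause == cause)
+   return 1;
+ return 0;
+}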
+
+#define sctp_walk_fwdtsn(pos, chunk)\
+_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
+
+#define _sctp_walk_fwdtsn(pos, chunk, end)\
+for (pos = chunk->subh.fwdtsn_hdr->skip;\
+ (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
+ pos++)
+
+/* Round an int up to the next multiple of 4. */
+#define WORD_ROUND(s) (((s)+3)&~3)
+
+/* Make a new instance of type. */
+#define t_new(type, flags) (type *)kmalloc(sizeof(type), flags)
+
+/* Compare two timevals. */
+#define tv_lt(s, t) \
+ (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec))
+
+/* Add tv1 to tv2. */
+#define TIMEVAL_ADD(tv1, tv2) \
+({ \
+ suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \
+ time_t secs = (tv2).tv_sec + (tv1).tv_sec; \
+\
+ if (usecs >= 1000000) { \
+ usecs -= 1000000; \
+ secs++; \
+ } \
+ (tv2).tv_sec = secs; \
+ (tv2).tv_usec = usecs; \
+})
+
+/* External references. */
+
+extern struct proto sctp_prot;
+extern struct proto sctpv6_prot;
+extern struct proc_dir_entry *proc_net_sctp;
+void sctp_put_port(struct sock *sk);
+
+extern struct idr sctp_assocs_id;
+extern spinlock_t sctp_assocs_id_lock;
+
+/* Static inline functions. */
+
+/* Convert from an IP version number to an Address Family symbol. */
+static inline int ipver2af(__u8 ipver)
+{
+ switch (ipver) {
+ case 4:
+ return AF_INET;
+ case 6:
+ return AF_INET6;
+ default:
+ return 0;
+ };
+}
+
+/* Convert from an address parameter type to an address family. */
+static inline int param_type2af(__u16 type)
+{
+ switch (type) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ return AF_INET;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ return AF_INET6;
+ default:
+ return 0;
+ };
+}
+
+/* Perform some sanity checks. */
+static inline int sctp_sanity_check(void)
+{
+ SCTP_ASSERT(sizeof(struct sctp_ulpevent) <=
+ sizeof(((struct sk_buff *)0)->cb),
+ "SCTP: ulpevent does not fit in skb!\n", return 0);
+
+ return 1;
+}
+
+/* Warning: The following hash functions assume a power of two 'size'. */
+/* This is the hash function for the SCTP port hash table. */
+static inline int sctp_phashfn(__u16 lport)
+{
+ return (lport & (sctp_port_hashsize - 1));
+}
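+
+/* For example, if sctp_port_hashsize were 4096, sctp_phashfn(5001) would be
+ * 5001 & 4095 == 905.
+ */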
+
+/* This is the hash function for the endpoint hash table. */
+static inline int sctp_ep_hashfn(__u16 lport)
+{
+ return (lport & (sctp_ep_hashsize - 1));
+}
+
+/* This is the hash function for the association hash table. */
+static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+{
+ int h = (lport << 16) + rport;
+ h ^= h>>8;
+ return (h & (sctp_assoc_hashsize - 1));
+}
+
+/* This is the hash function for the association hash table. This is
+ * not used yet, but could be used as a better hash function when
+ * we have a vtag.
+ */
+static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
+{
+ int h = (lport << 16) + rport;
+ h ^= vtag;
+ return (h & (sctp_assoc_hashsize-1));
+}
+
+/* Is a socket of this style? */
+#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
+static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
+{
+ return sctp_sk(sk)->type == style;
+}
+
+/* Is the association in this state? */
+#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
+static inline int __sctp_state(const struct sctp_association *asoc,
+ sctp_state_t state)
+{
+ return asoc->state == state;
+}
+
+/* Is the socket in this state? */
+#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
+static inline int __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
+{
+ return sk->sk_state == state;
+}
+
+/* Map v4-mapped v6 address back to v4 address */
+static inline void sctp_v6_map_v4(union sctp_addr *addr)
+{
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = addr->v6.sin6_port;
+ addr->v4.sin_addr.s_addr = addr->v6.sin6_addr.s6_addr32[3];
+}
+
+/* Map v4 address to v4-mapped v6 address */
+static inline void sctp_v4_map_v6(union sctp_addr *addr)
+{
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_addr.s6_addr32[0] = 0;
+ addr->v6.sin6_addr.s6_addr32[1] = 0;
+ addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+}
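+
+/* For example, sctp_v4_map_v6() turns 10.1.2.3 into the v4-mapped address
+ * ::ffff:10.1.2.3, and sctp_v6_map_v4() reverses the mapping.
+ */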
+
+#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
new file mode 100644
index 000000000000..5576db56324d
--- /dev/null
+++ b/include/net/sctp/sm.h
@@ -0,0 +1,442 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * These are definitions needed by the state machine.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/sctp/command.h>
+#include <net/sctp/sctp.h>
+
+#ifndef __sctp_sm_h__
+#define __sctp_sm_h__
+
+/*
+ * Possible values for the disposition are:
+ */
+typedef enum {
+ SCTP_DISPOSITION_DISCARD, /* No further processing. */
+ SCTP_DISPOSITION_CONSUME, /* Process return values normally. */
+ SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */
+ SCTP_DISPOSITION_DELETE_TCB, /* Close the association. */
+ SCTP_DISPOSITION_ABORT, /* Close the association NOW. */
+ SCTP_DISPOSITION_VIOLATION, /* The peer is misbehaving. */
+ SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */
+ SCTP_DISPOSITION_ERROR, /* This is plain old user error. */
+ SCTP_DISPOSITION_BUG, /* This is a bug. */
+} sctp_disposition_t;
+
+typedef struct {
+ int name;
+ int action;
+} sctp_sm_command_t;
+
+typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+ const struct sctp_association *,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *);
+typedef void (sctp_timer_event_t) (unsigned long);
+typedef struct {
+ sctp_state_fn_t *fn;
+ const char *name;
+} sctp_sm_table_entry_t;
+
+/* A naming convention of "sctp_sf_xxx" applies to all the state functions
+ * currently in use.
+ */
+
+/* Prototypes for generic state functions. */
+sctp_state_fn_t sctp_sf_not_impl;
+sctp_state_fn_t sctp_sf_bug;
+
+/* Prototypes for generic timer state functions. */
+sctp_state_fn_t sctp_sf_timer_ignore;
+
+/* Prototypes for chunk state functions. */
+sctp_state_fn_t sctp_sf_do_9_1_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_abort;
+sctp_state_fn_t sctp_sf_do_5_1B_init;
+sctp_state_fn_t sctp_sf_do_5_1C_ack;
+sctp_state_fn_t sctp_sf_do_5_1D_ce;
+sctp_state_fn_t sctp_sf_do_5_1E_ca;
+sctp_state_fn_t sctp_sf_do_4_C;
+sctp_state_fn_t sctp_sf_eat_data_6_2;
+sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
+sctp_state_fn_t sctp_sf_eat_sack_6_2;
+sctp_state_fn_t sctp_sf_tabort_8_4_8;
+sctp_state_fn_t sctp_sf_operr_notify;
+sctp_state_fn_t sctp_sf_t1_timer_expire;
+sctp_state_fn_t sctp_sf_t2_timer_expire;
+sctp_state_fn_t sctp_sf_t4_timer_expire;
+sctp_state_fn_t sctp_sf_t5_timer_expire;
+sctp_state_fn_t sctp_sf_sendbeat_8_3;
+sctp_state_fn_t sctp_sf_beat_8_3;
+sctp_state_fn_t sctp_sf_backbeat_8_3;
+sctp_state_fn_t sctp_sf_do_9_2_final;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown;
+sctp_state_fn_t sctp_sf_do_ecn_cwr;
+sctp_state_fn_t sctp_sf_do_ecne;
+sctp_state_fn_t sctp_sf_ootb;
+sctp_state_fn_t sctp_sf_pdiscard;
+sctp_state_fn_t sctp_sf_violation;
+sctp_state_fn_t sctp_sf_violation_chunklen;
+sctp_state_fn_t sctp_sf_discard_chunk;
+sctp_state_fn_t sctp_sf_do_5_2_1_siminit;
+sctp_state_fn_t sctp_sf_do_5_2_2_dupinit;
+sctp_state_fn_t sctp_sf_do_5_2_4_dupcook;
+sctp_state_fn_t sctp_sf_unk_chunk;
+sctp_state_fn_t sctp_sf_do_8_5_1_E_sa;
+sctp_state_fn_t sctp_sf_cookie_echoed_err;
+sctp_state_fn_t sctp_sf_do_asconf;
+sctp_state_fn_t sctp_sf_do_asconf_ack;
+sctp_state_fn_t sctp_sf_do_9_2_reshutack;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast;
+
+/* Prototypes for primitive event state functions. */
+sctp_state_fn_t sctp_sf_do_prm_asoc;
+sctp_state_fn_t sctp_sf_do_prm_send;
+sctp_state_fn_t sctp_sf_do_9_2_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_shutdown;
+sctp_state_fn_t sctp_sf_do_9_1_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_prm_abort;
+sctp_state_fn_t sctp_sf_error_closed;
+sctp_state_fn_t sctp_sf_error_shutdown;
+sctp_state_fn_t sctp_sf_ignore_primitive;
+sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
+sctp_state_fn_t sctp_sf_do_prm_asconf;
+
+/* Prototypes for other event state functions. */
+sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack;
+sctp_state_fn_t sctp_sf_ignore_other;
+sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
+
+/* Prototypes for timeout event state functions. */
+sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
+sctp_state_fn_t sctp_sf_do_6_2_sack;
+sctp_state_fn_t sctp_sf_autoclose_timer_expire;
+
+/* Prototypes for utility support functions. */
+__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+ sctp_state_t,
+ sctp_subtype_t);
+int sctp_chunk_iif(const struct sctp_chunk *);
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
+ struct sctp_chunk *,
+ int gfp);
+__u32 sctp_generate_verification_tag(void);
+void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
+
+/* Prototypes for chunk-building functions. */
+struct sctp_chunk *sctp_make_init(const struct sctp_association *,
+ const struct sctp_bind_addr *,
+ int gfp, int vparam_len);
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *,
+ const struct sctp_chunk *,
+ const int gfp,
+ const int unkparam_len);
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *,
+ const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *,
+ const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *,
+ const __u32 lowest_tsn,
+ const struct sctp_chunk *);
+struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, const __u8 flags,
+ __u16 ssn);
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *,
+ const __u32);
+struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
+ const struct sctp_chunk *);
+void sctp_init_cause(struct sctp_chunk *, __u16 cause, const void *, size_t);
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
+ const struct sctp_chunk *,
+ const size_t hint);
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
+ const struct sctp_chunk *,
+ __u32 tsn);
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
+ const struct sctp_chunk *,
+ const struct msghdr *);
+struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
+ const struct sctp_chunk *,
+ const __u8 *,
+ const size_t );
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
+ const struct sctp_transport *,
+ const void *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *,
+ const struct sctp_chunk *,
+ const void *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *,
+ const struct sctp_chunk *chunk,
+ __u16 cause_code,
+ const void *payload,
+ size_t paylen);
+
+struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
+ union sctp_addr *,
+ struct sockaddr *,
+ int, __u16);
+struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+int sctp_process_asconf_ack(struct sctp_association *asoc,
+ struct sctp_chunk *asconf_ack);
+struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_fwdtsn_skip *skiplist);
+
+void sctp_chunk_assign_tsn(struct sctp_chunk *);
+void sctp_chunk_assign_ssn(struct sctp_chunk *);
+
+void sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, __u16 error);
+
+/* Prototypes for statetable processing. */
+
+int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+ sctp_state_t state,
+ struct sctp_endpoint *,
+ struct sctp_association *asoc,
+ void *event_arg,
+ int gfp);
+
+/* 2nd level prototypes */
+void sctp_generate_t3_rtx_event(unsigned long peer);
+void sctp_generate_heartbeat_event(unsigned long peer);
+
+void sctp_ootb_pkt_free(struct sctp_packet *);
+
+struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *,
+ const struct sctp_association *,
+ struct sctp_chunk *, int gfp, int *err,
+ struct sctp_chunk **err_chk_p);
+int sctp_addip_addr_config(struct sctp_association *, sctp_param_t,
+ struct sockaddr_storage*, int);
+
+/* 3rd level prototypes */
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);
+
+/* Extern declarations for major data structures. */
+extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+
+
+/* Get the size of a DATA chunk payload. */
+static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
+{
+ __u16 size;
+
+ size = ntohs(chunk->chunk_hdr->length);
+ size -= sizeof(sctp_data_chunk_t);
+
+ return size;
+}
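+
+/* Worked example (illustrative, not part of the original header): assuming
+ * sizeof(sctp_data_chunk_t) is 16 bytes (chunk header plus DATA-specific
+ * header), a DATA chunk whose header carries length = htons(19) yields
+ * 19 - 16 = 3 bytes of user payload, since the chunk length field covers
+ * both headers and the payload but not any padding.
+ */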
+
+/* Compare two TSNs */
+
+/* RFC 1982 - Serial Number Arithmetic
+ *
+ * 2. Comparison
+ * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
+ * in all other cases, s1 is not equal to s2.
+ *
+ * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
+ * and
+ *
+ * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
+ * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
+ *
+ * s1 is said to be greater than s2 if, and only if, s1 is not equal to
+ * s2, and
+ *
+ * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
+ * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
+ */
+
+/*
+ * RFC 2960
+ * 1.6 Serial Number Arithmetic
+ *
+ * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
+ * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
+ */
+
+enum {
+ TSN_SIGN_BIT = (1<<31)
+};
+
+static inline int TSN_lt(__u32 s, __u32 t)
+{
+ return (((s) - (t)) & TSN_SIGN_BIT);
+}
+
+static inline int TSN_lte(__u32 s, __u32 t)
+{
+ return (((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT));
+}
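+
+/* Worked examples (illustrative, not part of the original header): serial
+ * number arithmetic makes these comparisons wrap-safe, e.g.
+ *
+ *	TSN_lt(5, 3)           == 0   (5 is not less than 3)
+ *	TSN_lt(0xFFFFFFFF, 1)  != 0   (0xFFFFFFFF precedes 1 across the wrap)
+ *	TSN_lte(7, 7)          != 0   (equal TSNs compare as lte)
+ */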
+
+/* Compare two SSNs */
+
+/*
+ * RFC 2960
+ * 1.6 Serial Number Arithmetic
+ *
+ * Comparisons and arithmetic on Stream Sequence Numbers in this document
+ * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
+ * SERIAL_BITS = 16.
+ */
+enum {
+ SSN_SIGN_BIT = (1<<15)
+};
+
+static inline int SSN_lt(__u16 s, __u16 t)
+{
+ return (((s) - (t)) & SSN_SIGN_BIT);
+}
+
+static inline int SSN_lte(__u16 s, __u16 t)
+{
+ return (((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT));
+}
+
+/*
+ * ADDIP 3.1.1
+ * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
+ * Numbers wrap back to 0 after reaching 4294967295.
+ */
+enum {
+ ADDIP_SERIAL_SIGN_BIT = (1<<31)
+};
+
+static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
+{
+	return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT));
+}
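+
+/* Worked example (illustrative, not part of the original header): with
+ * 32-bit serial number arithmetic, ADDIP_SERIAL_gte(1, 0xFFFFFFFF) is
+ * true because serial 1 follows 0xFFFFFFFF after the wrap, while
+ * ADDIP_SERIAL_gte(0xFFFFFFFF, 1) is false.
+ */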
+
+
+/* Run sctp_add_cmd() generating a BUG() if there is a failure. */
+static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
+{
+ if (unlikely(!sctp_add_cmd(seq, verb, obj)))
+ BUG();
+}
+
+/* Check VTAG of the packet matches the sender's own tag. */
+static inline int
+sctp_vtag_verify(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Sec 8.5 When receiving an SCTP packet, the endpoint
+ * MUST ensure that the value in the Verification Tag field of
+ * the received SCTP packet matches its own Tag. If the received
+ * Verification Tag value does not match the receiver's own
+ * tag value, the receiver shall silently discard the packet...
+ */
+ if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
+ return 1;
+
+ return 0;
+}
+
+/* Check VTAG of the packet matches the sender's own tag OR its peer's
+ * tag and the T bit is set in the Chunk Flags.
+ */
+static inline int
+sctp_vtag_verify_either(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Section 8.5.1, sctpimpguide-06 Section 2.13.2
+ *
+ * B) The receiver of a ABORT shall accept the packet if the
+ * Verification Tag field of the packet matches its own tag OR it
+ * is set to its peer's tag and the T bit is set in the Chunk
+ * Flags. Otherwise, the receiver MUST silently discard the packet
+ * and take no further action.
+ *
+ * (C) The receiver of a SHUTDOWN COMPLETE shall accept the
+ * packet if the Verification Tag field of the packet
+ * matches its own tag OR it is set to its peer's tag and
+ * the T bit is set in the Chunk Flags. Otherwise, the
+ * receiver MUST silently discard the packet and take no
+ * further action....
+ *
+ */
+ if ((ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag) ||
+ (sctp_test_T_bit(chunk) && (ntohl(chunk->sctp_hdr->vtag)
+ == asoc->c.peer_vtag))) {
+ return 1;
+ }
+
+ return 0;
+}
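+
+/* Illustrative usage sketch (assumption, not part of the original header):
+ * a chunk-processing state function would typically start by verifying the
+ * tag and silently discarding the packet on mismatch, roughly:
+ *
+ *	if (!sctp_vtag_verify(chunk, asoc))
+ *		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ *
+ * For ABORT and SHUTDOWN COMPLETE, sctp_vtag_verify_either() is the
+ * appropriate check, since those chunks may carry the peer's tag with the
+ * T bit set.
+ */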
+
+#endif /* __sctp_sm_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
new file mode 100644
index 000000000000..7e64cf6bda1e
--- /dev/null
+++ b/include/net/sctp/structs.h
@@ -0,0 +1,1752 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Randall Stewart <randall@sctp.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@email.mot.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Anup Pemmaiah <pemmaiah@cc.usu.edu>
+ * Kevin Gao <kevin.gao@intel.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __sctp_structs_h__
+#define __sctp_structs_h__
+
+#include <linux/time.h> /* We get struct timespec. */
+#include <linux/socket.h> /* linux/in.h needs this!! */
+#include <linux/in.h> /* We get struct sockaddr_in. */
+#include <linux/in6.h> /* We get struct in6_addr */
+#include <linux/ipv6.h>
+#include <asm/param.h> /* We get MAXHOSTNAMELEN. */
+#include <asm/atomic.h> /* This gets us atomic counters. */
+#include <linux/skbuff.h> /* We need sk_buff_head. */
+#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/sctp.h> /* We need sctp* header structs. */
+
+/* A convenience structure for handling sockaddr structures.
+ * We should wean ourselves off this.
+ */
+union sctp_addr {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ struct sockaddr sa;
+};
+
+/* Forward declarations for data structures. */
+struct sctp_globals;
+struct sctp_endpoint;
+struct sctp_association;
+struct sctp_transport;
+struct sctp_packet;
+struct sctp_chunk;
+struct sctp_inq;
+struct sctp_outq;
+struct sctp_bind_addr;
+struct sctp_ulpq;
+struct sctp_ep_common;
+struct sctp_ssnmap;
+
+
+#include <net/sctp/tsnmap.h>
+#include <net/sctp/ulpevent.h>
+#include <net/sctp/ulpqueue.h>
+
+/* Structures useful for managing bind/connect. */
+
+struct sctp_bind_bucket {
+ unsigned short port;
+ unsigned short fastreuse;
+ struct sctp_bind_bucket *next;
+ struct sctp_bind_bucket **pprev;
+ struct hlist_head owner;
+};
+
+struct sctp_bind_hashbucket {
+ spinlock_t lock;
+ struct sctp_bind_bucket *chain;
+};
+
+/* Used for hashing all associations. */
+struct sctp_hashbucket {
+ rwlock_t lock;
+ struct sctp_ep_common *chain;
+} __attribute__((__aligned__(8)));
+
+
+/* The SCTP globals structure. */
+extern struct sctp_globals {
+ /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+ *
+ * The following protocol parameters are RECOMMENDED:
+ *
+ * RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8 (3 when converted to right shifts.)
+ * RTO.Beta - 1/4 (2 when converted to right shifts.)
+ */
+ __u32 rto_initial;
+ __u32 rto_min;
+ __u32 rto_max;
+
+ /* Note: rto_alpha and rto_beta are really defined as inverse
+ * powers of two to facilitate integer operations.
+ */
+ int rto_alpha;
+ int rto_beta;
+
+ /* Max.Burst - 4 */
+ int max_burst;
+
+ /* Valid.Cookie.Life - 60 seconds */
+ int valid_cookie_life;
+
+	/* Whether Cookie Preservative is enabled (1) or not (0) */
+ int cookie_preserve_enable;
+
+ /* Association.Max.Retrans - 10 attempts
+ * Path.Max.Retrans - 5 attempts (per destination address)
+ * Max.Init.Retransmits - 8 attempts
+ */
+ int max_retrans_association;
+ int max_retrans_path;
+ int max_retrans_init;
+
+ /* HB.interval - 30 seconds */
+ int hb_interval;
+
+ /* The following variables are implementation specific. */
+
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
+
+ /* This is a list of groups of functions for each address
+ * family that we support.
+ */
+ struct list_head address_families;
+
+ /* This is the hash of all endpoints. */
+ int ep_hashsize;
+ struct sctp_hashbucket *ep_hashtable;
+
+ /* This is the hash of all associations. */
+ int assoc_hashsize;
+ struct sctp_hashbucket *assoc_hashtable;
+
+ /* This is the sctp port control hash. */
+ int port_hashsize;
+ int port_rover;
+ spinlock_t port_alloc_lock; /* Protects port_rover. */
+ struct sctp_bind_hashbucket *port_hashtable;
+
+ /* This is the global local address list.
+ * We actively maintain this complete list of interfaces on
+ * the system by catching routing events.
+ *
+ * It is a list of sctp_sockaddr_entry.
+ */
+ struct list_head local_addr_list;
+ spinlock_t local_addr_lock;
+
+ /* Flag to indicate if addip is enabled. */
+ int addip_enable;
+
+ /* Flag to indicate if PR-SCTP is enabled. */
+ int prsctp_enable;
+} sctp_globals;
+
+#define sctp_rto_initial (sctp_globals.rto_initial)
+#define sctp_rto_min (sctp_globals.rto_min)
+#define sctp_rto_max (sctp_globals.rto_max)
+#define sctp_rto_alpha (sctp_globals.rto_alpha)
+#define sctp_rto_beta (sctp_globals.rto_beta)
+#define sctp_max_burst (sctp_globals.max_burst)
+#define sctp_valid_cookie_life (sctp_globals.valid_cookie_life)
+#define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable)
+#define sctp_max_retrans_association (sctp_globals.max_retrans_association)
+#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
+#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
+#define sctp_hb_interval (sctp_globals.hb_interval)
+#define sctp_max_instreams (sctp_globals.max_instreams)
+#define sctp_max_outstreams (sctp_globals.max_outstreams)
+#define sctp_address_families (sctp_globals.address_families)
+#define sctp_ep_hashsize (sctp_globals.ep_hashsize)
+#define sctp_ep_hashtable (sctp_globals.ep_hashtable)
+#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize)
+#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
+#define sctp_port_hashsize (sctp_globals.port_hashsize)
+#define sctp_port_rover (sctp_globals.port_rover)
+#define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
+#define sctp_port_hashtable (sctp_globals.port_hashtable)
+#define sctp_local_addr_list (sctp_globals.local_addr_list)
+#define sctp_local_addr_lock (sctp_globals.local_addr_lock)
+#define sctp_addip_enable (sctp_globals.addip_enable)
+#define sctp_prsctp_enable (sctp_globals.prsctp_enable)
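+
+/* Illustrative note (not part of the original header): each accessor above
+ * simply expands to the corresponding sctp_globals member, so e.g.
+ *
+ *	__u32 rto = sctp_rto_initial;
+ *
+ * is equivalent to reading sctp_globals.rto_initial directly.
+ */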
+
+/* SCTP Socket type: UDP or TCP style. */
+typedef enum {
+ SCTP_SOCKET_UDP = 0,
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH,
+ SCTP_SOCKET_TCP
+} sctp_socket_type_t;
+
+/* Per socket SCTP information. */
+struct sctp_sock {
+ /* inet_sock has to be the first member of sctp_sock */
+ struct inet_sock inet;
+ /* What kind of a socket is this? */
+ sctp_socket_type_t type;
+
+ /* PF_ family specific functions. */
+ struct sctp_pf *pf;
+
+ /* Access to HMAC transform. */
+ struct crypto_tfm *hmac;
+
+ /* What is our base endpointer? */
+ struct sctp_endpoint *ep;
+
+ struct sctp_bind_bucket *bind_hash;
+ /* Various Socket Options. */
+ __u16 default_stream;
+ __u32 default_ppid;
+ __u16 default_flags;
+ __u32 default_context;
+ __u32 default_timetolive;
+
+ struct sctp_initmsg initmsg;
+ struct sctp_rtoinfo rtoinfo;
+ struct sctp_paddrparams paddrparam;
+ struct sctp_event_subscribe subscribe;
+ struct sctp_assocparams assocparams;
+ int user_frag;
+ __u32 autoclose;
+ __u8 nodelay;
+ __u8 disable_fragments;
+ __u8 pd_mode;
+ __u8 v4mapped;
+ __u32 adaption_ind;
+
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
+};
+
+static inline struct sctp_sock *sctp_sk(const struct sock *sk)
+{
+ return (struct sctp_sock *)sk;
+}
+
+static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
+{
+ return (struct sock *)sp;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+struct sctp6_sock {
+ struct sctp_sock sctp;
+ struct ipv6_pinfo inet6;
+};
+#endif /* CONFIG_IPV6 */
+
+
+/* This is our APPLICATION-SPECIFIC state cookie.
+ * THIS IS NOT DICTATED BY THE SPECIFICATION.
+ */
+/* These are the parts of an association which we send in the cookie.
+ * Most of these are straight out of:
+ * RFC2960 12.2 Parameters necessary per association (i.e. the TCB)
+ *
+ */
+
+struct sctp_cookie {
+
+ /* My : Tag expected in every inbound packet and sent
+ * Verification: in the INIT or INIT ACK chunk.
+ * Tag :
+ */
+ __u32 my_vtag;
+
+ /* Peer's : Tag expected in every outbound packet except
+ * Verification: in the INIT chunk.
+ * Tag :
+ */
+ __u32 peer_vtag;
+
+ /* The rest of these are not from the spec, but really need to
+ * be in the cookie.
+ */
+
+ /* My Tie Tag : Assist in discovering a restarting association. */
+ __u32 my_ttag;
+
+ /* Peer's Tie Tag: Assist in discovering a restarting association. */
+ __u32 peer_ttag;
+
+ /* When does this cookie expire? */
+ struct timeval expiration;
+
+ /* Number of inbound/outbound streams which are set
+ * and negotiated during the INIT process.
+ */
+ __u16 sinit_num_ostreams;
+ __u16 sinit_max_instreams;
+
+ /* This is the first sequence number I used. */
+ __u32 initial_tsn;
+
+ /* This holds the originating address of the INIT packet. */
+ union sctp_addr peer_addr;
+
+ /* IG Section 2.35.3
+ * Include the source port of the INIT-ACK
+ */
+ __u16 my_port;
+
+ __u8 prsctp_capable;
+
+ /* Padding for future use */
+ __u8 padding;
+
+ __u32 adaption_ind;
+
+
+ /* This is a shim for my peer's INIT packet, followed by
+ * a copy of the raw address list of the association.
+ * The length of the raw address list is saved in the
+ * raw_addr_list_len field, which will be used at the time when
+ * the association TCB is re-constructed from the cookie.
+ */
+ __u32 raw_addr_list_len;
+ struct sctp_init_chunk peer_init[0];
+};
+
+
+/* The format of our cookie that we send to our peer. */
+struct sctp_signed_cookie {
+ __u8 signature[SCTP_SECRET_SIZE];
+ struct sctp_cookie c;
+};
+
+/* This is another convenience type to allocate memory for address
+ * params for the maximum size and pass such structures around
+ * internally.
+ */
+union sctp_addr_param {
+ struct sctp_ipv4addr_param v4;
+ struct sctp_ipv6addr_param v6;
+};
+
+/* A convenience type to allow walking through the various
+ * parameters and avoid casting all over the place.
+ */
+union sctp_params {
+ void *v;
+ struct sctp_paramhdr *p;
+ struct sctp_cookie_preserve_param *life;
+ struct sctp_hostname_param *dns;
+ struct sctp_cookie_param *cookie;
+ struct sctp_supported_addrs_param *sat;
+ struct sctp_ipv4addr_param *v4;
+ struct sctp_ipv6addr_param *v6;
+ union sctp_addr_param *addr;
+ struct sctp_adaption_ind_param *aind;
+};
+
+/* RFC 2960. Section 3.3.5 Heartbeat.
+ * Heartbeat Information: variable length
+ * The Sender-specific Heartbeat Info field should normally include
+ * information about the sender's current time when this HEARTBEAT
+ * chunk is sent and the destination transport address to which this
+ * HEARTBEAT is sent (see Section 8.3).
+ */
+typedef struct sctp_sender_hb_info {
+ struct sctp_paramhdr param_hdr;
+ union sctp_addr daddr;
+ unsigned long sent_at;
+} __attribute__((packed)) sctp_sender_hb_info_t;
+
+/*
+ * RFC 2960 1.3.2 Sequenced Delivery within Streams
+ *
+ * The term "stream" is used in SCTP to refer to a sequence of user
+ * messages that are to be delivered to the upper-layer protocol in
+ * order with respect to other messages within the same stream. This is
+ * in contrast to its usage in TCP, where it refers to a sequence of
+ * bytes (in this document a byte is assumed to be eight bits).
+ * ...
+ *
+ * This is the structure we use to track both our outbound and inbound
+ * SSN, or Stream Sequence Numbers.
+ */
+
+struct sctp_stream {
+ __u16 *ssn;
+ unsigned int len;
+};
+
+struct sctp_ssnmap {
+ struct sctp_stream in;
+ struct sctp_stream out;
+ int malloced;
+};
+
+struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int gfp);
+void sctp_ssnmap_free(struct sctp_ssnmap *map);
+void sctp_ssnmap_clear(struct sctp_ssnmap *map);
+
+/* What is the current SSN number for this stream? */
+static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id)
+{
+ return stream->ssn[id];
+}
+
+/* Return the next SSN number for this stream. */
+static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
+{
+ return stream->ssn[id]++;
+}
+
+/* Skip over this ssn and all below. */
+static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id,
+ __u16 ssn)
+{
+ stream->ssn[id] = ssn+1;
+}
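+
+/* Worked example (illustrative, not part of the original header): if
+ * stream->ssn[id] currently holds 7, then
+ *
+ *	sctp_ssn_peek(stream, id)     returns 7 and leaves the counter alone,
+ *	sctp_ssn_next(stream, id)     returns 7 and advances the counter to 8,
+ *	sctp_ssn_skip(stream, id, 9)  makes the next assigned SSN 10.
+ */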
+
+/*
+ * Pointers to address related SCTP functions.
+ * (i.e. things that depend on the address family.)
+ */
+struct sctp_af {
+ int (*sctp_xmit) (struct sk_buff *skb,
+ struct sctp_transport *,
+ int ipfragok);
+ int (*setsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int optlen);
+ int (*getsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+ struct dst_entry *(*get_dst) (struct sctp_association *asoc,
+ union sctp_addr *daddr,
+ union sctp_addr *saddr);
+ void (*get_saddr) (struct sctp_association *asoc,
+ struct dst_entry *dst,
+ union sctp_addr *daddr,
+ union sctp_addr *saddr);
+ void (*copy_addrlist) (struct list_head *,
+ struct net_device *);
+ void (*dst_saddr) (union sctp_addr *saddr,
+ struct dst_entry *dst,
+ unsigned short port);
+ int (*cmp_addr) (const union sctp_addr *addr1,
+ const union sctp_addr *addr2);
+ void (*addr_copy) (union sctp_addr *dst,
+ union sctp_addr *src);
+ void (*from_skb) (union sctp_addr *,
+ struct sk_buff *skb,
+ int saddr);
+ void (*from_sk) (union sctp_addr *,
+ struct sock *sk);
+ void (*to_sk_saddr) (union sctp_addr *,
+ struct sock *sk);
+ void (*to_sk_daddr) (union sctp_addr *,
+ struct sock *sk);
+ void (*from_addr_param) (union sctp_addr *,
+ union sctp_addr_param *,
+ __u16 port, int iif);
+ int (*to_addr_param) (const union sctp_addr *,
+ union sctp_addr_param *);
+ int (*addr_valid) (union sctp_addr *,
+ struct sctp_sock *);
+ sctp_scope_t (*scope) (union sctp_addr *);
+ void (*inaddr_any) (union sctp_addr *, unsigned short);
+ int (*is_any) (const union sctp_addr *);
+ int (*available) (union sctp_addr *,
+ struct sctp_sock *);
+ int (*skb_iif) (const struct sk_buff *sk);
+ int (*is_ce) (const struct sk_buff *sk);
+ void (*seq_dump_addr)(struct seq_file *seq,
+ union sctp_addr *addr);
+ __u16 net_header_len;
+ int sockaddr_len;
+ sa_family_t sa_family;
+ struct list_head list;
+};
+
+struct sctp_af *sctp_get_af_specific(sa_family_t);
+int sctp_register_af(struct sctp_af *);
+
+/* Protocol family functions. */
+struct sctp_pf {
+ void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
+ void (*skb_msgname) (struct sk_buff *, char *, int *);
+ int (*af_supported) (sa_family_t, struct sctp_sock *);
+ int (*cmp_addr) (const union sctp_addr *,
+ const union sctp_addr *,
+ struct sctp_sock *);
+ int (*bind_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*send_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*supported_addrs)(const struct sctp_sock *, __u16 *);
+ struct sock *(*create_accept_sk) (struct sock *sk,
+ struct sctp_association *asoc);
+ void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
+ struct sctp_af *af;
+};
+
+
+/* Structure to track chunk fragments that have been acked, but peer
+ * fragments of the same message have not.
+ */
+struct sctp_datamsg {
+ /* Chunks waiting to be submitted to lower layer. */
+ struct list_head chunks;
+ /* Chunks that have been transmitted. */
+ struct list_head track;
+ /* Reference counting. */
+ atomic_t refcnt;
+ /* When is this message no longer interesting to the peer? */
+ unsigned long expires_at;
+	/* Did the message fail to send? */
+ int send_error;
+ char send_failed;
+ /* Control whether chunks from this message can be abandoned. */
+ char can_abandon;
+};
+
+struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
+ struct sctp_sndrcvinfo *,
+ struct msghdr *, int len);
+void sctp_datamsg_put(struct sctp_datamsg *);
+void sctp_datamsg_free(struct sctp_datamsg *);
+void sctp_datamsg_track(struct sctp_chunk *);
+void sctp_chunk_fail(struct sctp_chunk *, int error);
+int sctp_chunk_abandoned(struct sctp_chunk *);
+
+
+/* RFC2960 1.4 Key Terms
+ *
+ * o Chunk: A unit of information within an SCTP packet, consisting of
+ * a chunk header and chunk-specific content.
+ *
+ * As a matter of convenience, we remember the SCTP common header for
+ * each chunk as well as a few other header pointers...
+ */
+struct sctp_chunk {
+ /* These first three elements MUST PRECISELY match the first
+ * three elements of struct sk_buff. This allows us to reuse
+ * all the skb_* queue management functions.
+ */
+ struct sctp_chunk *next;
+ struct sctp_chunk *prev;
+ struct sk_buff_head *list;
+ atomic_t refcnt;
+
+ /* This is our link to the per-transport transmitted list. */
+ struct list_head transmitted_list;
+
+ /* This field is used by chunks that hold fragmented data.
+ * For the first fragment this is the list that holds the rest of
+ * fragments. For the remaining fragments, this is the link to the
+ * frag_list maintained in the first fragment.
+ */
+ struct list_head frag_list;
+
+ /* This points to the sk_buff containing the actual data. */
+ struct sk_buff *skb;
+
+ /* These are the SCTP headers by reverse order in a packet.
+ * Note that some of these may happen more than once. In that
+ * case, we point at the "current" one, whatever that means
+ * for that level of header.
+ */
+
+ /* We point this at the FIRST TLV parameter to chunk_hdr. */
+ union sctp_params param_hdr;
+ union {
+ __u8 *v;
+ struct sctp_datahdr *data_hdr;
+ struct sctp_inithdr *init_hdr;
+ struct sctp_sackhdr *sack_hdr;
+ struct sctp_heartbeathdr *hb_hdr;
+ struct sctp_sender_hb_info *hbs_hdr;
+ struct sctp_shutdownhdr *shutdown_hdr;
+ struct sctp_signed_cookie *cookie_hdr;
+ struct sctp_ecnehdr *ecne_hdr;
+ struct sctp_cwrhdr *ecn_cwr_hdr;
+ struct sctp_errhdr *err_hdr;
+ struct sctp_addiphdr *addip_hdr;
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ } subh;
+
+ __u8 *chunk_end;
+
+ struct sctp_chunkhdr *chunk_hdr;
+ struct sctphdr *sctp_hdr;
+
+ /* This needs to be recoverable for SCTP_SEND_FAILED events. */
+ struct sctp_sndrcvinfo sinfo;
+
+ /* Which association does this belong to? */
+ struct sctp_association *asoc;
+
+ /* What endpoint received this chunk? */
+ struct sctp_ep_common *rcvr;
+
+ /* We fill this in if we are calculating RTT. */
+ unsigned long sent_at;
+
+ /* What is the origin IP address for this chunk? */
+ union sctp_addr source;
+ /* Destination address for this chunk. */
+ union sctp_addr dest;
+
+ /* For outbound message, track all fragments for SEND_FAILED. */
+ struct sctp_datamsg *msg;
+
+ /* For an inbound chunk, this tells us where it came from.
+ * For an outbound chunk, it tells us where we'd like it to
+ * go. It is NULL if we have no preference.
+ */
+ struct sctp_transport *transport;
+
+ __u8 rtt_in_progress; /* Is this chunk used for RTT calculation? */
+ __u8 resent; /* Has this chunk ever been retransmitted. */
+ __u8 has_tsn; /* Does this chunk have a TSN yet? */
+ __u8 has_ssn; /* Does this chunk have a SSN yet? */
+ __u8 singleton; /* Was this the only chunk in the packet? */
+ __u8 end_of_packet; /* Was this the last chunk in the packet? */
+ __u8 ecn_ce_done; /* Have we processed the ECN CE bit? */
+ __u8 pdiscard; /* Discard the whole packet now? */
+ __u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */
+ __u8 fast_retransmit; /* Is this chunk fast retransmitted? */
+ __u8 tsn_missing_report; /* Data chunk missing counter. */
+};
+
+void sctp_chunk_hold(struct sctp_chunk *);
+void sctp_chunk_put(struct sctp_chunk *);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
+ struct iovec *data);
+void sctp_chunk_free(struct sctp_chunk *);
+void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+struct sctp_chunk *sctp_chunkify(struct sk_buff *,
+ const struct sctp_association *,
+ struct sock *);
+void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
+ union sctp_addr *);
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
+
+/* This is a structure for holding either an IPv6 or an IPv4 address. */
+/* sin_family -- AF_INET or AF_INET6
+ * sin_port -- ordinary port number
+ * sin_addr -- cast to either (struct in_addr) or (struct in6_addr)
+ */
+struct sctp_sockaddr_entry {
+ struct list_head list;
+ union sctp_addr a;
+};
+
+typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
+
+/* This structure holds lists of chunks as we are assembling for
+ * transmission.
+ */
+struct sctp_packet {
+ /* These are the SCTP header values (host order) for the packet. */
+ __u16 source_port;
+ __u16 destination_port;
+ __u32 vtag;
+
+ /* This contains the payload chunks. */
+ struct sk_buff_head chunks;
+
+ /* This is the overhead of the sctp and ip headers. */
+ size_t overhead;
+ /* This is the total size of all chunks INCLUDING padding. */
+ size_t size;
+
+ /* The packet is destined for this transport address.
+ * The function we finally use to pass down to the next lower
+ * layer lives in the transport structure.
+ */
+ struct sctp_transport *transport;
+
+ /* This packet contains a COOKIE-ECHO chunk. */
+ char has_cookie_echo;
+
+	/* This packet contains a SACK chunk. */
+ char has_sack;
+
+ /* SCTP cannot fragment this packet. So let ip fragment it. */
+ char ipfragok;
+
+ int malloced;
+};
+
+struct sctp_packet *sctp_packet_init(struct sctp_packet *,
+ struct sctp_transport *,
+ __u16 sport, __u16 dport);
+struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
+sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *,
+ struct sctp_chunk *);
+sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *,
+ struct sctp_chunk *);
+int sctp_packet_transmit(struct sctp_packet *);
+void sctp_packet_free(struct sctp_packet *);
+
+static inline int sctp_packet_empty(struct sctp_packet *packet)
+{
+ return (packet->size == packet->overhead);
+}
+
+/* This represents a remote transport address.
+ * For local transport addresses, we just use union sctp_addr.
+ *
+ * RFC2960 Section 1.4 Key Terms
+ *
+ * o Transport address: A Transport Address is traditionally defined
+ * by Network Layer address, Transport Layer protocol and Transport
+ * Layer port number. In the case of SCTP running over IP, a
+ * transport address is defined by the combination of an IP address
+ * and an SCTP port number (where SCTP is the Transport protocol).
+ *
+ * RFC2960 Section 7.1 SCTP Differences from TCP Congestion control
+ *
+ * o The sender keeps a separate congestion control parameter set for
+ * each of the destination addresses it can send to (not each
+ * source-destination pair but for each destination). The parameters
+ * should decay if the address is not used for a long enough time
+ * period.
+ *
+ */
+struct sctp_transport {
+ /* A list of transports. */
+ struct list_head transports;
+
+ /* Reference counting. */
+ atomic_t refcnt;
+ int dead;
+
+ /* This is the peer's IP address and port. */
+ union sctp_addr ipaddr;
+
+ /* These are the functions we call to handle LLP stuff. */
+ struct sctp_af *af_specific;
+
+ /* Which association do we belong to? */
+ struct sctp_association *asoc;
+
+ /* RFC2960
+ *
+ * 12.3 Per Transport Address Data
+ *
+ * For each destination transport address in the peer's
+ * address list derived from the INIT or INIT ACK chunk, a
+ * number of data elements needs to be maintained including:
+ */
+ __u32 rtt; /* This is the most recent RTT. */
+
+ /* RTO : The current retransmission timeout value. */
+ __u32 rto;
+
+ /* RTTVAR : The current RTT variation. */
+ __u32 rttvar;
+
+ /* SRTT : The current smoothed round trip time. */
+ __u32 srtt;
+
+ /* RTO-Pending : A flag used to track if one of the DATA
+ * chunks sent to this address is currently being
+ * used to compute a RTT. If this flag is 0,
+ * the next DATA chunk sent to this destination
+ * should be used to compute a RTT and this flag
+ * should be set. Every time the RTT
+ * calculation completes (i.e. the DATA chunk
+ * is SACK'd) clear this flag.
+ */
+ int rto_pending;
+
+ /*
+ * These are the congestion stats.
+ */
+ /* cwnd : The current congestion window. */
+ __u32 cwnd; /* This is the actual cwnd. */
+
+ /* ssthresh : The current slow start threshold value. */
+ __u32 ssthresh;
+
+ /* partial : The tracking method for increase of cwnd when in
+ * bytes acked : congestion avoidance mode (see Section 6.2.2)
+ */
+ __u32 partial_bytes_acked;
+
+ /* Data that has been sent, but not acknowledged. */
+ __u32 flight_size;
+
+ /* PMTU : The current known path MTU. */
+ __u32 pmtu;
+
+ /* Destination */
+ struct dst_entry *dst;
+ /* Source address. */
+ union sctp_addr saddr;
+
+ /* When was the last time(in jiffies) that a data packet was sent on
+ * this transport? This is used to adjust the cwnd when the transport
+ * becomes inactive.
+ */
+ unsigned long last_time_used;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval.
+ */
+ int hb_interval;
+
+ /* When was the last time (in jiffies) that we heard from this
+ * transport? We use this to pick new active and retran paths.
+ */
+ unsigned long last_time_heard;
+
+ /* Last time(in jiffies) when cwnd is reduced due to the congestion
+ * indication based on ECNE chunk.
+ */
+ unsigned long last_time_ecne_reduced;
+
+ /* active : The current active state of this destination,
+ * : i.e. DOWN, UP, etc.
+ */
+ int active;
+
+ /* hb_allowed : The current heartbeat state of this destination,
+ * : i.e. ALLOW-HB, NO-HEARTBEAT, etc.
+ */
+ int hb_allowed;
+
+ /* These are the error stats for this destination. */
+
+ /* Error count : The current error count for this destination. */
+ unsigned short error_count;
+
+ /* This is the max_retrans value for the transport and will
+ * be initialized to proto.max_retrans.path. This can be changed
+ * using SCTP_SET_PEER_ADDR_PARAMS socket option.
+ */
+ int max_retrans;
+
+ /* Per : A timer used by each destination.
+ * Destination :
+ * Timer :
+ *
+ * [Everywhere else in the text this is called T3-rtx. -ed]
+ */
+ struct timer_list T3_rtx_timer;
+
+ /* Heartbeat timer is per destination. */
+ struct timer_list hb_timer;
+
+ /* Since we're using per-destination retransmission timers
+ * (see above), we're also using per-destination "transmitted"
+ * queues. This probably ought to be a private struct
+ * accessible only within the outqueue, but it's not, yet.
+ */
+ struct list_head transmitted;
+
+ /* We build bundle-able packets for this transport here. */
+ struct sctp_packet packet;
+
+ /* This is the list of transports that have chunks to send. */
+ struct list_head send_ready;
+
+ int malloced; /* Is this structure kfree()able? */
+
+ /* State information saved for SFR_CACC algorithm. The key
+ * idea in SFR_CACC is to maintain state at the sender on a
+ * per-destination basis when a changeover happens.
+ * char changeover_active;
+ * char cycling_changeover;
+ * __u32 next_tsn_at_change;
+ * char cacc_saw_newack;
+ */
+ struct {
+ /* An unsigned integer, which stores the next TSN to be
+ * used by the sender, at the moment of changeover.
+ */
+ __u32 next_tsn_at_change;
+
+ /* A flag which indicates the occurrence of a changeover */
+ char changeover_active;
+
+ /* A flag which indicates whether the change of primary is
+ * the first switch to this destination address during an
+ * active switch.
+ */
+ char cycling_changeover;
+
+ /* A temporary flag, which is used during the processing of
+ * a SACK to estimate the causative TSN(s)'s group.
+ */
+ char cacc_saw_newack;
+ } cacc;
+};
+
+struct sctp_transport *sctp_transport_new(const union sctp_addr *, int);
+void sctp_transport_set_owner(struct sctp_transport *,
+ struct sctp_association *);
+void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
+ struct sctp_sock *);
+void sctp_transport_pmtu(struct sctp_transport *);
+void sctp_transport_free(struct sctp_transport *);
+void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_hold(struct sctp_transport *);
+void sctp_transport_put(struct sctp_transport *);
+void sctp_transport_update_rto(struct sctp_transport *, __u32);
+void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
+void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
+unsigned long sctp_transport_timeout(struct sctp_transport *);
+
+
+/* This is the structure we use to queue packets as they come into
+ * SCTP. We write packets to it and read chunks from it.
+ */
+struct sctp_inq {
+ /* This is actually a queue of sctp_chunk each
+ * containing a partially decoded packet.
+ */
+ struct sk_buff_head in;
+ /* This is the packet which is currently off the in queue and is
+ * being worked on through the inbound chunk processing.
+ */
+ struct sctp_chunk *in_progress;
+
+ /* This is the delayed task to finish delivering inbound
+ * messages.
+ */
+ struct work_struct immediate;
+
+ int malloced; /* Is this structure kfree()able? */
+};
+
+void sctp_inq_init(struct sctp_inq *);
+void sctp_inq_free(struct sctp_inq *);
+void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
+void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+
+/* This is the structure we use to hold outbound chunks. You push
+ * chunks in and they automatically pop out the other end as bundled
+ * packets (it calls (*output_handler)()).
+ *
+ * This structure covers sections 6.3, 6.4, 6.7, 6.8, 6.10, 7., 8.1,
+ * and 8.2 of the v13 draft.
+ *
+ * It handles retransmissions. The connection to the timeout portion
+ * of the state machine is through sctp_..._timeout() and timeout_handler.
+ *
+ * If you feed it SACKs, it will eat them.
+ *
+ * If you give it big chunks, it will fragment them.
+ *
+ * It assigns TSN's to data chunks. This happens at the last possible
+ * instant before transmission.
+ *
+ * When free()'d, it empties itself out via output_handler().
+ */
+struct sctp_outq {
+ struct sctp_association *asoc;
+
+ /* Data pending that has never been transmitted. */
+ struct sk_buff_head out;
+
+ unsigned out_qlen; /* Total length of queued data chunks. */
+
+	/* Error of a failed send; may be used in the SCTP_SEND_FAILED event. */
+ unsigned error;
+
+ /* These are control chunks we want to send. */
+ struct sk_buff_head control;
+
+ /* These are chunks that have been sacked but are above the
+ * CTSN, or cumulative tsn ack point.
+ */
+ struct list_head sacked;
+
+ /* Put chunks on this list to schedule them for
+ * retransmission.
+ */
+ struct list_head retransmit;
+
+ /* Put chunks on this list to save them for FWD TSN processing as
+ * they were abandoned.
+ */
+ struct list_head abandoned;
+
+	/* How many unacked bytes do we have in-flight? */
+ __u32 outstanding_bytes;
+
+ /* Corked? */
+ char cork;
+
+ /* Is this structure empty? */
+ char empty;
+
+ /* Are we kfree()able? */
+ char malloced;
+};
+
+void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
+void sctp_outq_teardown(struct sctp_outq *);
+void sctp_outq_free(struct sctp_outq*);
+int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk);
+int sctp_outq_flush(struct sctp_outq *, int);
+int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *);
+int sctp_outq_is_empty(const struct sctp_outq *);
+void sctp_outq_restart(struct sctp_outq *);
+
+void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
+ sctp_retransmit_reason_t);
+void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
+int sctp_outq_uncork(struct sctp_outq *);
+/* Cork the outqueue: hold queued chunks for bundling until
+ * sctp_outq_uncork() flushes them.
+ */
+static inline void sctp_outq_cork(struct sctp_outq *q)
+{
+ q->cork = 1;
+}
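+
+/* Illustrative usage sketch (assumption, not part of the original header):
+ * callers that want several chunks bundled into as few packets as possible
+ * can cork the queue, tail the chunks, then uncork to flush them, roughly:
+ *
+ *	sctp_outq_cork(&asoc->outqueue);
+ *	sctp_outq_tail(&asoc->outqueue, chunk1);
+ *	sctp_outq_tail(&asoc->outqueue, chunk2);
+ *	sctp_outq_uncork(&asoc->outqueue);
+ */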
+
+/* These bind address data fields common between endpoints and associations */
+struct sctp_bind_addr {
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * SCTP Port: The local SCTP port number the endpoint is
+ * bound to.
+ */
+ __u16 port;
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * Address List: The list of IP addresses that this instance
+ * has bound. This information is passed to one's
+ * peer(s) in INIT and INIT ACK chunks.
+ */
+ struct list_head address_list;
+
+ int malloced; /* Are we kfree()able? */
+};
+
+void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
+void sctp_bind_addr_free(struct sctp_bind_addr *);
+int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ sctp_scope_t scope, int gfp,int flags);
+int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
+ int gfp);
+int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
+int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
+ struct sctp_sock *);
+union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
+ const union sctp_addr *addrs,
+ int addrcnt,
+ struct sctp_sock *opt);
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
+ int *addrs_len, int gfp);
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
+ __u16 port, int gfp);
+
+sctp_scope_t sctp_scope(const union sctp_addr *);
+int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_is_any(const union sctp_addr *addr);
+int sctp_addr_is_valid(const union sctp_addr *addr);
+
+
+/* What type of endpoint? */
+typedef enum {
+ SCTP_EP_TYPE_SOCKET,
+ SCTP_EP_TYPE_ASSOCIATION,
+} sctp_endpoint_type_t;
+
+/*
+ * A common base class to bridge the implementation view of a
+ * socket (usually listening) endpoint versus an association's
+ * local endpoint.
+ * This common structure is useful for several purposes:
+ * 1) Common interface for lookup routines.
+ * a) Subfunctions work for either endpoint or association
+ * b) Single interface to lookup allows hiding the lookup lock rather
+ * than acquiring it externally.
+ * 2) Common interface for the inbound chunk handling/state machine.
+ * 3) Common object handling routines for reference counting, etc.
+ * 4) Disentangle association lookup from endpoint lookup, where we
+ * do not have to find our endpoint to find our association.
+ *
+ */
+
+struct sctp_ep_common {
+ /* Fields to help us manage our entries in the hash tables. */
+ struct sctp_ep_common *next;
+ struct sctp_ep_common **pprev;
+ int hashent;
+
+ /* Runtime type information. What kind of endpoint is this? */
+ sctp_endpoint_type_t type;
+
+ /* Some fields to help us manage this object.
+ * refcnt - Reference count access to this object.
+ * dead - Do not attempt to use this object.
+ * malloced - Do we need to kfree this object?
+ */
+ atomic_t refcnt;
+ char dead;
+ char malloced;
+
+ /* What socket does this endpoint belong to? */
+ struct sock *sk;
+
+ /* This is where we receive inbound chunks. */
+ struct sctp_inq inqueue;
+
+ /* This substructure includes the defining parameters of the
+ * endpoint:
+ * bind_addr.port is our shared port number.
+ * bind_addr.address_list is our set of local IP addresses.
+ */
+ struct sctp_bind_addr bind_addr;
+
+ /* Protection during address list comparisons. */
+ rwlock_t addr_lock;
+};
+
+
+/* RFC Section 1.4 Key Terms
+ *
+ * o SCTP endpoint: The logical sender/receiver of SCTP packets. On a
+ * multi-homed host, an SCTP endpoint is represented to its peers as a
+ * combination of a set of eligible destination transport addresses to
+ * which SCTP packets can be sent and a set of eligible source
+ * transport addresses from which SCTP packets can be received.
+ * All transport addresses used by an SCTP endpoint must use the
+ * same port number, but can use multiple IP addresses. A transport
+ * address used by an SCTP endpoint must not be used by another
+ * SCTP endpoint. In other words, a transport address is unique
+ * to an SCTP endpoint.
+ *
+ * From an implementation perspective, each socket has one of these.
+ * A TCP-style socket will have exactly one association on one of
+ * these. A UDP-style socket will have multiple associations hanging
+ * off one of these.
+ */
+
+struct sctp_endpoint {
+ /* Common substructure for endpoint and association. */
+ struct sctp_ep_common base;
+
+ /* Associations: A list of current associations and mappings
+ * to the data consumers for each association. This
+ * may be in the form of a hash table or other
+ * implementation dependent structure. The data
+ * consumers may be process identification
+ * information such as file descriptors, named pipe
+ * pointer, or table pointers dependent on how SCTP
+ * is implemented.
+ */
+ /* This is really a list of struct sctp_association entries. */
+ struct list_head asocs;
+
+ /* Secret Key: A secret key used by this endpoint to compute
+ * the MAC. This SHOULD be a cryptographic quality
+ * random number with a sufficient length.
+ * Discussion in [RFC1750] can be helpful in
+ * selection of the key.
+ */
+ __u8 secret_key[SCTP_HOW_MANY_SECRETS][SCTP_SECRET_SIZE];
+ int current_key;
+ int last_key;
+ int key_changed_at;
+
+ /* Default timeouts. */
+ int timeouts[SCTP_NUM_TIMEOUT_TYPES];
+
+ /* Various thresholds. */
+
+ /* Name for debugging output... */
+ char *debug_name;
+};
+
+/* Recover the outer endpoint structure. */
+static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
+{
+ struct sctp_endpoint *ep;
+
+ ep = container_of(base, struct sctp_endpoint, base);
+ return ep;
+}
+
+/* These are function signatures for manipulating endpoints. */
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, int);
+void sctp_endpoint_free(struct sctp_endpoint *);
+void sctp_endpoint_put(struct sctp_endpoint *);
+void sctp_endpoint_hold(struct sctp_endpoint *);
+void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+struct sctp_association *sctp_endpoint_lookup_assoc(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr,
+ struct sctp_transport **);
+int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
+ const union sctp_addr *);
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
+ const union sctp_addr *);
+int sctp_has_association(const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
+
+int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
+ sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
+ struct sctp_chunk **err_chunk);
+int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
+ const union sctp_addr *peer,
+ sctp_init_chunk_t *init, int gfp);
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);
+
+
+/* RFC2960
+ *
+ * 12. Recommended Transmission Control Block (TCB) Parameters
+ *
+ * This section details a recommended set of parameters that should
+ * be contained within the TCB for an implementation. This section is
+ * for illustrative purposes and should not be deemed as requirements
+ * on an implementation or as an exhaustive list of all parameters
+ * inside an SCTP TCB. Each implementation may need its own additional
+ * parameters for optimization.
+ */
+
+
+/* Here we have information about each individual association. */
+struct sctp_association {
+
+ /* A base structure common to endpoint and association.
+	 * In this context, it represents the association's view
+ * of the local endpoint of the association.
+ */
+ struct sctp_ep_common base;
+
+ /* Associations on the same socket. */
+ struct list_head asocs;
+
+ /* association id. */
+ sctp_assoc_t assoc_id;
+
+ /* This is our parent endpoint. */
+ struct sctp_endpoint *ep;
+
+ /* These are those association elements needed in the cookie. */
+ struct sctp_cookie c;
+
+ /* This is all information about our peer. */
+ struct {
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
+ /* transport_addr_list
+ *
+ * Peer : A list of SCTP transport addresses that the
+ * Transport : peer is bound to. This information is derived
+ * Address : from the INIT or INIT ACK and is used to
+ * List : associate an inbound packet with a given
+ * : association. Normally this information is
+ * : hashed or keyed for quick lookup and access
+ * : of the TCB.
+ *
+ * It is a list of SCTP_transport's.
+ */
+ struct list_head transport_addr_list;
+
+ /* port
+ * The transport layer port number.
+ */
+ __u16 port;
+
+ /* primary_path
+ *
+ * Primary : This is the current primary destination
+ * Path : transport address of the peer endpoint. It
+ * : may also specify a source transport address
+ * : on this endpoint.
+ *
+ * All of these paths live on transport_addr_list.
+ *
+ * At the bakeoffs, we discovered that the intent of
+ * primaryPath is that it only changes when the ULP
+ * asks to have it changed. We add the activePath to
+ * designate the connection we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *primary_path;
+
+ /* Cache the primary path address here, when we
+		 * need an address for msg_name.
+ */
+ union sctp_addr primary_addr;
+
+ /* active_path
+ * The path that we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *active_path;
+
+ /* retran_path
+ *
+ * RFC2960 6.4 Multi-homed SCTP Endpoints
+ * ...
+ * Furthermore, when its peer is multi-homed, an
+ * endpoint SHOULD try to retransmit a chunk to an
+ * active destination transport address that is
+ * different from the last destination address to
+ * which the DATA chunk was sent.
+ */
+ struct sctp_transport *retran_path;
+
+ /* Pointer to last transport I have sent on. */
+ struct sctp_transport *last_sent_to;
+
+ /* This is the last transport I have received DATA on. */
+ struct sctp_transport *last_data_from;
+
+ /*
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ *
+ * Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text. This value is hidden
+ * in tsn_map--we get it by calling sctp_tsnmap_get_ctsn().
+ */
+ struct sctp_tsnmap tsn_map;
+ __u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)];
+
+ /* Ack State : This flag indicates if the next received
+ * : packet is to be responded to with a
+		 *             : SACK. This is initialized to 0. When a packet
+ * : is received it is incremented. If this value
+ * : reaches 2 or more, a SACK is sent and the
+ * : value is reset to 0. Note: This is used only
+ * : when no DATA chunks are received out of
+ * : order. When DATA chunks are out of order,
+ * : SACK's are not delayed (see Section 6).
+ */
+ __u8 sack_needed; /* Do we need to sack the peer? */
+
+ /* These are capabilities which our peer advertised. */
+ __u8 ecn_capable; /* Can peer do ECN? */
+ __u8 ipv4_address; /* Peer understands IPv4 addresses? */
+ __u8 ipv6_address; /* Peer understands IPv6 addresses? */
+ __u8 hostname_address;/* Peer understands DNS addresses? */
+ __u8 asconf_capable; /* Does peer support ADDIP? */
+ __u8 prsctp_capable; /* Can peer do PR-SCTP? */
+
+ __u32 adaption_ind; /* Adaption Code point. */
+
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __u16 addip_disabled_mask;
+
+ struct sctp_inithdr i;
+ int cookie_len;
+ void *cookie;
+
+ /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
+		 * C1) ... 'Peer-Serial-Number'. This value MUST be initialized to the
+ * Initial TSN Value minus 1
+ */
+ __u32 addip_serial;
+ } peer;
+
+ /* State : A state variable indicating what state the
+ * : association is in, i.e. COOKIE-WAIT,
+ * : COOKIE-ECHOED, ESTABLISHED, SHUTDOWN-PENDING,
+ * : SHUTDOWN-SENT, SHUTDOWN-RECEIVED, SHUTDOWN-ACK-SENT.
+ *
+ * Note: No "CLOSED" state is illustrated since if a
+ * association is "CLOSED" its TCB SHOULD be removed.
+ *
+ * In this implementation we DO have a CLOSED
+ * state which is used during initiation and shutdown.
+ *
+ * State takes values from SCTP_STATE_*.
+ */
+ sctp_state_t state;
+
+ /* The cookie life I award for any cookie. */
+ struct timeval cookie_life;
+
+ /* Overall : The overall association error count.
+ * Error Count : [Clear this any time I get something.]
+ */
+ int overall_error_count;
+
+ /* These are the association's initial, max, and min RTO values.
+ * These values will be initialized by system defaults, but can
+ * be modified via the SCTP_RTOINFO socket option.
+ */
+ __u32 rto_initial;
+ __u32 rto_max;
+ __u32 rto_min;
+
+ /* Maximum number of new data packets that can be sent in a burst. */
+ int max_burst;
+
+ /* This is the max_retrans value for the association. This value will
+	 * be initialized from system defaults, but can be
+ * modified by the SCTP_ASSOCINFO socket option.
+ */
+ int max_retrans;
+
+ /* Maximum number of times the endpoint will retransmit INIT */
+ __u16 max_init_attempts;
+
+ /* How many times have we resent an INIT? */
+ __u16 init_retries;
+
+ /* The largest timeout or RTO value to use in attempting an INIT */
+ __u16 max_init_timeo;
+
+ int timeouts[SCTP_NUM_TIMEOUT_TYPES];
+ struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
+
+ /* Transport to which SHUTDOWN chunk was last sent. */
+ struct sctp_transport *shutdown_last_sent_to;
+
+ /* Next TSN : The next TSN number to be assigned to a new
+ * : DATA chunk. This is sent in the INIT or INIT
+ * : ACK chunk to the peer and incremented each
+ * : time a DATA chunk is assigned a TSN
+ * : (normally just prior to transmit or during
+ * : fragmentation).
+ */
+ __u32 next_tsn;
+
+ /*
+ * Last Rcvd : This is the last TSN received in sequence. This value
+ * TSN : is set initially by taking the peer's Initial TSN,
+ * : received in the INIT or INIT ACK chunk, and
+ * : subtracting one from it.
+ *
+ * Most of RFC 2960 refers to this as the Cumulative TSN Ack Point.
+ */
+
+ __u32 ctsn_ack_point;
+
+ /* PR-SCTP Advanced.Peer.Ack.Point */
+ __u32 adv_peer_ack_point;
+
+ /* Highest TSN that is acknowledged by incoming SACKs. */
+ __u32 highest_sacked;
+
+ /* The number of unacknowledged data chunks. Reported through
+ * the SCTP_STATUS sockopt.
+ */
+ __u16 unack_data;
+
+ /* This is the association's receive buffer space. This value is used
+ * to set a_rwnd field in an INIT or a SACK chunk.
+ */
+ __u32 rwnd;
+
+ /* This is the last advertised value of rwnd over a SACK chunk. */
+ __u32 a_rwnd;
+
+ /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
+ * to slop over a maximum of the association's frag_point.
+ */
+ __u32 rwnd_over;
+
+ /* This is the sndbuf size in use for the association.
+ * This corresponds to the sndbuf size for the association,
+ * as specified in the sk->sndbuf.
+ */
+ int sndbuf_used;
+
+ /* This is the wait queue head for send requests waiting on
+ * the association sndbuf space.
+ */
+ wait_queue_head_t wait;
+
+ /* Association : The smallest PMTU discovered for all of the
+ * PMTU : peer's transport addresses.
+ */
+ __u32 pmtu;
+
+ /* The message size at which SCTP fragmentation will occur. */
+ __u32 frag_point;
+
+ /* Currently only one counter is used to count INIT errors. */
+ int counters[SCTP_NUMBER_COUNTERS];
+
+ /* Default send parameters. */
+ __u16 default_stream;
+ __u16 default_flags;
+ __u32 default_ppid;
+ __u32 default_context;
+ __u32 default_timetolive;
+
+ /* This tracks outbound ssn for a given stream. */
+ struct sctp_ssnmap *ssnmap;
+
+ /* All outbound chunks go through this structure. */
+ struct sctp_outq outqueue;
+
+ /* A smart pipe that will handle reordering and fragmentation,
+ * as well as handle passing events up to the ULP.
+ */
+ struct sctp_ulpq ulpq;
+
+ /* Last TSN that caused an ECNE Chunk to be sent. */
+ __u32 last_ecne_tsn;
+
+ /* Last TSN that caused a CWR Chunk to be sent. */
+ __u32 last_cwr_tsn;
+
+ /* How many duplicated TSNs have we seen? */
+ int numduptsns;
+
+ /* Number of seconds of idle time before an association is closed. */
+ __u32 autoclose;
+
+ /* These are to support
+ * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
+ * and Enforcement of Flow and Message Limits"
+ * <draft-ietf-tsvwg-addip-sctp-02.txt>
+ * or "ADDIP" for short.
+ */
+
+
+
+ /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ * [This is our one-and-only-one ASCONF in flight. If we do
+ * not have an ASCONF in flight, this is NULL.]
+ */
+ struct sctp_chunk *addip_last_asconf;
+
+ /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
+ *
+ * IMPLEMENTATION NOTE: As an optimization a receiver may wish
+ * to save the last ASCONF-ACK for some predetermined period
+ * of time and instead of re-processing the ASCONF (with the
+ * same serial number) it may just re-transmit the
+ * ASCONF-ACK. It may wish to use the arrival of a new serial
+ * number to discard the previously saved ASCONF-ACK or any
+ * other means it may choose to expire the saved ASCONF-ACK.
+ *
+ * [This is our saved ASCONF-ACK. We invalidate it when a new
+ * ASCONF serial number arrives.]
+ */
+ struct sctp_chunk *addip_last_asconf_ack;
+
+ /* These ASCONF chunks are waiting to be sent.
+ *
+	 * These chunks can't be pushed to the outqueue until the
+	 * ASCONF-ACK for the previous ASCONF (indicated by
+	 * addip_last_asconf) has been received, so as to guarantee
+	 * that only one ASCONF is in flight at any time.
+ *
+ * ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * In defining the ASCONF Chunk transfer procedures, it is
+ * essential that these transfers MUST NOT cause congestion
+ * within the network. To achieve this, we place these
+ * restrictions on the transfer of ASCONF Chunks:
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ *
+ * [I really think this is EXACTLY the sort of intelligence
+ * which already resides in sctp_outq. Please move this
+ * queue and its supporting logic down there. --piggy]
+ */
+ struct sk_buff_head addip_chunks;
+
+ /* ADDIP Section 4.1 ASCONF Chunk Procedures
+ *
+ * A2) A serial number should be assigned to the Chunk. The
+ * serial number SHOULD be a monotonically increasing
+ * number. The serial number SHOULD be initialized at
+ * the start of the association to the same value as the
+ * Initial TSN and every time a new ASCONF chunk is created
+ * it is incremented by one after assigning the serial number
+ * to the newly created chunk.
+ *
+ * ADDIP
+ * 3.1.1 Address/Stream Configuration Change Chunk (ASCONF)
+ *
+ * Serial Number : 32 bits (unsigned integer)
+ *
+ * This value represents a Serial Number for the ASCONF
+ * Chunk. The valid range of Serial Number is from 0 to
+ * 4294967295 (2^32 - 1). Serial Numbers wrap back to 0
+ * after reaching 4294967295.
+ */
+ __u32 addip_serial;
+
+ /* Need to send an ECNE Chunk? */
+ char need_ecne;
+
+ /* Is it a temporary association? */
+ char temp;
+};
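Editorial note: the ADDIP serial number described above wraps modulo 2^32, so comparisons have to use circular (serial-number) arithmetic rather than a plain `<`. A minimal standalone sketch of that comparison follows; serial_lt is an illustrative name and is not part of this header.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: "a precedes b" in a 32-bit circular space, using the
 * usual signed-difference trick.  Works across the 4294967295 -> 0 wrap.
 */
static int serial_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", serial_lt(4294967295u, 0u)); /* 1: 2^32-1 precedes 0 after the wrap */
	printf("%d\n", serial_lt(7u, 3u));          /* 0: 7 does not precede 3 */
	return 0;
}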
+
+
+/* An eyecatcher for determining if we are really looking at an
+ * association data structure.
+ */
+enum {
+ SCTP_ASSOC_EYECATCHER = 0xa550c123,
+};
+
+/* Recover the outer association structure. */
+static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(base, struct sctp_association, base);
+ return asoc;
+}
+
+/* These are function signatures for manipulating associations. */
+
+
+struct sctp_association *
+sctp_association_new(const struct sctp_endpoint *, const struct sock *,
+ sctp_scope_t scope, int gfp);
+void sctp_association_free(struct sctp_association *);
+void sctp_association_put(struct sctp_association *);
+void sctp_association_hold(struct sctp_association *);
+
+struct sctp_transport *sctp_assoc_choose_shutdown_transport(
+ struct sctp_association *);
+void sctp_assoc_update_retran_path(struct sctp_association *);
+struct sctp_transport *sctp_assoc_lookup_paddr(const struct sctp_association *,
+ const union sctp_addr *);
+int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
+ const union sctp_addr *laddr);
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
+ const union sctp_addr *address,
+ const int gfp);
+void sctp_assoc_del_peer(struct sctp_association *asoc,
+ const union sctp_addr *addr);
+void sctp_assoc_control_transport(struct sctp_association *,
+ struct sctp_transport *,
+ sctp_transport_cmd_t, sctp_sn_error_t);
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
+struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+ const union sctp_addr *,
+ const union sctp_addr *);
+void sctp_assoc_migrate(struct sctp_association *, struct sock *);
+void sctp_assoc_update(struct sctp_association *old,
+ struct sctp_association *new);
+
+__u32 sctp_association_get_next_tsn(struct sctp_association *);
+
+void sctp_assoc_sync_pmtu(struct sctp_association *);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned);
+void sctp_assoc_set_primary(struct sctp_association *,
+ struct sctp_transport *);
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, int);
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
+ struct sctp_cookie*, int gfp);
+
+int sctp_cmp_addr_exact(const union sctp_addr *ss1,
+ const union sctp_addr *ss2);
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
+
+/* A convenience structure to parse out SCTP specific CMSGs. */
+typedef struct sctp_cmsgs {
+ struct sctp_initmsg *init;
+ struct sctp_sndrcvinfo *info;
+} sctp_cmsgs_t;
+
+/* Structure for tracking memory objects */
+typedef struct {
+ char *label;
+ atomic_t *counter;
+} sctp_dbg_objcnt_entry_t;
+
+#endif /* __sctp_structs_h__ */
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
new file mode 100644
index 000000000000..021947da70ea
--- /dev/null
+++ b/include/net/sctp/tsnmap.h
@@ -0,0 +1,207 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * These are the definitions needed for the tsnmap type.  The tsnmap is used
+ * to track the out-of-order TSNs that have been received.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+#include <net/sctp/constants.h>
+
+#ifndef __sctp_tsnmap_h__
+#define __sctp_tsnmap_h__
+
+/* RFC 2960 12.2 Parameters necessary per association (i.e. the TCB)
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ */
+struct sctp_tsnmap {
+	/* This array counts the number of chunks with each TSN.
+	 * It points at one of the two buffers we ping-pong between.
+	 */
+ __u8 *tsn_map;
+
+	/* This marks the TSN which overflows the tsn_map.  When the
+	 * cumulative ack point reaches it, we know we can switch
+	 * maps (tsn_map and overflow_map swap).
+	 */
+ __u32 overflow_tsn;
+
+ /* This is the overflow array for tsn_map.
+ * It points at one of the other ping-pong buffers.
+ */
+ __u8 *overflow_map;
+
+ /* This is the TSN at tsn_map[0]. */
+ __u32 base_tsn;
+
+ /* Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text.
+ */
+ __u32 cumulative_tsn_ack_point;
+
+ /* This is the minimum number of TSNs we can track. This corresponds
+ * to the size of tsn_map. Note: the overflow_map allows us to
+ * potentially track more than this quantity.
+ */
+ __u16 len;
+
+ /* This is the highest TSN we've marked. */
+ __u32 max_tsn_seen;
+
+	/* Data chunks pending receipt.  Used by the SCTP_STATUS sockopt. */
+ __u16 pending_data;
+
+ /* Record duplicate TSNs here. We clear this after
+ * every SACK. Store up to SCTP_MAX_DUP_TSNS worth of
+ * information.
+ */
+ __u32 dup_tsns[SCTP_MAX_DUP_TSNS];
+ __u16 num_dup_tsns;
+
+ /* Record gap ack block information here. */
+ struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+
+ int malloced;
+
+ __u8 raw_map[0];
+};
+
+struct sctp_tsnmap_iter {
+ __u32 start;
+};
+
+/* This macro assists in creation of external storage for variable length
+ * internal buffers. We double allocate so the overflow map works.
+ */
+#define sctp_tsnmap_storage_size(count) (sizeof(__u8) * (count) * 2)
+
+/* Initialize a block of memory as a tsnmap. */
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len,
+ __u32 initial_tsn);
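Editorial note: a kernel-style sketch of how a caller might combine the storage-size macro with sctp_tsnmap_init(). The function name, error path and gfp handling are illustrative assumptions (kmalloc from <linux/slab.h>), not code from the SCTP stack.

/* Illustrative sketch: one allocation carries the map header plus the
 * 2 * len bytes of raw_map[] that back tsn_map and overflow_map.
 */
static struct sctp_tsnmap *example_tsnmap_new(__u16 len, __u32 initial_tsn,
					      int gfp)
{
	struct sctp_tsnmap *map;

	map = kmalloc(sizeof(*map) + sctp_tsnmap_storage_size(len), gfp);
	if (!map)
		return NULL;

	return sctp_tsnmap_init(map, len, initial_tsn);
}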
+
+/* Test the tracking state of this TSN.
+ * Returns:
+ * 0 if the TSN has not yet been seen
+ * >0 if the TSN has been seen (duplicate)
+ * <0 if the TSN is invalid (too large to track)
+ */
+int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
+
+/* Mark this TSN as seen. */
+void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+
+/* Mark this TSN and all lower as seen. */
+void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
+
+/* Retrieve the Cumulative TSN ACK Point. */
+static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
+{
+ return map->cumulative_tsn_ack_point;
+}
+
+/* Retrieve the highest TSN we've seen. */
+static inline __u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
+{
+ return map->max_tsn_seen;
+}
+
+/* How many duplicate TSNs are stored? */
+static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
+{
+ return map->num_dup_tsns;
+}
+
+/* Return pointer to duplicate tsn array as needed by SACK. */
+static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
+{
+ map->num_dup_tsns = 0;
+ return map->dup_tsns;
+}
+
+/* How many gap ack blocks do we have recorded? */
+__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map);
+
+/* Refresh the count on pending data. */
+__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map);
+
+/* Return pointer to gap ack blocks as needed by SACK. */
+static inline struct sctp_gap_ack_block *sctp_tsnmap_get_gabs(struct sctp_tsnmap *map)
+{
+ return map->gabs;
+}
+
+/* Is there a gap in the TSN map? */
+static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
+{
+ int has_gap;
+
+ has_gap = (map->cumulative_tsn_ack_point != map->max_tsn_seen);
+ return has_gap;
+}
+
+/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
+ * information.
+ */
+static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
+{
+ if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
+ map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
+}
+
+/* Renege a TSN that was seen. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
+
+#endif /* __sctp_tsnmap_h__ */
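Editorial note: taken together, the accessors above supply everything a SACK builder needs. The sketch below only illustrates the intended calling order (fetch the duplicate count before sctp_tsnmap_get_dups(), which resets it); it is not the real chunk-building code.

/* Illustrative only: gather the SACK ingredients from a map. */
static void example_collect_sack_info(struct sctp_tsnmap *map)
{
	__u32 ctsn  = sctp_tsnmap_get_ctsn(map);
	__u16 ndup  = sctp_tsnmap_num_dups(map);
	__u32 *dups = sctp_tsnmap_get_dups(map);   /* resets the dup count */
	__u16 ngabs = sctp_tsnmap_num_gabs(map);
	struct sctp_gap_ack_block *gabs = sctp_tsnmap_get_gabs(map);

	/* ctsn -> Cumulative TSN Ack, ndup/dups -> Duplicate TSNs,
	 * ngabs/gabs -> Gap Ack Blocks of the outgoing SACK chunk.
	 */
	(void)ctsn; (void)ndup; (void)dups; (void)ngabs; (void)gabs;
}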
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
new file mode 100644
index 000000000000..1019d83a580a
--- /dev/null
+++ b/include/net/sctp/ulpevent.h
@@ -0,0 +1,162 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpevent type. The
+ * sctp_ulpevent type is used to carry information from the state machine
+ * upwards to the ULP.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __sctp_ulpevent_h__
+#define __sctp_ulpevent_h__
+
+/* A structure to carry information to the ULP (e.g. Sockets API) */
+/* Warning: This sits inside an skb.cb[] area. Be very careful of
+ * growing this structure as it is at the maximum limit now.
+ */
+struct sctp_ulpevent {
+ struct sctp_association *asoc;
+ __u16 stream;
+ __u16 ssn;
+ __u16 flags;
+ __u32 ppid;
+ __u32 tsn;
+ __u32 cumtsn;
+ int msg_flags;
+ int iif;
+};
+
+/* Retrieve the skb this event sits inside of. */
+static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
+{
+ return container_of((void *)ev, struct sk_buff, cb);
+}
+
+/* Retrieve & cast the event sitting inside the skb. */
+static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
+{
+ return (struct sctp_ulpevent *)skb->cb;
+}
+
+void sctp_ulpevent_free(struct sctp_ulpevent *);
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ __u16 state,
+ __u16 error,
+ __u16 outbound,
+ __u16 inbound,
+ int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
+ const struct sctp_association *asoc,
+ const struct sockaddr_storage *aaddr,
+ int flags,
+ int state,
+ int error,
+ int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ int gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const struct sctp_association *asoc,
+ __u32 indication, int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
+ const struct sctp_association *asoc, int gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ int gfp);
+
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+
+/* Is this event type enabled? */
+static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ struct sctp_event_subscribe *mask)
+{
+ char *amask = (char *) mask;
+ return amask[sn_type - SCTP_SN_TYPE_BASE];
+}
+
+/* Given an event subscription, is this event enabled? */
+static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ struct sctp_event_subscribe *mask)
+{
+ __u16 sn_type;
+ int enabled = 1;
+
+ if (sctp_ulpevent_is_notification(event)) {
+ sn_type = sctp_ulpevent_get_notification_type(event);
+ enabled = sctp_ulpevent_type_enabled(sn_type, mask);
+ }
+ return enabled;
+}
+
+#endif /* __sctp_ulpevent_h__ */
+
+
+
+
+
+
+
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
new file mode 100644
index 000000000000..961736d29d21
--- /dev/null
+++ b/include/net/sctp/ulpqueue.h
@@ -0,0 +1,92 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpq type. The
+ * sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
+ * and the core SCTP state machine. This is the component which handles
+ * reassembly and ordering.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __sctp_ulpqueue_h__
+#define __sctp_ulpqueue_h__
+
+/* A structure to carry information to the ULP (e.g. Sockets API) */
+struct sctp_ulpq {
+ char malloced;
+ char pd_mode;
+ struct sctp_association *asoc;
+ struct sk_buff_head reasm;
+ struct sk_buff_head lobby;
+};
+
+/* Prototypes. */
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
+ struct sctp_association *);
+void sctp_ulpq_free(struct sctp_ulpq *);
+
+/* Add a new DATA chunk for processing. */
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
+
+/* Add a new event for propagation to the ULP. */
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+
+/* Renege previously received chunks. */
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, int);
+
+/* Perform partial delivery. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
+
+/* Abort the partial delivery. */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, int);
+
+/* Clear the partial data delivery condition on this socket. */
+int sctp_clear_pd(struct sock *sk);
+
+/* Skip over an SSN. */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+
+#endif /* __sctp_ulpqueue_h__ */
+
+
+
+
+
+
+
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
new file mode 100644
index 000000000000..2758e8ce4f25
--- /dev/null
+++ b/include/net/sctp/user.h
@@ -0,0 +1,589 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2002 Intel Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * This header represents the structures and constants needed to support
+ * the SCTP Extension to the Sockets API.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * R. Stewart <randall@sctp.chicago.il.us>
+ * K. Morneau <kmorneau@cisco.com>
+ * Q. Xie <qxie1@email.mot.com>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#ifndef __net_sctp_user_h__
+#define __net_sctp_user_h__
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+typedef __s32 sctp_assoc_t;
+
+/* The following symbols come from the Sockets API Extensions for
+ * SCTP <draft-ietf-tsvwg-sctpsocket-07.txt>.
+ */
+enum sctp_optname {
+ SCTP_RTOINFO,
+#define SCTP_RTOINFO SCTP_RTOINFO
+ SCTP_ASSOCINFO,
+#define SCTP_ASSOCINFO SCTP_ASSOCINFO
+ SCTP_INITMSG,
+#define SCTP_INITMSG SCTP_INITMSG
+ SCTP_NODELAY, /* Get/set nodelay option. */
+#define SCTP_NODELAY SCTP_NODELAY
+ SCTP_AUTOCLOSE,
+#define SCTP_AUTOCLOSE SCTP_AUTOCLOSE
+ SCTP_SET_PEER_PRIMARY_ADDR,
+#define SCTP_SET_PEER_PRIMARY_ADDR SCTP_SET_PEER_PRIMARY_ADDR
+ SCTP_PRIMARY_ADDR,
+#define SCTP_PRIMARY_ADDR SCTP_PRIMARY_ADDR
+ SCTP_ADAPTION_LAYER,
+#define SCTP_ADAPTION_LAYER SCTP_ADAPTION_LAYER
+ SCTP_DISABLE_FRAGMENTS,
+#define SCTP_DISABLE_FRAGMENTS SCTP_DISABLE_FRAGMENTS
+ SCTP_PEER_ADDR_PARAMS,
+#define SCTP_PEER_ADDR_PARAMS SCTP_PEER_ADDR_PARAMS
+ SCTP_DEFAULT_SEND_PARAM,
+#define SCTP_DEFAULT_SEND_PARAM SCTP_DEFAULT_SEND_PARAM
+ SCTP_EVENTS,
+#define SCTP_EVENTS SCTP_EVENTS
+ SCTP_I_WANT_MAPPED_V4_ADDR, /* Turn on/off mapped v4 addresses */
+#define SCTP_I_WANT_MAPPED_V4_ADDR SCTP_I_WANT_MAPPED_V4_ADDR
+ SCTP_MAXSEG, /* Get/set maximum fragment. */
+#define SCTP_MAXSEG SCTP_MAXSEG
+ SCTP_STATUS,
+#define SCTP_STATUS SCTP_STATUS
+ SCTP_GET_PEER_ADDR_INFO,
+#define SCTP_GET_PEER_ADDR_INFO SCTP_GET_PEER_ADDR_INFO
+
+ /* Internal Socket Options. Some of the sctp library functions are
+ * implemented using these socket options.
+ */
+ SCTP_SOCKOPT_BINDX_ADD = 100,/* BINDX requests for adding addresses. */
+#define SCTP_SOCKOPT_BINDX_ADD SCTP_SOCKOPT_BINDX_ADD
+ SCTP_SOCKOPT_BINDX_REM, /* BINDX requests for removing addresses. */
+#define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM
+ SCTP_SOCKOPT_PEELOFF, /* peel off association. */
+#define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF
+	SCTP_GET_PEER_ADDRS_NUM,	/* Get number of peer addresses. */
+#define SCTP_GET_PEER_ADDRS_NUM	SCTP_GET_PEER_ADDRS_NUM
+	SCTP_GET_PEER_ADDRS, 	/* Get all peer addresses. */
+#define SCTP_GET_PEER_ADDRS	SCTP_GET_PEER_ADDRS
+	SCTP_GET_LOCAL_ADDRS_NUM, 	/* Get number of local addresses. */
+#define SCTP_GET_LOCAL_ADDRS_NUM	SCTP_GET_LOCAL_ADDRS_NUM
+	SCTP_GET_LOCAL_ADDRS, 	/* Get all local addresses. */
+#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
+};
+
+/*
+ * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
+ *
+ * This cmsghdr structure provides information for initializing new
+ * SCTP associations with sendmsg(). The SCTP_INITMSG socket option
+ * uses this same data structure. This structure is not used for
+ * recvmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ----------------------
+ * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
+ *
+ */
+struct sctp_initmsg {
+ __u16 sinit_num_ostreams;
+ __u16 sinit_max_instreams;
+ __u16 sinit_max_attempts;
+ __u16 sinit_max_init_timeo;
+};
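Editorial note: besides the SCTP_INIT cmsg, the same structure backs the SCTP_INITMSG socket option. A hedged userspace sketch follows, assuming the lksctp <netinet/sctp.h> userspace header and an SCTP-capable kernel; the stream counts are only examples.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Request up to 8 streams in each direction on a new SCTP socket. */
int example_sctp_socket(void)
{
	struct sctp_initmsg init;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0)
		return -1;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams  = 8;
	init.sinit_max_instreams = 8;

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init)) < 0) {
		/* fall back to the defaults negotiated at association setup */
	}
	return fd;
}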
+
+/*
+ * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg() and
+ * describes SCTP header information about a received message through
+ * recvmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ----------------------
+ * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
+ *
+ */
+struct sctp_sndrcvinfo {
+ __u16 sinfo_stream;
+ __u16 sinfo_ssn;
+ __u16 sinfo_flags;
+ __u32 sinfo_ppid;
+ __u32 sinfo_context;
+ __u32 sinfo_timetolive;
+ __u32 sinfo_tsn;
+ __u32 sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+};
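Editorial note: a userspace sketch of attaching this structure as SCTP_SNDRCV ancillary data on sendmsg(); the same lksctp userspace header as above is assumed, and the stream number is only an example.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Send one message on stream 2 using an SCTP_SNDRCV cmsg. */
ssize_t example_send_on_stream(int fd, const void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	char cbuf[CMSG_SPACE(sizeof(*sinfo))];

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));

	iov.iov_base = (void *)buf;
	iov.iov_len  = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type  = SCTP_SNDRCV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(*sinfo));

	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	sinfo->sinfo_stream = 2;	/* stream id to send on */

	return sendmsg(fd, &msg, 0);
}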
+
+/*
+ * sinfo_flags: 16 bits (unsigned integer)
+ *
+ * This field may contain any of the following flags and is composed of
+ * a bitwise OR of these values.
+ */
+
+enum sctp_sinfo_flags {
+ MSG_UNORDERED = 1, /* Send/receive message unordered. */
+ MSG_ADDR_OVER = 2, /* Override the primary destination. */
+ MSG_ABORT=4, /* Send an ABORT message to the peer. */
+ /* MSG_EOF is already defined per socket.h */
+};
+
+
+typedef union {
+ __u8 raw;
+ struct sctp_initmsg init;
+ struct sctp_sndrcvinfo sndrcv;
+} sctp_cmsg_data_t;
+
+/* These are cmsg_types. */
+typedef enum sctp_cmsg_type {
+ SCTP_INIT, /* 5.2.1 SCTP Initiation Structure */
+ SCTP_SNDRCV, /* 5.2.2 SCTP Header Information Structure */
+} sctp_cmsg_t;
+
+
+/*
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * Communication notifications inform the ULP that an SCTP association
+ * has either begun or ended. The identifier for a new association is
+ * provided by this notification.  The notification information has the
+ * following format:
+ *
+ */
+struct sctp_assoc_change {
+ __u16 sac_type;
+ __u16 sac_flags;
+ __u32 sac_length;
+ __u16 sac_state;
+ __u16 sac_error;
+ __u16 sac_outbound_streams;
+ __u16 sac_inbound_streams;
+ sctp_assoc_t sac_assoc_id;
+};
+
+/*
+ * sac_state: 32 bits (signed integer)
+ *
+ * This field holds one of a number of values that communicate the
+ * event that happened to the association. They include:
+ *
+ * Note: The following state names deviate from the API draft as
+ * the names clash too easily with other kernel symbols.
+ */
+enum sctp_sac_state {
+ SCTP_COMM_UP,
+ SCTP_COMM_LOST,
+ SCTP_RESTART,
+ SCTP_SHUTDOWN_COMP,
+ SCTP_CANT_STR_ASSOC,
+};
+
+/*
+ * 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * When a destination address on a multi-homed peer encounters a change,
+ * an interface details event is sent.  The information has the
+ * following structure:
+ */
+struct sctp_paddr_change {
+ __u16 spc_type;
+ __u16 spc_flags;
+ __u32 spc_length;
+ struct sockaddr_storage spc_aaddr;
+ int spc_state;
+ int spc_error;
+ sctp_assoc_t spc_assoc_id;
+};
+
+/*
+ * spc_state: 32 bits (signed integer)
+ *
+ * This field holds one of a number of values that communicate the
+ * event that happened to the address. They include:
+ */
+enum sctp_spc_state {
+ SCTP_ADDR_AVAILABLE,
+ SCTP_ADDR_UNREACHABLE,
+ SCTP_ADDR_REMOVED,
+ SCTP_ADDR_ADDED,
+ SCTP_ADDR_MADE_PRIM,
+};
+
+
+/*
+ * 5.3.1.3 SCTP_REMOTE_ERROR
+ *
+ * A remote peer may send an Operational Error message to its peer.
+ * This message indicates a variety of error conditions on an
+ * association. The entire error TLV as it appears on the wire is
+ * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
+ * specification [SCTP] and any extensions for a list of possible
+ * error formats. SCTP error TLVs have the format:
+ */
+struct sctp_remote_error {
+ __u16 sre_type;
+ __u16 sre_flags;
+ __u32 sre_length;
+ __u16 sre_error;
+ sctp_assoc_t sre_assoc_id;
+ __u8 sre_data[0];
+};
+
+
+/*
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * If SCTP cannot deliver a message it may return the message as a
+ * notification.
+ */
+struct sctp_send_failed {
+ __u16 ssf_type;
+ __u16 ssf_flags;
+ __u32 ssf_length;
+ __u32 ssf_error;
+ struct sctp_sndrcvinfo ssf_info;
+ sctp_assoc_t ssf_assoc_id;
+ __u8 ssf_data[0];
+};
+
+/*
+ * ssf_flags: 16 bits (unsigned integer)
+ *
+ * The flag value will take one of the following values
+ *
+ * SCTP_DATA_UNSENT - Indicates that the data was never put on
+ * the wire.
+ *
+ * SCTP_DATA_SENT - Indicates that the data was put on the wire.
+ * Note that this does not necessarily mean that the
+ * data was (or was not) successfully delivered.
+ */
+enum sctp_ssf_flags {
+ SCTP_DATA_UNSENT,
+ SCTP_DATA_SENT,
+};
+
+/*
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ *
+ * When a peer sends a SHUTDOWN, SCTP delivers this notification to
+ * inform the application that it should cease sending data.
+ */
+struct sctp_shutdown_event {
+ __u16 sse_type;
+ __u16 sse_flags;
+ __u32 sse_length;
+ sctp_assoc_t sse_assoc_id;
+};
+
+/*
+ * 5.3.1.6 SCTP_ADAPTION_INDICATION
+ *
+ * When a peer sends an Adaption Layer Indication parameter, SCTP
+ * delivers this notification to inform the application
+ * of the peer's requested adaption layer.
+ */
+struct sctp_adaption_event {
+ __u16 sai_type;
+ __u16 sai_flags;
+ __u32 sai_length;
+ __u32 sai_adaption_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+/*
+ * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
+ *
+ * When a receiver is engaged in a partial delivery of a
+ * message, this notification will be used to indicate
+ * various events.
+ */
+struct sctp_pdapi_event {
+ __u16 pdapi_type;
+ __u16 pdapi_flags;
+ __u32 pdapi_length;
+ __u32 pdapi_indication;
+ sctp_assoc_t pdapi_assoc_id;
+};
+
+enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
+
+/*
+ * Described in Section 7.3
+ * Ancillary Data and Notification Interest Options
+ */
+struct sctp_event_subscribe {
+ __u8 sctp_data_io_event;
+ __u8 sctp_association_event;
+ __u8 sctp_address_event;
+ __u8 sctp_send_failure_event;
+ __u8 sctp_peer_error_event;
+ __u8 sctp_shutdown_event;
+ __u8 sctp_partial_delivery_event;
+ __u8 sctp_adaption_layer_event;
+};
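Editorial note: applications opt in to the notifications described in this section with the SCTP_EVENTS socket option. A hedged userspace sketch, assuming the lksctp userspace header; the chosen events are only examples.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Ask for per-message sndrcvinfo plus association change notifications. */
int example_subscribe(int fd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event     = 1;
	events.sctp_association_event = 1;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
			  &events, sizeof(events));
}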
+
+/*
+ * 5.3.1 SCTP Notification Structure
+ *
+ * The notification structure is defined as the union of all
+ * notification types.
+ *
+ */
+union sctp_notification {
+ struct {
+ __u16 sn_type; /* Notification type. */
+ __u16 sn_flags;
+ __u32 sn_length;
+ } sn_header;
+ struct sctp_assoc_change sn_assoc_change;
+ struct sctp_paddr_change sn_paddr_change;
+ struct sctp_remote_error sn_remote_error;
+ struct sctp_send_failed sn_send_failed;
+ struct sctp_shutdown_event sn_shutdown_event;
+ struct sctp_adaption_event sn_adaption_event;
+ struct sctp_pdapi_event sn_pdapi_event;
+};
+
+/* Section 5.3.1
+ * All standard values for sn_type flags are greater than 2^15.
+ * Values from 2^15 and down are reserved.
+ */
+
+enum sctp_sn_type {
+ SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_ASSOC_CHANGE,
+ SCTP_PEER_ADDR_CHANGE,
+ SCTP_SEND_FAILED,
+ SCTP_REMOTE_ERROR,
+ SCTP_SHUTDOWN_EVENT,
+ SCTP_PARTIAL_DELIVERY_EVENT,
+ SCTP_ADAPTION_INDICATION,
+};
+
+/* Notification error codes used to fill up the error fields in some
+ * notifications.
+ * SCTP_PEER_ADDR_CHANGE	: spc_error
+ * SCTP_ASSOC_CHANGE		: sac_error
+ * These names should potentially be included in draft 04 of the SCTP
+ * sockets API specification.
+ */
+typedef enum sctp_sn_error {
+ SCTP_FAILED_THRESHOLD,
+ SCTP_RECEIVED_SACK,
+ SCTP_HEARTBEAT_SUCCESS,
+ SCTP_RESPONSE_TO_USER_REQ,
+ SCTP_INTERNAL_ERROR,
+ SCTP_SHUTDOWN_GUARD_EXPIRES,
+ SCTP_PEER_FAULTY,
+} sctp_sn_error_t;
+
+/*
+ * 7.1.1 Retransmission Timeout Parameters (SCTP_RTOINFO)
+ *
+ * The protocol parameters used to initialize and bound retransmission
+ * timeout (RTO) are tunable. See [SCTP] for more information on how
+ * these parameters are used in RTO calculation.
+ */
+struct sctp_rtoinfo {
+ sctp_assoc_t srto_assoc_id;
+ __u32 srto_initial;
+ __u32 srto_max;
+ __u32 srto_min;
+};
+
+/*
+ * 7.1.2 Association Parameters (SCTP_ASSOCINFO)
+ *
+ * This option is used to both examine and set various association and
+ * endpoint parameters.
+ */
+struct sctp_assocparams {
+ sctp_assoc_t sasoc_assoc_id;
+ __u16 sasoc_asocmaxrxt;
+ __u16 sasoc_number_peer_destinations;
+ __u32 sasoc_peer_rwnd;
+ __u32 sasoc_local_rwnd;
+ __u32 sasoc_cookie_life;
+};
+
+/*
+ * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
+ *
+ * Requests that the peer mark the enclosed address as the association
+ * primary. The enclosed address must be one of the association's
+ * locally bound addresses. The following structure is used to make a
+ * set primary request:
+ */
+struct sctp_setpeerprim {
+ sctp_assoc_t sspp_assoc_id;
+ struct sockaddr_storage sspp_addr;
+};
+
+/*
+ * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
+ *
+ * Requests that the local SCTP stack use the enclosed peer address as
+ * the association primary. The enclosed address must be one of the
+ * association peer's addresses. The following structure is used to
+ * make a set peer primary request:
+ */
+struct sctp_prim {
+ sctp_assoc_t ssp_assoc_id;
+ struct sockaddr_storage ssp_addr;
+};
+
+/*
+ * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER)
+ *
+ * Requests that the local endpoint set the specified Adaption Layer
+ * Indication parameter for all future INIT and INIT-ACK exchanges.
+ */
+struct sctp_setadaption {
+ __u32 ssb_adaption_ind;
+};
+
+/*
+ * 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
+ *
+ * Applications can enable or disable heartbeats for any peer address
+ * of an association, modify an address's heartbeat interval, force a
+ * heartbeat to be sent immediately, and adjust the address's maximum
+ * number of retransmissions sent before an address is considered
+ * unreachable. The following structure is used to access and modify an
+ * address's parameters:
+ */
+struct sctp_paddrparams {
+ sctp_assoc_t spp_assoc_id;
+ struct sockaddr_storage spp_address;
+ __u32 spp_hbinterval;
+ __u16 spp_pathmaxrxt;
+};
+
+/*
+ * 7.2.2 Peer Address Information
+ *
+ * Applications can retrieve information about a specific peer address
+ * of an association, including its reachability state, congestion
+ * window, and retransmission timer values. This information is
+ * read-only. The following structure is used to access this
+ * information:
+ */
+struct sctp_paddrinfo {
+ sctp_assoc_t spinfo_assoc_id;
+ struct sockaddr_storage spinfo_address;
+ __s32 spinfo_state;
+ __u32 spinfo_cwnd;
+ __u32 spinfo_srtt;
+ __u32 spinfo_rto;
+ __u32 spinfo_mtu;
+};
+
+/* Peer address's state. */
+enum sctp_spinfo_state {
+ SCTP_INACTIVE,
+ SCTP_ACTIVE,
+};
+
+/*
+ * 7.2.1 Association Status (SCTP_STATUS)
+ *
+ * Applications can retrieve current status information about an
+ * association, including association state, peer receiver window size,
+ * number of unacked data chunks, and number of data chunks pending
+ * receipt. This information is read-only. The following structure is
+ * used to access this information:
+ */
+struct sctp_status {
+ sctp_assoc_t sstat_assoc_id;
+ __s32 sstat_state;
+ __u32 sstat_rwnd;
+ __u16 sstat_unackdata;
+ __u16 sstat_penddata;
+ __u16 sstat_instrms;
+ __u16 sstat_outstrms;
+ __u32 sstat_fragmentation_point;
+ struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * 8.3, 8.5 get all peer/local addresses in an association.
+ * This parameter struct is used by SCTP_GET_PEER_ADDRS and
+ * SCTP_GET_LOCAL_ADDRS socket options used internally to implement
+ * sctp_getpaddrs() and sctp_getladdrs() API.
+ */
+struct sctp_getaddrs {
+ sctp_assoc_t assoc_id;
+ int addr_num;
+ struct sockaddr __user *addrs;
+};
+
+/* These are bit fields for msghdr->msg_flags. See section 5.1. */
+/* On user space Linux, these live in <bits/socket.h> as an enum. */
+enum sctp_msg_flags {
+ MSG_NOTIFICATION = 0x8000,
+#define MSG_NOTIFICATION MSG_NOTIFICATION
+};
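Editorial note: receivers distinguish notifications from user data by checking MSG_NOTIFICATION in msg_flags after recvmsg(), then switching on sn_header.sn_type. A hedged userspace sketch follows (buffer sizing, alignment and error handling kept minimal; lksctp userspace header assumed).

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int example_recv_one(int fd)
{
	char buf[2048];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n <= 0)
		return -1;

	if (msg.msg_flags & MSG_NOTIFICATION) {
		union sctp_notification *sn = (union sctp_notification *)buf;

		if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE)
			printf("assoc state change: %u\n",
			       sn->sn_assoc_change.sac_state);
		return 0;	/* a notification, not user data */
	}
	return (int)n;		/* ordinary data of length n */
}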
+
+/*
+ * 8.1 sctp_bindx()
+ *
+ * The flags parameter is formed from the bitwise OR of zero or more of the
+ * following currently defined flags:
+ */
+#define SCTP_BINDX_ADD_ADDR 0x01
+#define SCTP_BINDX_REM_ADDR 0x02
+
+/* This is the structure that is passed as an argument(optval) to
+ * getsockopt(SCTP_SOCKOPT_PEELOFF).
+ */
+typedef struct {
+ sctp_assoc_t associd;
+ int sd;
+} sctp_peeloff_arg_t;
+
+#endif /* __net_sctp_user_h__ */
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
new file mode 100644
index 000000000000..0b2c2784f333
--- /dev/null
+++ b/include/net/slhc_vj.h
@@ -0,0 +1,188 @@
+#ifndef _SLHC_H
+#define _SLHC_H
+/*
+ * Definitions for tcp compression routines.
+ *
+ * $Header: slcompress.h,v 1.10 89/12/31 08:53:02 van Exp $
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ *
+ * modified for KA9Q Internet Software Package by
+ * Katie Stevens (dkstevens@ucdavis.edu)
+ * University of California, Davis
+ * Computing Services
+ * - 01-31-90 initial adaptation
+ *
+ * - Feb 1991 Bill_Simpson@um.cc.umich.edu
+ * variable number of conversation slots
+ * allow zero or one slots
+ * separate routines
+ * status display
+ */
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgment, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+/* SLIP compression masks for len/vers byte */
+#define SL_TYPE_IP 0x40
+#define SL_TYPE_UNCOMPRESSED_TCP 0x70
+#define SL_TYPE_COMPRESSED_TCP 0x80
+#define SL_TYPE_ERROR 0x00
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
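Editorial note: as a concrete reading of the layout described above, a receiver can classify a frame and pick out the special-case change masks from the first octet alone. An illustrative sketch using the masks defined here; it is not code from slhc.c.

/* Illustrative only: classify the first octet of an incoming frame. */
static void example_peek_first_octet(unsigned char c)
{
	if (c & SL_TYPE_COMPRESSED_TCP) {
		int push    = c & TCP_PUSH_BIT;       /* TCP PSH echoed here */
		int changes = c & SPECIALS_MASK;      /* S/A/W/U delta flags */

		if (changes == SPECIAL_I || changes == SPECIAL_D) {
			/* the two implicit-change cases described above:
			 * the receiver knows what changed without deltas.
			 */
		}
		(void)push;
	} else if ((c & 0xf0) == SL_TYPE_UNCOMPRESSED_TCP) {
		/* normal IP/TCP header with the protocol byte replaced by a
		 * connection id; this resynchronizes sender and receiver.
		 */
	}
}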
+/*
+ * data type and sizes conversion assumptions:
+ *
+ * VJ code KA9Q style generic
+ * u_char byte_t unsigned char 8 bits
+ * u_short int16 unsigned short 16 bits
+ * u_int int16 unsigned short 16 bits
+ * u_long unsigned long unsigned long 32 bits
+ * int int32 long 32 bits
+ */
+
+typedef __u8 byte_t;
+typedef __u32 int32;
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation together with a small identifier
+ * the transmit & receive ends of the line use to locate saved header.
+ */
+struct cstate {
+ byte_t cs_this; /* connection id number (xmit) */
+ struct cstate *next; /* next in ring (xmit) */
+ struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
+ struct tcphdr cs_tcp;
+ unsigned char cs_ipopt[64];
+ unsigned char cs_tcpopt[64];
+ int cs_hsize;
+};
+#define NULLSLSTATE (struct cstate *)0
+
+/*
+ * all the state data for one serial line (we need one of these per line).
+ */
+struct slcompress {
+ struct cstate *tstate; /* transmit connection states (array)*/
+ struct cstate *rstate; /* receive connection states (array)*/
+
+ byte_t tslot_limit; /* highest transmit slot id (0-l)*/
+ byte_t rslot_limit; /* highest receive slot id (0-l)*/
+
+ byte_t xmit_oldest; /* oldest xmit in ring */
+ byte_t xmit_current; /* most recent xmit id */
+ byte_t recv_current; /* most recent rcvd id */
+
+ byte_t flags;
+#define SLF_TOSS 0x01 /* tossing rcvd frames until id received */
+
+ int32 sls_o_nontcp; /* outbound non-TCP packets */
+ int32 sls_o_tcp; /* outbound TCP packets */
+ int32 sls_o_uncompressed; /* outbound uncompressed packets */
+ int32 sls_o_compressed; /* outbound compressed packets */
+ int32 sls_o_searches; /* searches for connection state */
+ int32 sls_o_misses; /* times couldn't find conn. state */
+
+ int32 sls_i_uncompressed; /* inbound uncompressed packets */
+ int32 sls_i_compressed; /* inbound compressed packets */
+ int32 sls_i_error; /* inbound error packets */
+ int32 sls_i_tossed; /* inbound packets tossed because of error */
+
+ int32 sls_i_runt;
+ int32 sls_i_badcheck;
+};
+#define NULLSLCOMPR (struct slcompress *)0
+
+#define __ARGS(x) x
+
+/* In slhc.c: */
+struct slcompress *slhc_init __ARGS((int rslots, int tslots));
+void slhc_free __ARGS((struct slcompress *comp));
+
+int slhc_compress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize, unsigned char *ocp, unsigned char **cpp,
+ int compress_cid));
+int slhc_uncompress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_remember __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_toss __ARGS((struct slcompress *comp));
+
+#endif /* _SLHC_H */
diff --git a/include/net/snmp.h b/include/net/snmp.h
new file mode 100644
index 000000000000..a15ab256276e
--- /dev/null
+++ b/include/net/snmp.h
@@ -0,0 +1,145 @@
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We don't choose to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Id: snmp.h,v 1.19 2001/06/14 13:40:46 davem Exp $
+ *
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+#include <linux/cache.h>
+#include <linux/snmp.h>
+
+/*
+ * Mibs are stored in array of unsigned long.
+ */
+/*
+ * struct snmp_mib{}
+ * - list of entries for particular API (such as /proc/net/snmp)
+ * - name of entries.
+ */
+struct snmp_mib {
+ char *name;
+ int entry;
+};
+
+#define SNMP_MIB_ITEM(_name,_entry) { \
+ .name = _name, \
+ .entry = _entry, \
+}
+
+#define SNMP_MIB_SENTINEL { \
+ .name = NULL, \
+ .entry = 0, \
+}
+
+/*
+ * We use all unsigned longs. Linux will soon be so reliable that even
+ * these will rapidly get too small 8-). Seriously consider the IpInReceives
+ * count on the 20Gb/s + networks people expect in a few years time!
+ */
+
+/*
+ * The rule for padding:
+ * Best is power of two because then the right structure can be found by a
+ * simple shift.  The structure should always be cache line aligned.
+ * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
+ * instructions to emulate multiply in case it is not power-of-two.
+ * Currently n is always <=3 for all sizes so simple cache line alignment
+ * is enough.
+ *
+ * The best solution would be a global CPU local area, especially on 64
+ * and 128 byte cacheline machines, where it makes a *lot* of sense -AK
+ */
+
+#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
+
+/* IPstats */
+#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
+struct ipstats_mib {
+ unsigned long mibs[IPSTATS_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* ICMP */
+#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
+#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
+
+struct icmp_mib {
+ unsigned long mibs[ICMP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* ICMP6 (IPv6-ICMP) */
+#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
+struct icmpv6_mib {
+ unsigned long mibs[ICMP6_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* TCP */
+#define TCP_MIB_MAX __TCP_MIB_MAX
+struct tcp_mib {
+ unsigned long mibs[TCP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* UDP */
+#define UDP_MIB_MAX __UDP_MIB_MAX
+struct udp_mib {
+ unsigned long mibs[UDP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* SCTP */
+#define SCTP_MIB_MAX __SCTP_MIB_MAX
+struct sctp_mib {
+ unsigned long mibs[SCTP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* Linux */
+#define LINUX_MIB_MAX __LINUX_MIB_MAX
+struct linux_mib {
+ unsigned long mibs[LINUX_MIB_MAX];
+};
+
+
+/*
+ * FIXME: On x86 and some other CPUs the split into user and softirq parts
+ * is not needed because addl $1,memory is atomic against interrupts (but
+ * atomic_inc would be overkill because of the lock cycles). Wants new
+ * nonlocked_atomic_inc() primitives -AK
+ */
+#define DEFINE_SNMP_STAT(type, name) \
+ __typeof__(type) *name[2]
+#define DECLARE_SNMP_STAT(type, name) \
+ extern __typeof__(type) *name[2]
+
+#define SNMP_STAT_BHPTR(name) (name[0])
+#define SNMP_STAT_USRPTR(name) (name[1])
+
+#define SNMP_INC_STATS_BH(mib, field) \
+ (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
+#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
+ (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
+#define SNMP_INC_STATS_USER(mib, field) \
+ (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
+#define SNMP_INC_STATS(mib, field) \
+ (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
+#define SNMP_DEC_STATS(mib, field) \
+ (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
+#define SNMP_ADD_STATS_BH(mib, field, addend) \
+ (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend) \
+ (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
+
+#endif
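Editorial note: for orientation, a hedged sketch of how a protocol would typically declare and bump one of these counters. The variable name is illustrative, the field assumes the IpInReceives entry from <linux/snmp.h>, and the two per-cpu mib copies must have been allocated by the protocol's init code before the macros are used.

/* Illustrative declaration: two per-cpu copies, [0] for softirq context,
 * [1] for user/process context, as laid out by DEFINE_SNMP_STAT above.
 */
DEFINE_SNMP_STAT(struct ipstats_mib, example_ip_statistics);

static void example_count_input_packet(void)
{
	/* Called from softirq (packet receive) context, so use the BH form. */
	SNMP_INC_STATS_BH(example_ip_statistics, IPSTATS_MIB_INRECEIVES);
}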
diff --git a/include/net/sock.h b/include/net/sock.h
new file mode 100644
index 000000000000..be81cabd0da3
--- /dev/null
+++ b/include/net/sock.h
@@ -0,0 +1,1297 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ * Alan Cox : Eliminate low level recv/recvfrom
+ * David S. Miller : New socket lookup architecture.
+ * Steve Whitehouse: Default routines for sock_ops
+ * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made
+ * protinfo be just a void pointer, as the
+ * protocol specific parts were moved to
+ * respective headers and ipv4/v6, etc now
+ * use private slabcaches for its socks
+ * Pedro Hortas : New flags field for socket options
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/security.h>
+
+#include <linux/filter.h>
+
+#include <asm/atomic.h>
+#include <net/dst.h>
+#include <net/checksum.h>
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+
+/* Define this to get the SOCK_DBG debugging facility. */
+#define SOCK_DEBUGGING
+#ifdef SOCK_DEBUGGING
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
+ printk(KERN_DEBUG msg); } while (0)
+#else
+#define SOCK_DEBUG(sk, msg...) do { } while (0)
+#endif
+
+/* This is the per-socket lock. The spinlock provides a synchronization
+ * between user contexts and software interrupt processing, whereas the
+ * mini-semaphore synchronizes multiple users amongst themselves.
+ */
+struct sock_iocb;
+typedef struct {
+ spinlock_t slock;
+ struct sock_iocb *owner;
+ wait_queue_head_t wq;
+} socket_lock_t;
+
+#define sock_lock_init(__sk) \
+do { spin_lock_init(&((__sk)->sk_lock.slock)); \
+ (__sk)->sk_lock.owner = NULL; \
+ init_waitqueue_head(&((__sk)->sk_lock.wq)); \
+} while(0)
+
+struct sock;
+
+/**
+ * struct sock_common - minimal network layer representation of sockets
+ * @skc_family - network address family
+ * @skc_state - Connection state
+ * @skc_reuse - %SO_REUSEADDR setting
+ * @skc_bound_dev_if - bound device index if != 0
+ * @skc_node - main hash linkage for various protocol lookup tables
+ * @skc_bind_node - bind hash linkage for various protocol lookup tables
+ * @skc_refcnt - reference count
+ *
+ * This is the minimal network layer representation of sockets, the header
+ * for struct sock and struct tcp_tw_bucket.
+ */
+struct sock_common {
+ unsigned short skc_family;
+ volatile unsigned char skc_state;
+ unsigned char skc_reuse;
+ int skc_bound_dev_if;
+ struct hlist_node skc_node;
+ struct hlist_node skc_bind_node;
+ atomic_t skc_refcnt;
+};
+
+/**
+ * struct sock - network layer representation of sockets
+ * @__sk_common - shared layout with tcp_tw_bucket
+ * @sk_shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+ * @sk_userlocks - %SO_SNDBUF and %SO_RCVBUF settings
+ * @sk_lock - synchronizer
+ * @sk_rcvbuf - size of receive buffer in bytes
+ * @sk_sleep - sock wait queue
+ * @sk_dst_cache - destination cache
+ * @sk_dst_lock - destination cache lock
+ * @sk_policy - flow policy
+ * @sk_rmem_alloc - receive queue bytes committed
+ * @sk_receive_queue - incoming packets
+ * @sk_wmem_alloc - transmit queue bytes committed
+ * @sk_write_queue - Packet sending queue
+ * @sk_omem_alloc - "o" is "option" or "other"
+ * @sk_wmem_queued - persistent queue size
+ * @sk_forward_alloc - space allocated forward
+ * @sk_allocation - allocation mode
+ * @sk_sndbuf - size of send buffer in bytes
+ * @sk_flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+ * @sk_no_check - %SO_NO_CHECK setting, whether or not to checksum packets
+ * @sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_lingertime - %SO_LINGER l_linger setting
+ * @sk_hashent - hash entry in several tables (e.g. tcp_ehash)
+ * @sk_backlog - always used with the per-socket spinlock held
+ * @sk_callback_lock - used with the callbacks in the end of this struct
+ * @sk_error_queue - rarely used
+ * @sk_prot - protocol handlers inside a network family
+ * @sk_err - last error
+ * @sk_err_soft - errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
+ * @sk_ack_backlog - current listen backlog
+ * @sk_max_ack_backlog - listen backlog set in listen()
+ * @sk_priority - %SO_PRIORITY setting
+ * @sk_type - socket type (%SOCK_STREAM, etc)
+ * @sk_protocol - which protocol this socket belongs in this network family
+ * @sk_peercred - %SO_PEERCRED setting
+ * @sk_rcvlowat - %SO_RCVLOWAT setting
+ * @sk_rcvtimeo - %SO_RCVTIMEO setting
+ * @sk_sndtimeo - %SO_SNDTIMEO setting
+ * @sk_filter - socket filtering instructions
+ * @sk_protinfo - private area, net family specific, when not using slab
+ * @sk_timer - sock cleanup timer
+ * @sk_stamp - time stamp of last packet received
+ * @sk_socket - Identd and reporting IO signals
+ * @sk_user_data - RPC layer private data
+ * @sk_sndmsg_page - cached page for sendmsg
+ * @sk_sndmsg_off - cached offset for sendmsg
+ * @sk_send_head - front of stuff to transmit
+ * @sk_write_pending - a write to stream socket waits to start
+ * @sk_state_change - callback to indicate change in the state of the sock
+ * @sk_data_ready - callback to indicate there is data to be processed
+ * @sk_write_space - callback to indicate there is buffer sending space available
+ * @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_backlog_rcv - callback to process the backlog
+ * @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
+ */
+struct sock {
+ /*
+ * Now struct tcp_tw_bucket also uses sock_common, so please just
+	 * don't add anything before this first member (__sk_common) --acme
+ */
+ struct sock_common __sk_common;
+#define sk_family __sk_common.skc_family
+#define sk_state __sk_common.skc_state
+#define sk_reuse __sk_common.skc_reuse
+#define sk_bound_dev_if __sk_common.skc_bound_dev_if
+#define sk_node __sk_common.skc_node
+#define sk_bind_node __sk_common.skc_bind_node
+#define sk_refcnt __sk_common.skc_refcnt
+ unsigned char sk_shutdown : 2,
+ sk_no_check : 2,
+ sk_userlocks : 4;
+ unsigned char sk_protocol;
+ unsigned short sk_type;
+ int sk_rcvbuf;
+ socket_lock_t sk_lock;
+ wait_queue_head_t *sk_sleep;
+ struct dst_entry *sk_dst_cache;
+ struct xfrm_policy *sk_policy[2];
+ rwlock_t sk_dst_lock;
+ atomic_t sk_rmem_alloc;
+ atomic_t sk_wmem_alloc;
+ atomic_t sk_omem_alloc;
+ struct sk_buff_head sk_receive_queue;
+ struct sk_buff_head sk_write_queue;
+ int sk_wmem_queued;
+ int sk_forward_alloc;
+ unsigned int sk_allocation;
+ int sk_sndbuf;
+ int sk_route_caps;
+ int sk_hashent;
+ unsigned long sk_flags;
+ unsigned long sk_lingertime;
+ /*
+ * The backlog queue is special, it is always used with
+ * the per-socket spinlock held and requires low latency
+ * access. Therefore we special-case its implementation.
+ */
+ struct {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ } sk_backlog;
+ struct sk_buff_head sk_error_queue;
+ struct proto *sk_prot;
+ rwlock_t sk_callback_lock;
+ int sk_err,
+ sk_err_soft;
+ unsigned short sk_ack_backlog;
+ unsigned short sk_max_ack_backlog;
+ __u32 sk_priority;
+ struct ucred sk_peercred;
+ int sk_rcvlowat;
+ long sk_rcvtimeo;
+ long sk_sndtimeo;
+ struct sk_filter *sk_filter;
+ void *sk_protinfo;
+ struct timer_list sk_timer;
+ struct timeval sk_stamp;
+ struct socket *sk_socket;
+ void *sk_user_data;
+ struct page *sk_sndmsg_page;
+ struct sk_buff *sk_send_head;
+ __u32 sk_sndmsg_off;
+ int sk_write_pending;
+ void *sk_security;
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
+ void (*sk_destruct)(struct sock *sk);
+};
+
+/*
+ * Hashed lists helper routines
+ */
+static inline struct sock *__sk_head(struct hlist_head *head)
+{
+ return hlist_entry(head->first, struct sock, sk_node);
+}
+
+static inline struct sock *sk_head(struct hlist_head *head)
+{
+ return hlist_empty(head) ? NULL : __sk_head(head);
+}
+
+static inline struct sock *sk_next(struct sock *sk)
+{
+ return sk->sk_node.next ?
+ hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
+}
+
+static inline int sk_unhashed(struct sock *sk)
+{
+ return hlist_unhashed(&sk->sk_node);
+}
+
+static inline int sk_hashed(struct sock *sk)
+{
+ return sk->sk_node.pprev != NULL;
+}
+
+static __inline__ void sk_node_init(struct hlist_node *node)
+{
+ node->pprev = NULL;
+}
+
+static __inline__ void __sk_del_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_node);
+}
+
+static __inline__ int __sk_del_node_init(struct sock *sk)
+{
+ if (sk_hashed(sk)) {
+ __sk_del_node(sk);
+ sk_node_init(&sk->sk_node);
+ return 1;
+ }
+ return 0;
+}
+
+/* Grab socket reference count. This operation is valid only
+   when sk is ALREADY grabbed, e.g. it has been found in a hash table
+   or a list and the lookup is made under a lock preventing hash table
+   modifications.
+ */
+
+static inline void sock_hold(struct sock *sk)
+{
+ atomic_inc(&sk->sk_refcnt);
+}
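+/*
+ * Illustrative usage sketch (not part of the API): a lookup made under the
+ * hash-table lock may take its own reference before dropping the lock; the
+ * caller releases it later with sock_put(), defined further down in this
+ * file.  "my_hash_lock" and "head" are hypothetical names.
+ *
+ *	read_lock(&my_hash_lock);
+ *	sk = sk_head(head);
+ *	if (sk)
+ *		sock_hold(sk);
+ *	read_unlock(&my_hash_lock);
+ *	if (sk) {
+ *		... use sk ...
+ *		sock_put(sk);
+ *	}
+ */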
+
+/* Ungrab the socket in a context which assumes that the socket refcnt
+   cannot hit zero, e.g. this is true in the context of any socketcall.
+ */
+static inline void __sock_put(struct sock *sk)
+{
+ atomic_dec(&sk->sk_refcnt);
+}
+
+static __inline__ int sk_del_node_init(struct sock *sk)
+{
+ int rc = __sk_del_node_init(sk);
+
+ if (rc) {
+ /* paranoid for a while -acme */
+ WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
+ }
+ return rc;
+}
+
+static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_node, list);
+}
+
+static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ __sk_add_node(sk, list);
+}
+
+static __inline__ void __sk_del_bind_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind_node);
+}
+
+static __inline__ void sk_add_bind_node(struct sock *sk,
+ struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind_node, list);
+}
+
+#define sk_for_each(__sk, node, list) \
+ hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_from(__sk, node) \
+ if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
+ hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_for_each_continue(__sk, node) \
+ if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
+ hlist_for_each_entry_continue(__sk, node, sk_node)
+#define sk_for_each_safe(__sk, node, tmp, list) \
+ hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, node, list) \
+ hlist_for_each_entry(__sk, node, list, sk_bind_node)
+
+/* Sock flags */
+enum sock_flags {
+ SOCK_DEAD,
+ SOCK_DONE,
+ SOCK_URGINLINE,
+ SOCK_KEEPOPEN,
+ SOCK_LINGER,
+ SOCK_DESTROY,
+ SOCK_BROADCAST,
+ SOCK_TIMESTAMP,
+ SOCK_ZAPPED,
+ SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
+ SOCK_DBG, /* %SO_DEBUG setting */
+ SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+ SOCK_NO_LARGESEND, /* whether to send large segments or not */
+ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
+ SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
+};
+
+static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+ __set_bit(flag, &sk->sk_flags);
+}
+
+static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
+{
+ __clear_bit(flag, &sk->sk_flags);
+}
+
+static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+{
+ return test_bit(flag, &sk->sk_flags);
+}
+
+static inline void sk_acceptq_removed(struct sock *sk)
+{
+ sk->sk_ack_backlog--;
+}
+
+static inline void sk_acceptq_added(struct sock *sk)
+{
+ sk->sk_ack_backlog++;
+}
+
+static inline int sk_acceptq_is_full(struct sock *sk)
+{
+ return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+}
+
+/*
+ * Compute minimal free write space needed to queue new packets.
+ */
+static inline int sk_stream_min_wspace(struct sock *sk)
+{
+ return sk->sk_wmem_queued / 2;
+}
+
+static inline int sk_stream_wspace(struct sock *sk)
+{
+ return sk->sk_sndbuf - sk->sk_wmem_queued;
+}
+
+extern void sk_stream_write_space(struct sock *sk);
+
+static inline int sk_stream_memory_free(struct sock *sk)
+{
+ return sk->sk_wmem_queued < sk->sk_sndbuf;
+}
+
+extern void sk_stream_rfree(struct sk_buff *skb);
+
+static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb->sk = sk;
+ skb->destructor = sk_stream_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc -= skb->truesize;
+}
+
+static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ sk->sk_wmem_queued -= skb->truesize;
+ sk->sk_forward_alloc += skb->truesize;
+ __kfree_skb(skb);
+}
+
+/* The per-socket spinlock must be held here. */
+#define sk_add_backlog(__sk, __skb) \
+do { if (!(__sk)->sk_backlog.tail) { \
+ (__sk)->sk_backlog.head = \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } else { \
+ ((__sk)->sk_backlog.tail)->next = (__skb); \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } \
+ (__skb)->next = NULL; \
+} while(0)
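+/*
+ * Illustrative usage sketch: a protocol receive path takes the per-socket
+ * spinlock and either handles the packet directly or, when a process
+ * currently owns the socket, defers it to the backlog; release_sock()
+ * later drains the backlog through sk_backlog_rcv.  bh_lock_sock(),
+ * bh_unlock_sock() and sock_owned_by_user() are defined further down in
+ * this file; "my_do_rcv" is a hypothetical name.
+ *
+ *	bh_lock_sock(sk);
+ *	if (!sock_owned_by_user(sk))
+ *		my_do_rcv(sk, skb);
+ *	else
+ *		sk_add_backlog(sk, skb);
+ *	bh_unlock_sock(sk);
+ */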
+
+#define sk_wait_event(__sk, __timeo, __condition) \
+({ int rc; \
+ release_sock(__sk); \
+ rc = __condition; \
+ if (!rc) { \
+ *(__timeo) = schedule_timeout(*(__timeo)); \
+ rc = __condition; \
+ } \
+ lock_sock(__sk); \
+ rc; \
+})
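+/*
+ * Illustrative usage sketch: sk_wait_event() assumes the caller holds the
+ * socket lock (it is dropped and re-taken around the sleep) and has already
+ * queued itself on sk->sk_sleep.  A simplified wait for readable data,
+ * assuming the usual wait-queue helpers from <linux/wait.h>, might look like:
+ *
+ *	DEFINE_WAIT(wait);
+ *	int rc;
+ *
+ *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ *	rc = sk_wait_event(sk, &timeo,
+ *			   !skb_queue_empty(&sk->sk_receive_queue));
+ *	finish_wait(sk->sk_sleep, &wait);
+ */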
+
+extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
+extern int sk_stream_error(struct sock *sk, int flags, int err);
+extern void sk_stream_kill_queues(struct sock *sk);
+
+extern int sk_wait_data(struct sock *sk, long *timeo);
+
+/* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ * transport -> network interface is defined by struct inet_proto
+ */
+struct proto {
+ void (*close)(struct sock *sk,
+ long timeout);
+ int (*connect)(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
+ int (*disconnect)(struct sock *sk, int flags);
+
+ struct sock * (*accept) (struct sock *sk, int flags, int *err);
+
+ int (*ioctl)(struct sock *sk, int cmd,
+ unsigned long arg);
+ int (*init)(struct sock *sk);
+ int (*destroy)(struct sock *sk);
+ void (*shutdown)(struct sock *sk, int how);
+ int (*setsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int optlen);
+ int (*getsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *option);
+ int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len);
+ int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg,
+ size_t len, int noblock, int flags,
+ int *addr_len);
+ int (*sendpage)(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+ int (*bind)(struct sock *sk,
+ struct sockaddr *uaddr, int addr_len);
+
+ int (*backlog_rcv) (struct sock *sk,
+ struct sk_buff *skb);
+
+ /* Keeping track of sk's, looking them up, and port selection methods. */
+ void (*hash)(struct sock *sk);
+ void (*unhash)(struct sock *sk);
+ int (*get_port)(struct sock *sk, unsigned short snum);
+
+ /* Memory pressure */
+ void (*enter_memory_pressure)(void);
+ atomic_t *memory_allocated; /* Current allocated memory. */
+ atomic_t *sockets_allocated; /* Current number of sockets. */
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non atomically.
+ * All of sk_stream_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+ int *memory_pressure;
+ int *sysctl_mem;
+ int *sysctl_wmem;
+ int *sysctl_rmem;
+ int max_header;
+
+ kmem_cache_t *slab;
+ unsigned int obj_size;
+
+ struct module *owner;
+
+ char name[32];
+
+ struct list_head node;
+
+ struct {
+ int inuse;
+ u8 __pad[SMP_CACHE_BYTES - sizeof(int)];
+ } stats[NR_CPUS];
+};
+
+extern int proto_register(struct proto *prot, int alloc_slab);
+extern void proto_unregister(struct proto *prot);
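+/*
+ * Illustrative sketch: a protocol describes itself with a struct proto and
+ * registers it at init time.  All names below are hypothetical; passing 1
+ * as the second argument asks proto_register() to allocate a slab cache of
+ * obj_size bytes for the protocol's sockets.
+ *
+ *	static struct proto my_proto = {
+ *		.name		= "MYPROTO",
+ *		.owner		= THIS_MODULE,
+ *		.obj_size	= sizeof(struct my_sock),
+ *		.close		= my_close,
+ *	};
+ *
+ *	err = proto_register(&my_proto, 1);
+ *	...
+ *	proto_unregister(&my_proto);
+ */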
+
+/* Called with local bh disabled */
+static __inline__ void sock_prot_inc_use(struct proto *prot)
+{
+ prot->stats[smp_processor_id()].inuse++;
+}
+
+static __inline__ void sock_prot_dec_use(struct proto *prot)
+{
+ prot->stats[smp_processor_id()].inuse--;
+}
+
+/* About 10 seconds */
+#define SOCK_DESTROY_TIME (10*HZ)
+
+/* Ports 0-1023 can't be bound to unless you are superuser */
+#define PROT_SOCK 1024
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+#define SOCK_SNDBUF_LOCK 1
+#define SOCK_RCVBUF_LOCK 2
+#define SOCK_BINDADDR_LOCK 4
+#define SOCK_BINDPORT_LOCK 8
+
+/* sock_iocb: used to kick off async processing of socket ios */
+struct sock_iocb {
+ struct list_head list;
+
+ int flags;
+ int size;
+ struct socket *sock;
+ struct sock *sk;
+ struct scm_cookie *scm;
+ struct msghdr *msg, async_msg;
+ struct iovec async_iov;
+ struct kiocb *kiocb;
+};
+
+static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
+{
+ return (struct sock_iocb *)iocb->private;
+}
+
+static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
+{
+ return si->kiocb;
+}
+
+struct socket_alloc {
+ struct socket socket;
+ struct inode vfs_inode;
+};
+
+static inline struct socket *SOCKET_I(struct inode *inode)
+{
+ return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
+}
+
+static inline struct inode *SOCK_INODE(struct socket *socket)
+{
+ return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
+}
+
+extern void __sk_stream_mem_reclaim(struct sock *sk);
+extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+
+#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
+
+static inline int sk_stream_pages(int amt)
+{
+ return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
+}
+
+static inline void sk_stream_mem_reclaim(struct sock *sk)
+{
+ if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
+ __sk_stream_mem_reclaim(sk);
+}
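+/*
+ * Worked example (assuming PAGE_SIZE == 4096): sk_stream_pages(6000) rounds
+ * up to (6000 + 4095) / 4096 == 2 quanta, so sk_stream_mem_schedule() would
+ * account 8192 bytes of forward allocation even though only 6000 were asked
+ * for; roughly speaking, the unused remainder stays in sk_forward_alloc for
+ * later allocations until sk_stream_mem_reclaim() gives whole quanta back.
+ */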
+
+static inline void sk_stream_writequeue_purge(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+ sk_stream_free_skb(sk, skb);
+ sk_stream_mem_reclaim(sk);
+}
+
+static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+{
+ return (int)skb->truesize <= sk->sk_forward_alloc ||
+ sk_stream_mem_schedule(sk, skb->truesize, 1);
+}
+
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue. This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also an exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
+
+extern void FASTCALL(lock_sock(struct sock *sk));
+extern void FASTCALL(release_sock(struct sock *sk));
+
+/* BH context may only use the following locking interface. */
+#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
+#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
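+/*
+ * Illustrative sketch of the two locking classes described above: process
+ * context brackets socket-state changes with lock_sock()/release_sock(),
+ * which may sleep and which drains the backlog on release, while BH context
+ * is restricted to the spinlock interface.
+ *
+ *	lock_sock(sk);
+ *	... modify socket state, queues, options ...
+ *	release_sock(sk);
+ */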
+
+extern struct sock *sk_alloc(int family, int priority,
+ struct proto *prot, int zero_it);
+extern void sk_free(struct sock *sk);
+
+extern struct sk_buff *sock_wmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern struct sk_buff *sock_rmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern void sock_wfree(struct sk_buff *skb);
+extern void sock_rfree(struct sk_buff *skb);
+
+extern int sock_setsockopt(struct socket *sock, int level,
+ int op, char __user *optval,
+ int optlen);
+
+extern int sock_getsockopt(struct socket *sock, int level,
+ int op, char __user *optval,
+ int __user *optlen);
+extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+ unsigned long size,
+ int noblock,
+ int *errcode);
+extern void *sock_kmalloc(struct sock *sk, int size, int priority);
+extern void sock_kfree_s(struct sock *sk, void *mem, int size);
+extern void sk_send_sigurg(struct sock *sk);
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * does not implement a particular function.
+ */
+extern int sock_no_bind(struct socket *,
+ struct sockaddr *, int);
+extern int sock_no_connect(struct socket *,
+ struct sockaddr *, int, int);
+extern int sock_no_socketpair(struct socket *,
+ struct socket *);
+extern int sock_no_accept(struct socket *,
+ struct socket *, int);
+extern int sock_no_getname(struct socket *,
+ struct sockaddr *, int *, int);
+extern unsigned int sock_no_poll(struct file *, struct socket *,
+ struct poll_table_struct *);
+extern int sock_no_ioctl(struct socket *, unsigned int,
+ unsigned long);
+extern int sock_no_listen(struct socket *, int);
+extern int sock_no_shutdown(struct socket *, int);
+extern int sock_no_getsockopt(struct socket *, int , int,
+ char __user *, int __user *);
+extern int sock_no_setsockopt(struct socket *, int, int,
+ char __user *, int);
+extern int sock_no_sendmsg(struct kiocb *, struct socket *,
+ struct msghdr *, size_t);
+extern int sock_no_recvmsg(struct kiocb *, struct socket *,
+ struct msghdr *, size_t, int);
+extern int sock_no_mmap(struct file *file,
+ struct socket *sock,
+ struct vm_area_struct *vma);
+extern ssize_t sock_no_sendpage(struct socket *sock,
+ struct page *page,
+ int offset, size_t size,
+ int flags);
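+/*
+ * Illustrative sketch: a protocol plugs these stubs into its struct
+ * proto_ops for the operations it does not support.  The structure name,
+ * family value and the members not shown are hypothetical/omitted.
+ *
+ *	static struct proto_ops my_proto_ops = {
+ *		.family		= PF_MYPROTO,
+ *		.listen		= sock_no_listen,
+ *		.socketpair	= sock_no_socketpair,
+ *		.mmap		= sock_no_mmap,
+ *		.sendpage	= sock_no_sendpage,
+ *		...
+ *	};
+ */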
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * uses the inet style.
+ */
+extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen);
+extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags);
+extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen);
+
+extern void sk_common_release(struct sock *sk);
+
+/*
+ * Default socket callbacks and setup code
+ */
+
+/* Initialise core socket variables */
+extern void sock_init_data(struct socket *sock, struct sock *sk);
+
+/**
+ * sk_filter - run a packet through a socket filter
+ * @sk: sock associated with &sk_buff
+ * @skb: buffer to filter
+ * @needlock: set to 1 if the sock is not locked by caller.
+ *
+ * Run the filter code and then cut skb->data to correct size returned by
+ * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
+ * than pkt_len we keep the whole skb->data. This is the socket level
+ * wrapper to sk_run_filter. It returns 0 if the packet should
+ * be accepted or -EPERM if the packet should be tossed.
+ *
+ */
+
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
+{
+ int err;
+
+ err = security_sock_rcv_skb(sk, skb);
+ if (err)
+ return err;
+
+ if (sk->sk_filter) {
+ struct sk_filter *filter;
+
+ if (needlock)
+ bh_lock_sock(sk);
+
+ filter = sk->sk_filter;
+ if (filter) {
+ int pkt_len = sk_run_filter(skb, filter->insns,
+ filter->len);
+ if (!pkt_len)
+ err = -EPERM;
+ else
+ skb_trim(skb, pkt_len);
+ }
+
+ if (needlock)
+ bh_unlock_sock(sk);
+ }
+ return err;
+}
+
+/**
+ * sk_filter_release: Release a socket filter
+ * @sk: socket
+ * @fp: filter to remove
+ *
+ * Remove a filter from a socket and release its resources.
+ */
+
+static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
+{
+ unsigned int size = sk_filter_len(fp);
+
+ atomic_sub(size, &sk->sk_omem_alloc);
+
+ if (atomic_dec_and_test(&fp->refcnt))
+ kfree(fp);
+}
+
+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+ atomic_inc(&fp->refcnt);
+ atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+}
+
+/*
+ * Socket reference counting postulates.
+ *
+ * * Each user of a socket SHOULD hold a reference count.
+ * * Each access point to a socket (a hash table bucket, a reference from a
+ *   list, a running timer, an skb in flight) MUST hold a reference count.
+ * * When the reference count hits 0, it will never increase again.
+ * * When the reference count hits 0, it means that no references from
+ *   outside exist to this socket and the current process on the current CPU
+ *   is the last user and may/should destroy this socket.
+ * * sk_free is called from any context: process, BH, IRQ. When
+ *   it is called, the socket has no references from outside -> sk_free
+ *   may release descendant resources allocated by the socket, but
+ *   by the time it is called, the socket is NOT referenced by any
+ *   hash tables, lists etc.
+ * * Packets delivered from outside (from the network or from another process)
+ *   and enqueued on receive/error queues SHOULD NOT grab a reference count
+ *   while they sit in a queue. Otherwise packets will leak into a hole when
+ *   a socket is looked up by one CPU and unhashing is done by another CPU.
+ *   This is true for udp/raw, netlink (leak to receive and error queues) and
+ *   tcp (leak to backlog). Packet sockets do all the processing inside
+ *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
+ *   use a separate SMP lock, so they are prone to it too.
+ */
+
+/* Ungrab socket and destroy it, if it was the last reference. */
+static inline void sock_put(struct sock *sk)
+{
+ if (atomic_dec_and_test(&sk->sk_refcnt))
+ sk_free(sk);
+}
+
+/* Detach socket from process context.
+ * Announce socket dead, detach it from wait queue and inode.
+ * Note that parent inode held reference count on this struct sock,
+ * we do not release it in this function, because protocol
+ * probably wants some additional cleanups or even continuing
+ * to work with this socket (TCP).
+ */
+static inline void sock_orphan(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_socket = NULL;
+ sk->sk_sleep = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static inline void sock_graft(struct sock *sk, struct socket *parent)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_sleep = &parent->wait;
+ parent->sk = sk;
+ sk->sk_socket = parent;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+extern int sock_i_uid(struct sock *sk);
+extern unsigned long sock_i_ino(struct sock *sk);
+
+static inline struct dst_entry *
+__sk_dst_get(struct sock *sk)
+{
+ return sk->sk_dst_cache;
+}
+
+static inline struct dst_entry *
+sk_dst_get(struct sock *sk)
+{
+ struct dst_entry *dst;
+
+ read_lock(&sk->sk_dst_lock);
+ dst = sk->sk_dst_cache;
+ if (dst)
+ dst_hold(dst);
+ read_unlock(&sk->sk_dst_lock);
+ return dst;
+}
+
+static inline void
+__sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+ struct dst_entry *old_dst;
+
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = dst;
+ dst_release(old_dst);
+}
+
+static inline void
+sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+ write_lock(&sk->sk_dst_lock);
+ __sk_dst_set(sk, dst);
+ write_unlock(&sk->sk_dst_lock);
+}
+
+static inline void
+__sk_dst_reset(struct sock *sk)
+{
+ struct dst_entry *old_dst;
+
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = NULL;
+ dst_release(old_dst);
+}
+
+static inline void
+sk_dst_reset(struct sock *sk)
+{
+ write_lock(&sk->sk_dst_lock);
+ __sk_dst_reset(sk);
+ write_unlock(&sk->sk_dst_lock);
+}
+
+static inline struct dst_entry *
+__sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk->sk_dst_cache;
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk->sk_dst_cache = NULL;
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+
+static inline struct dst_entry *
+sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk_dst_reset(sk);
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
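+/*
+ * Illustrative usage sketch: a transmit path typically revalidates the
+ * cached route before use and falls back to a fresh lookup when the cache
+ * is stale.  "my_route_output" is a hypothetical helper returning a held
+ * dst_entry; __sk_dst_set() takes over that reference.  The lockless __
+ * variants assume the socket is already locked by the caller.
+ *
+ *	dst = __sk_dst_check(sk, 0);
+ *	if (dst == NULL) {
+ *		dst = my_route_output(sk);
+ *		if (dst)
+ *			__sk_dst_set(sk, dst);
+ *	}
+ */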
+
+static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_wmem_queued += skb->truesize;
+ sk->sk_forward_alloc -= skb->truesize;
+}
+
+static inline int skb_copy_to_page(struct sock *sk, char __user *from,
+ struct sk_buff *skb, struct page *page,
+ int off, int copy)
+{
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ int err = 0;
+ unsigned int csum = csum_and_copy_from_user(from,
+ page_address(page) + off,
+ copy, 0, &err);
+ if (err)
+ return err;
+ skb->csum = csum_block_add(skb->csum, csum, skb->len);
+ } else if (copy_from_user(page_address(page) + off, from, copy))
+ return -EFAULT;
+
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+ sk->sk_forward_alloc -= copy;
+ return 0;
+}
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced
+ * protocols can't normally use this as they need to fit buffers in
+ * and play with them.
+ *
+ * Inlined as it's very short and called for pretty much every
+ * packet ever received.
+ */
+
+static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = sock_wfree;
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+}
+
+static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb->sk = sk;
+ skb->destructor = sock_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+ unsigned long expires);
+
+extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+
+static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err = 0;
+ int skb_len;
+
+ /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
+ number of warnings when compiling with -W --ANK
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* It would be a deadlock if sock_queue_rcv_skb were used
+    with the socket lock held! We assume that users of this
+    function are lock free.
+ */
+ err = sk_filter(sk, skb, 1);
+ if (err)
+ goto out;
+
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+out:
+ return err;
+}
+
+static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+ /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
+ number of warnings when compiling with -W --ANK
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
+ return -ENOMEM;
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb->len);
+ return 0;
+}
+
+/*
+ * Recover an error report and clear atomically
+ */
+
+static inline int sock_error(struct sock *sk)
+{
+ int err = xchg(&sk->sk_err, 0);
+ return -err;
+}
+
+static inline unsigned long sock_wspace(struct sock *sk)
+{
+ int amt = 0;
+
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+ amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+ if (amt < 0)
+ amt = 0;
+ }
+ return amt;
+}
+
+static inline void sk_wake_async(struct sock *sk, int how, int band)
+{
+ if (sk->sk_socket && sk->sk_socket->fasync_list)
+ sock_wake_async(sk->sk_socket, how, band);
+}
+
+#define SOCK_MIN_SNDBUF 2048
+#define SOCK_MIN_RCVBUF 256
+
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+ sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+ sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+ }
+}
+
+static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
+ int size, int mem, int gfp)
+{
+ struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);
+
+ if (skb) {
+ skb->truesize += mem;
+ if (sk->sk_forward_alloc >= (int)skb->truesize ||
+ sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+ skb_reserve(skb, sk->sk_prot->max_header);
+ return skb;
+ }
+ __kfree_skb(skb);
+ } else {
+ sk->sk_prot->enter_memory_pressure();
+ sk_stream_moderate_sndbuf(sk);
+ }
+ return NULL;
+}
+
+static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
+ int size, int gfp)
+{
+ return sk_stream_alloc_pskb(sk, size, 0, gfp);
+}
+
+static inline struct page *sk_stream_alloc_page(struct sock *sk)
+{
+ struct page *page = NULL;
+
+ if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
+ sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+ page = alloc_pages(sk->sk_allocation, 0);
+ else {
+ sk->sk_prot->enter_memory_pressure();
+ sk_stream_moderate_sndbuf(sk);
+ }
+ return page;
+}
+
+#define sk_stream_for_retrans_queue(skb, sk) \
+ for (skb = (sk)->sk_write_queue.next; \
+ (skb != (sk)->sk_send_head) && \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
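+/*
+ * Illustrative usage sketch: the macro above walks the segments that have
+ * been sent but not yet acknowledged, i.e. everything on sk_write_queue in
+ * front of sk_send_head.  "my_mark_lost" is a hypothetical per-skb action.
+ *
+ *	struct sk_buff *skb;
+ *
+ *	sk_stream_for_retrans_queue(skb, sk)
+ *		my_mark_lost(skb);
+ */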
+
+/*
+ * Default write policy as shown to user space via poll/select/SIGIO
+ */
+static inline int sock_writeable(const struct sock *sk)
+{
+ return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
+}
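+/*
+ * Illustrative sketch: a poll method would typically translate this into
+ * the write bits of its mask:
+ *
+ *	if (sock_writeable(sk))
+ *		mask |= POLLOUT | POLLWRNORM;
+ */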
+
+static inline int gfp_any(void)
+{
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+{
+ return noblock ? 0 : sk->sk_rcvtimeo;
+}
+
+static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+{
+ return noblock ? 0 : sk->sk_sndtimeo;
+}
+
+static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
+{
+ return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+}
+
+/* Alas, with timeout socket operations are not restartable.
+ * Compare this to poll().
+ */
+static inline int sock_intr_errno(long timeo)
+{
+ return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
+}
+
+static __inline__ void
+sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+{
+ struct timeval *stamp = &skb->stamp;
+ if (sock_flag(sk, SOCK_RCVTSTAMP)) {
+ /* Race occurred between timestamp enabling and packet
+ receiving. Fill in the current time for now. */
+ if (stamp->tv_sec == 0)
+ do_gettimeofday(stamp);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
+ stamp);
+ } else
+ sk->sk_stamp = *stamp;
+}
+
+/**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk - socket to eat this skb from
+ * @skb - socket buffer to eat
+ *
+ * This routine must be called with interrupts disabled or with the socket
+ * locked so that the sk_buff queue operation is ok.
+*/
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ __kfree_skb(skb);
+}
+
+extern void sock_enable_timestamp(struct sock *sk);
+extern int sock_get_timestamp(struct sock *, struct timeval __user *);
+
+/*
+ * Enable debug/info messages
+ */
+
+#if 0
+#define NETDEBUG(x) do { } while (0)
+#define LIMIT_NETDEBUG(x) do {} while(0)
+#else
+#define NETDEBUG(x) do { x; } while (0)
+#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0)
+#endif
+
+/*
+ * Macros for sleeping on a socket. Use them like this:
+ *
+ * SOCK_SLEEP_PRE(sk)
+ * if (condition)
+ * schedule();
+ * SOCK_SLEEP_POST(sk)
+ *
+ * N.B. These are now obsolete and were, AFAIK, only ever used in DECnet;
+ * once the last use of them in DECnet has gone, I intend to
+ * remove them.
+ */
+
+#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \
+ DECLARE_WAITQUEUE(wait, tsk); \
+ tsk->state = TASK_INTERRUPTIBLE; \
+ add_wait_queue((sk)->sk_sleep, &wait); \
+ release_sock(sk);
+
+#define SOCK_SLEEP_POST(sk) tsk->state = TASK_RUNNING; \
+ remove_wait_queue((sk)->sk_sleep, &wait); \
+ lock_sock(sk); \
+ }
+
+static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
+
+#ifdef CONFIG_NET
+int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+#else
+static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _SOCK_H */
diff --git a/include/net/syncppp.h b/include/net/syncppp.h
new file mode 100644
index 000000000000..614cb6ba564e
--- /dev/null
+++ b/include/net/syncppp.h
@@ -0,0 +1,105 @@
+/*
+ * Defines for synchronous PPP/Cisco link level subroutines.
+ *
+ * Copyright (C) 1994 Cronyx Ltd.
+ * Author: Serge Vakulenko, <vak@zebub.msk.su>
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organizations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * Version 1.7, Wed Jun 7 22:12:02 MSD 1995
+ *
+ *
+ *
+ */
+
+#ifndef _SYNCPPP_H_
+#define _SYNCPPP_H_ 1
+
+#ifdef __KERNEL__
+struct slcp {
+ u16 state; /* state machine */
+ u32 magic; /* local magic number */
+ u_char echoid; /* id of last keepalive echo request */
+ u_char confid; /* id of last configuration request */
+};
+
+struct sipcp {
+ u16 state; /* state machine */
+ u_char confid; /* id of last configuration request */
+};
+
+struct sppp
+{
+ struct sppp * pp_next; /* next interface in keepalive list */
+ u32 pp_flags; /* use Cisco protocol instead of PPP */
+ u16 pp_alivecnt; /* keepalive packets counter */
+ u16 pp_loopcnt; /* loopback detection counter */
+ u32 pp_seq; /* local sequence number */
+ u32 pp_rseq; /* remote sequence number */
+ struct slcp lcp; /* LCP params */
+ struct sipcp ipcp; /* IPCP params */
+ u32 ibytes,obytes; /* Bytes in/out */
+ u32 ipkts,opkts; /* Packets in/out */
+ struct timer_list pp_timer;
+ struct net_device *pp_if;
+ char pp_link_state; /* Link status */
+ spinlock_t lock;
+};
+
+struct ppp_device
+{
+ struct net_device *dev; /* Network device pointer */
+ struct sppp sppp; /* Synchronous PPP */
+};
+
+static inline struct sppp *sppp_of(struct net_device *dev)
+{
+ struct ppp_device **ppp = dev->priv;
+ BUG_ON((*ppp)->dev != dev);
+ return &(*ppp)->sppp;
+}
+
+#define PP_KEEPALIVE 0x01 /* use keepalive protocol */
+#define PP_CISCO 0x02 /* use Cisco protocol instead of PPP */
+#define PP_TIMO 0x04 /* cp_timeout routine active */
+#define PP_DEBUG 0x08
+
+#define PPP_MTU 1500 /* max. transmit unit */
+
+#define LCP_STATE_CLOSED 0 /* LCP state: closed (conf-req sent) */
+#define LCP_STATE_ACK_RCVD 1 /* LCP state: conf-ack received */
+#define LCP_STATE_ACK_SENT 2 /* LCP state: conf-ack sent */
+#define LCP_STATE_OPENED 3 /* LCP state: opened */
+
+#define IPCP_STATE_CLOSED 0 /* IPCP state: closed (conf-req sent) */
+#define IPCP_STATE_ACK_RCVD 1 /* IPCP state: conf-ack received */
+#define IPCP_STATE_ACK_SENT 2 /* IPCP state: conf-ack sent */
+#define IPCP_STATE_OPENED 3 /* IPCP state: opened */
+
+#define SPPP_LINK_DOWN 0 /* link down - no keepalive */
+#define SPPP_LINK_UP 1 /* link is up - keepalive ok */
+
+void sppp_attach (struct ppp_device *pd);
+void sppp_detach (struct net_device *dev);
+void sppp_input (struct net_device *dev, struct sk_buff *m);
+int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
+struct sk_buff *sppp_dequeue (struct net_device *dev);
+int sppp_isempty (struct net_device *dev);
+void sppp_flush (struct net_device *dev);
+int sppp_open (struct net_device *dev);
+int sppp_reopen (struct net_device *dev);
+int sppp_close (struct net_device *dev);
+#endif
+
+#define SPPPIOCCISCO (SIOCDEVPRIVATE)
+#define SPPPIOCPPP (SIOCDEVPRIVATE+1)
+#define SPPPIOCDEBUG (SIOCDEVPRIVATE+2)
+#define SPPPIOCSFLAGS (SIOCDEVPRIVATE+3)
+#define SPPPIOCGFLAGS (SIOCDEVPRIVATE+4)
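+/*
+ * Illustrative sketch (user space): these device-private ioctls are issued
+ * against an interface through an ordinary socket, e.g. switching a port to
+ * Cisco HDLC keepalive mode.  The interface name "hdlc0" is hypothetical.
+ *
+ *	struct ifreq ifr;
+ *
+ *	strcpy(ifr.ifr_name, "hdlc0");
+ *	ioctl(fd, SPPPIOCCISCO, &ifr);
+ */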
+
+#endif /* _SYNCPPP_H_ */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
new file mode 100644
index 000000000000..59f0d9628ad1
--- /dev/null
+++ b/include/net/tc_act/tc_gact.h
@@ -0,0 +1,17 @@
+#ifndef __NET_TC_GACT_H
+#define __NET_TC_GACT_H
+
+#include <net/act_api.h>
+
+struct tcf_gact
+{
+ tca_gen(gact);
+#ifdef CONFIG_GACT_PROB
+ u16 ptype;
+ u16 pval;
+ int paction;
+#endif
+
+};
+
+#endif
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
new file mode 100644
index 000000000000..02eccebd55ae
--- /dev/null
+++ b/include/net/tc_act/tc_ipt.h
@@ -0,0 +1,16 @@
+#ifndef __NET_TC_IPT_H
+#define __NET_TC_IPT_H
+
+#include <net/act_api.h>
+
+struct ipt_entry_target;
+
+struct tcf_ipt
+{
+ tca_gen(ipt);
+ u32 hook;
+ char *tname;
+ struct ipt_entry_target *t;
+};
+
+#endif
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
new file mode 100644
index 000000000000..b5c32f65c12c
--- /dev/null
+++ b/include/net/tc_act/tc_mirred.h
@@ -0,0 +1,15 @@
+#ifndef __NET_TC_MIR_H
+#define __NET_TC_MIR_H
+
+#include <net/act_api.h>
+
+struct tcf_mirred
+{
+ tca_gen(mirred);
+ int eaction;
+ int ifindex;
+ int ok_push;
+ struct net_device *dev;
+};
+
+#endif
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
new file mode 100644
index 000000000000..eb21689d759d
--- /dev/null
+++ b/include/net/tc_act/tc_pedit.h
@@ -0,0 +1,14 @@
+#ifndef __NET_TC_PED_H
+#define __NET_TC_PED_H
+
+#include <net/act_api.h>
+
+struct tcf_pedit
+{
+ tca_gen(pedit);
+ unsigned char nkeys;
+ unsigned char flags;
+ struct tc_pedit_key *keys;
+};
+
+#endif
diff --git a/include/net/tcp.h b/include/net/tcp.h
new file mode 100644
index 000000000000..503810a70e21
--- /dev/null
+++ b/include/net/tcp.h
@@ -0,0 +1,2022 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#define TCP_DEBUG 1
+#define FASTRETRANS_DEBUG 1
+
+/* Cancel timers, when they are not required. */
+#undef TCP_CLEAR_TIMERS
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/tcp.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <net/checksum.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#include <linux/ipv6.h>
+#endif
+#include <linux/seq_file.h>
+
+/* This is for all connections with a full identity, no wildcards.
+ * New scheme, half the table is for TIME_WAIT, the other half is
+ * for the rest. I'll experiment with dynamic table growth later.
+ */
+struct tcp_ehash_bucket {
+ rwlock_t lock;
+ struct hlist_head chain;
+} __attribute__((__aligned__(8)));
+
+/* This is for listening sockets, thus all sockets which possess wildcards. */
+#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
+
+/* There are a few simple rules, which allow for local port reuse by
+ * an application. In essence:
+ *
+ * 1) Sockets bound to different interfaces may share a local port.
+ * Failing that, goto test 2.
+ * 2) If all sockets have sk->sk_reuse set, and none of them are in
+ * TCP_LISTEN state, the port may be shared.
+ * Failing that, goto test 3.
+ * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
+ * address, and none of them are the same, the port may be
+ * shared.
+ * Failing this, the port cannot be shared.
+ *
+ * The interesting point is test #2. This is what an FTP server does
+ * all day. To optimize this case we use a specific flag bit defined
+ * below. As we add sockets to a bind bucket list, we perform a
+ * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
+ * As long as all sockets added to a bind bucket pass this test,
+ * the flag bit will be set.
+ * The resulting situation is that tcp_v[46]_verify_bind() can just check
+ * for this flag bit, if it is set and the socket trying to bind has
+ * sk->sk_reuse set, we don't even have to walk the owners list at all,
+ * we return that it is ok to bind this socket to the requested local port.
+ *
+ * Sounds like a lot of work, but it is worth it. In a more naive
+ * implementation (ie. current FreeBSD etc.) the entire list of ports
+ * must be walked for each data port opened by an ftp server. Needless
+ * to say, this does not scale at all. With a couple thousand FTP
+ * users logged onto your box, isn't it nice to know that new data
+ * ports are created in O(1) time? I thought so. ;-) -DaveM
+ */
+struct tcp_bind_bucket {
+ unsigned short port;
+ signed short fastreuse;
+ struct hlist_node node;
+ struct hlist_head owners;
+};
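+/*
+ * Illustrative sketch of the fast-reuse test described above: while every
+ * owner added to a bucket passes the check, the bucket keeps its fastreuse
+ * flag and later binders can skip walking the owner list; roughly
+ *
+ *	if (!(sk->sk_reuse && sk->sk_state != TCP_LISTEN))
+ *		tb->fastreuse = 0;
+ */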
+
+#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
+
+struct tcp_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
+{
+ return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
+}
+
+static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
+{
+ return hlist_empty(&head->chain) ? NULL : __tb_head(head);
+}
+
+extern struct tcp_hashinfo {
+ /* This is for sockets with full identity only. Sockets here will
+ * always be without wildcards and will have the following invariant:
+ *
+ * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
+ *
+ * First half of the table is for sockets not in TIME_WAIT, second half
+ * is for TIME_WAIT sockets only.
+ */
+ struct tcp_ehash_bucket *__tcp_ehash;
+
+ /* Ok, let's try this, I give up, we do need a local binding
+ * TCP hash as well as the others for fast bind/connect.
+ */
+ struct tcp_bind_hashbucket *__tcp_bhash;
+
+ int __tcp_bhash_size;
+ int __tcp_ehash_size;
+
+ /* All sockets in TCP_LISTEN state will be in here. This is the only
+ * table where wildcard'd TCP sockets can exist. Hash function here
+ * is just local port number.
+ */
+ struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
+
+ /* All the above members are written once at bootup and
+ * never written again _or_ are predominantly read-access.
+ *
+ * Now align to a new cache line as all the following members
+ * are often dirty.
+ */
+ rwlock_t __tcp_lhash_lock ____cacheline_aligned;
+ atomic_t __tcp_lhash_users;
+ wait_queue_head_t __tcp_lhash_wait;
+ spinlock_t __tcp_portalloc_lock;
+} tcp_hashinfo;
+
+#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
+#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
+#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
+#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
+#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
+#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
+#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
+#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
+#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
+
+extern kmem_cache_t *tcp_bucket_cachep;
+extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
+ unsigned short snum);
+extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
+extern void tcp_bucket_unlock(struct sock *sk);
+extern int tcp_port_rover;
+
+/* These are AF independent. */
+static __inline__ int tcp_bhashfn(__u16 lport)
+{
+ return (lport & (tcp_bhash_size - 1));
+}
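+/*
+ * Worked example: with a bind-hash table of 512 buckets (always a power of
+ * two), tcp_bhashfn(8080) == 8080 & 511 == 400, i.e. a cheap modulo done by
+ * masking.
+ */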
+
+extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
+ unsigned short snum);
+
+#if (BITS_PER_LONG == 64)
+#define TCP_ADDRCMP_ALIGN_BYTES 8
+#else
+#define TCP_ADDRCMP_ALIGN_BYTES 4
+#endif
+
+/* This is a TIME_WAIT bucket. It works around the memory consumption
+ * problems of sockets in such a state on heavily loaded servers, but
+ * without violating the protocol specification.
+ */
+struct tcp_tw_bucket {
+ /*
+ * Now struct sock also uses sock_common, so please just
+ * don't add anything before this first member (__tw_common) --acme
+ */
+ struct sock_common __tw_common;
+#define tw_family __tw_common.skc_family
+#define tw_state __tw_common.skc_state
+#define tw_reuse __tw_common.skc_reuse
+#define tw_bound_dev_if __tw_common.skc_bound_dev_if
+#define tw_node __tw_common.skc_node
+#define tw_bind_node __tw_common.skc_bind_node
+#define tw_refcnt __tw_common.skc_refcnt
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+ __u16 tw_sport;
+ /* Socket demultiplex comparisons on incoming packets. */
+ /* these five are in inet_sock */
+ __u32 tw_daddr
+ __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
+ __u32 tw_rcv_saddr;
+ __u16 tw_dport;
+ __u16 tw_num;
+ /* And these are ours. */
+ int tw_hashent;
+ int tw_timeout;
+ __u32 tw_rcv_nxt;
+ __u32 tw_snd_nxt;
+ __u32 tw_rcv_wnd;
+ __u32 tw_ts_recent;
+ long tw_ts_recent_stamp;
+ unsigned long tw_ttd;
+ struct tcp_bind_bucket *tw_tb;
+ struct hlist_node tw_death_node;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct in6_addr tw_v6_daddr;
+ struct in6_addr tw_v6_rcv_saddr;
+ int tw_v6_ipv6only;
+#endif
+};
+
+static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
+ struct hlist_head *list)
+{
+ hlist_add_head(&tw->tw_node, list);
+}
+
+static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
+ struct hlist_head *list)
+{
+ hlist_add_head(&tw->tw_bind_node, list);
+}
+
+static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
+{
+ return tw->tw_death_node.pprev != NULL;
+}
+
+static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
+{
+ tw->tw_death_node.pprev = NULL;
+}
+
+static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
+{
+ __hlist_del(&tw->tw_death_node);
+ tw_dead_node_init(tw);
+}
+
+static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
+{
+ if (tw_dead_hashed(tw)) {
+ __tw_del_dead_node(tw);
+ return 1;
+ }
+ return 0;
+}
+
+#define tw_for_each(tw, node, head) \
+ hlist_for_each_entry(tw, node, head, tw_node)
+
+#define tw_for_each_inmate(tw, node, jail) \
+ hlist_for_each_entry(tw, node, jail, tw_death_node)
+
+#define tw_for_each_inmate_safe(tw, node, safe, jail) \
+ hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+
+#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
+
+static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
+{
+ return likely(sk->sk_state != TCP_TIME_WAIT) ?
+ inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
+{
+ return likely(sk->sk_state != TCP_TIME_WAIT) ?
+ &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
+}
+
+static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
+{
+ return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
+}
+
+#define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
+
+static inline int tcp_v6_ipv6only(const struct sock *sk)
+{
+ return likely(sk->sk_state != TCP_TIME_WAIT) ?
+ ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
+}
+#else
+# define __tcp_v6_rcv_saddr(__sk) NULL
+# define tcp_v6_rcv_saddr(__sk) NULL
+# define tcptw_sk_ipv6only(__sk) 0
+# define tcp_v6_ipv6only(__sk) 0
+#endif
+
+extern kmem_cache_t *tcp_timewait_cachep;
+
+static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
+{
+ if (atomic_dec_and_test(&tw->tw_refcnt)) {
+#ifdef INET_REFCNT_DEBUG
+ printk(KERN_DEBUG "tw_bucket %p released\n", tw);
+#endif
+ kmem_cache_free(tcp_timewait_cachep, tw);
+ }
+}
+
+extern atomic_t tcp_orphan_count;
+extern int tcp_tw_count;
+extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
+
+
+/* Socket demux engine toys. */
+#ifdef __BIG_ENDIAN
+#define TCP_COMBINED_PORTS(__sport, __dport) \
+ (((__u32)(__sport)<<16) | (__u32)(__dport))
+#else /* __LITTLE_ENDIAN */
+#define TCP_COMBINED_PORTS(__sport, __dport) \
+ (((__u32)(__dport)<<16) | (__u32)(__sport))
+#endif
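+/*
+ * Worked example (little-endian branch above):
+ * TCP_COMBINED_PORTS(0x1234, 0x5678) == (0x5678 << 16) | 0x1234
+ * == 0x56781234, so both ports of a connection can be matched with a
+ * single 32-bit compare in the demux macros below.
+ */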
+
+#if (BITS_PER_LONG == 64)
+#ifdef __BIG_ENDIAN
+#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
+ __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
+#else /* __LITTLE_ENDIAN */
+#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
+ __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
+#endif /* __BIG_ENDIAN */
+#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
+ (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
+ ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
+ (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#else /* 32-bit arch */
+#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
+#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
+ ((inet_sk(__sk)->daddr == (__saddr)) && \
+ (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
+ ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
+ ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
+ (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#endif /* 64-bit arch */
+
+#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
+ (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
+ ((__sk)->sk_family == AF_INET6) && \
+ ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
+ ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+
+/* These can have wildcards, don't try too hard. */
+static __inline__ int tcp_lhashfn(unsigned short num)
+{
+ return num & (TCP_LHTABLE_SIZE - 1);
+}
+
+static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+{
+ return tcp_lhashfn(inet_sk(sk)->num);
+}
+
+#define MAX_TCP_HEADER (128 + MAX_HEADER)
+
+/*
+ * Never offer a window over 32767 without using window scaling. Some
+ * poor stacks do signed 16bit maths!
+ */
+#define MAX_TCP_WINDOW 32767U
+
+/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
+#define TCP_MIN_MSS 88U
+
+/* Minimal RCV_MSS. */
+#define TCP_MIN_RCVMSS 536U
+
+/* After receiving this amount of duplicate ACKs fast retransmit starts. */
+#define TCP_FASTRETRANS_THRESH 3
+
+/* Maximal reordering. */
+#define TCP_MAX_REORDERING 127
+
+/* Maximal number of ACKs sent quickly to accelerate slow-start. */
+#define TCP_MAX_QUICKACKS 16U
+
+/* urg_data states */
+#define TCP_URG_VALID 0x0100
+#define TCP_URG_NOTYET 0x0200
+#define TCP_URG_READ 0x0400
+
+#define TCP_RETR1 3 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down. Minimal RFC value is 3; it corresponds
+ * to ~3sec-8min depending on RTO.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ * RFC1122 says that the limit is 100 sec.
+ * 15 is ~13-30min depending on RTO.
+ */
+
+#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
+ * connection: ~180sec is RFC minimum */
+
+#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
+ * connection: ~180sec is RFC minimum */
+
+
+#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
+ * socket. 7 is ~50sec-16min.
+ */
+
+
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
+ * state, about 60 seconds */
+#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
+ /* BSD style FIN_WAIT2 deadlock breaker.
+ * It used to be 3min, new value is 60sec,
+ * to combine FIN-WAIT-2 timeout with
+ * TIME-WAIT timer.
+ */
+
+#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
+#if HZ >= 100
+#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
+#define TCP_ATO_MIN ((unsigned)(HZ/25))
+#else
+#define TCP_DELACK_MIN 4U
+#define TCP_ATO_MIN 4U
+#endif
+#define TCP_RTO_MAX ((unsigned)(120*HZ))
+#define TCP_RTO_MIN ((unsigned)(HZ/5))
+#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
+
+#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
+ * for local resources.
+ */
+
+#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
+#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
+#define TCP_KEEPALIVE_INTVL (75*HZ)
+
+#define MAX_TCP_KEEPIDLE 32767
+#define MAX_TCP_KEEPINTVL 32767
+#define MAX_TCP_KEEPCNT 127
+#define MAX_TCP_SYNCNT 127
+
+#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
+#define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
+
+#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
+#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
+ * after this time. It should be equal
+ * (or greater than) TCP_TIMEWAIT_LEN
+ * to provide reliability equal to one
+ * provided by timewait state.
+ */
+#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
+ * timestamps. It must be less than
+ * minimal timewait lifetime.
+ */
+
+#define TCP_TW_RECYCLE_SLOTS_LOG 5
+#define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
+
+/* If the time is > 4sec, it is the "slow" path and no recycling is required,
+   so we select the tick to get a range of about 4 seconds.
+ */
+
+#if HZ <= 16 || HZ > 4096
+# error Unsupported: HZ <= 16 or HZ > 4096
+#elif HZ <= 32
+# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 64
+# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 128
+# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 256
+# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 512
+# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 1024
+# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 2048
+# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#else
+# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#endif
+
+#define BICTCP_BETA_SCALE 1024 /* Scale factor for the beta calculation
+ * max_cwnd = snd_cwnd * beta
+ */
+#define BICTCP_MAX_INCREMENT 32 /*
+ * Limit on the amount of
+ * increment allowed during
+ * binary search.
+ */
+#define BICTCP_FUNC_OF_MIN_INCR 11 /*
+ * log(B/Smin)/log(B/(B-1))+1,
+ * Smin:min increment
+ * B:log factor
+ */
+#define BICTCP_B 4 /*
+ * In binary search,
+ * go to point (max+min)/N
+ */
+
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1 /* Padding */
+#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_MSS 2 /* Segment size negotiating */
+#define TCPOPT_WINDOW 3 /* Window scaling */
+#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
+#define TCPOPT_SACK 5 /* SACK Block */
+#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+
+/*
+ * TCP option lengths
+ */
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_SACK_PERM 2
+#define TCPOLEN_TIMESTAMP 10
+
+/* But this is what stacks really send out. */
+#define TCPOLEN_TSTAMP_ALIGNED 12
+#define TCPOLEN_WSCALE_ALIGNED 4
+#define TCPOLEN_SACKPERM_ALIGNED 4
+#define TCPOLEN_SACK_BASE 2
+#define TCPOLEN_SACK_BASE_ALIGNED 4
+#define TCPOLEN_SACK_PERBLOCK 8
+
+#define TCP_TIME_RETRANS 1 /* Retransmit timer */
+#define TCP_TIME_DACK 2 /* Delayed ack timer */
+#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
+#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
+
+/* Flags in tp->nonagle */
+#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
+#define TCP_NAGLE_CORK 2 /* Socket is corked */
+#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
+
+/* sysctl variables for tcp */
+extern int sysctl_max_syn_backlog;
+extern int sysctl_tcp_timestamps;
+extern int sysctl_tcp_window_scaling;
+extern int sysctl_tcp_sack;
+extern int sysctl_tcp_fin_timeout;
+extern int sysctl_tcp_tw_recycle;
+extern int sysctl_tcp_keepalive_time;
+extern int sysctl_tcp_keepalive_probes;
+extern int sysctl_tcp_keepalive_intvl;
+extern int sysctl_tcp_syn_retries;
+extern int sysctl_tcp_synack_retries;
+extern int sysctl_tcp_retries1;
+extern int sysctl_tcp_retries2;
+extern int sysctl_tcp_orphan_retries;
+extern int sysctl_tcp_syncookies;
+extern int sysctl_tcp_retrans_collapse;
+extern int sysctl_tcp_stdurg;
+extern int sysctl_tcp_rfc1337;
+extern int sysctl_tcp_abort_on_overflow;
+extern int sysctl_tcp_max_orphans;
+extern int sysctl_tcp_max_tw_buckets;
+extern int sysctl_tcp_fack;
+extern int sysctl_tcp_reordering;
+extern int sysctl_tcp_ecn;
+extern int sysctl_tcp_dsack;
+extern int sysctl_tcp_mem[3];
+extern int sysctl_tcp_wmem[3];
+extern int sysctl_tcp_rmem[3];
+extern int sysctl_tcp_app_win;
+extern int sysctl_tcp_adv_win_scale;
+extern int sysctl_tcp_tw_reuse;
+extern int sysctl_tcp_frto;
+extern int sysctl_tcp_low_latency;
+extern int sysctl_tcp_westwood;
+extern int sysctl_tcp_vegas_cong_avoid;
+extern int sysctl_tcp_vegas_alpha;
+extern int sysctl_tcp_vegas_beta;
+extern int sysctl_tcp_vegas_gamma;
+extern int sysctl_tcp_nometrics_save;
+extern int sysctl_tcp_bic;
+extern int sysctl_tcp_bic_fast_convergence;
+extern int sysctl_tcp_bic_low_window;
+extern int sysctl_tcp_bic_beta;
+extern int sysctl_tcp_moderate_rcvbuf;
+extern int sysctl_tcp_tso_win_divisor;
+
+extern atomic_t tcp_memory_allocated;
+extern atomic_t tcp_sockets_allocated;
+extern int tcp_memory_pressure;
+
+struct open_request;
+
+struct or_calltable {
+ int family;
+ int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
+ void (*send_ack) (struct sk_buff *skb, struct open_request *req);
+ void (*destructor) (struct open_request *req);
+ void (*send_reset) (struct sk_buff *skb);
+};
+
+struct tcp_v4_open_req {
+ __u32 loc_addr;
+ __u32 rmt_addr;
+ struct ip_options *opt;
+};
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+struct tcp_v6_open_req {
+ struct in6_addr loc_addr;
+ struct in6_addr rmt_addr;
+ struct sk_buff *pktopts;
+ int iif;
+};
+#endif
+
+/* this structure is too big */
+struct open_request {
+ struct open_request *dl_next; /* Must be first member! */
+ __u32 rcv_isn;
+ __u32 snt_isn;
+ __u16 rmt_port;
+ __u16 mss;
+ __u8 retrans;
+ __u8 __pad;
+ __u16 snd_wscale : 4,
+ rcv_wscale : 4,
+ tstamp_ok : 1,
+ sack_ok : 1,
+ wscale_ok : 1,
+ ecn_ok : 1,
+ acked : 1;
+ /* The following two fields can be easily recomputed I think -AK */
+ __u32 window_clamp; /* window clamp at creation time */
+ __u32 rcv_wnd; /* rcv_wnd offered first time */
+ __u32 ts_recent;
+ unsigned long expires;
+ struct or_calltable *class;
+ struct sock *sk;
+ union {
+ struct tcp_v4_open_req v4_req;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ struct tcp_v6_open_req v6_req;
+#endif
+ } af;
+};
+
+/* SLAB cache for open requests. */
+extern kmem_cache_t *tcp_openreq_cachep;
+
+#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
+#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
+
+static inline void tcp_openreq_free(struct open_request *req)
+{
+ req->class->destructor(req);
+ tcp_openreq_fastfree(req);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#else
+#define TCP_INET_FAMILY(fam) 1
+#endif
+
+/*
+ * Pointers to address related TCP functions
+ * (i.e. things that depend on the address family)
+ */
+
+struct tcp_func {
+ int (*queue_xmit) (struct sk_buff *skb,
+ int ipfragok);
+
+ void (*send_check) (struct sock *sk,
+ struct tcphdr *th,
+ int len,
+ struct sk_buff *skb);
+
+ int (*rebuild_header) (struct sock *sk);
+
+ int (*conn_request) (struct sock *sk,
+ struct sk_buff *skb);
+
+ struct sock * (*syn_recv_sock) (struct sock *sk,
+ struct sk_buff *skb,
+ struct open_request *req,
+ struct dst_entry *dst);
+
+ int (*remember_stamp) (struct sock *sk);
+
+ __u16 net_header_len;
+
+ int (*setsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int optlen);
+
+ int (*getsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+
+
+ void (*addr2sockaddr) (struct sock *sk,
+ struct sockaddr *);
+
+ int sockaddr_len;
+};
+
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+
+static inline int before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+
+static inline int after(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq2-seq1) < 0;
+}
+
+
+/* is s2<=s1<=s3 ? */
+static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+{
+ return seq3 - seq2 >= seq1 - seq2;
+}
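+
+/* Illustrative sketch, not part of the original interface: how the
+ * signed-difference comparisons above behave across the 32-bit sequence
+ * wraparound. The values are hypothetical; only before()/after()/between()
+ * from this header are assumed.
+ */
+#if 0
+static void seq_wraparound_example(void)
+{
+	__u32 a = 0xfffffff0;	/* shortly before the 32-bit wrap */
+	__u32 b = 0x00000010;	/* shortly after the wrap */
+
+	/* Casting the difference to a signed type keeps the ordering across
+	 * the wrap: before(a, b) and after(b, a) are both true here, and
+	 * between(0, a, b) reports that 0 lies in [a, b] modulo 2^32.
+	 */
+	(void)before(a, b);	/* 1 */
+	(void)after(b, a);	/* 1 */
+	(void)between(0, a, b);	/* 1 */
+}
+#endif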
+
+
+extern struct proto tcp_prot;
+
+DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
+#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
+#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
+#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
+#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
+#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
+#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
+
+extern void tcp_put_port(struct sock *sk);
+extern void tcp_inherit_port(struct sock *sk, struct sock *child);
+
+extern void tcp_v4_err(struct sk_buff *skb, u32);
+
+extern void tcp_shutdown (struct sock *sk, int how);
+
+extern int tcp_v4_rcv(struct sk_buff *skb);
+
+extern int tcp_v4_remember_stamp(struct sock *sk);
+
+extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
+
+extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t size);
+extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
+
+extern int tcp_ioctl(struct sock *sk,
+ int cmd,
+ unsigned long arg);
+
+extern int tcp_rcv_state_process(struct sock *sk,
+ struct sk_buff *skb,
+ struct tcphdr *th,
+ unsigned len);
+
+extern int tcp_rcv_established(struct sock *sk,
+ struct sk_buff *skb,
+ struct tcphdr *th,
+ unsigned len);
+
+extern void tcp_rcv_space_adjust(struct sock *sk);
+
+enum tcp_ack_state_t
+{
+ TCP_ACK_SCHED = 1,
+ TCP_ACK_TIMER = 2,
+ TCP_ACK_PUSHED= 4
+};
+
+static inline void tcp_schedule_ack(struct tcp_sock *tp)
+{
+ tp->ack.pending |= TCP_ACK_SCHED;
+}
+
+static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+{
+ return tp->ack.pending&TCP_ACK_SCHED;
+}
+
+static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
+{
+ if (tp->ack.quick && --tp->ack.quick == 0) {
+ /* Leaving quickack mode we deflate ATO. */
+ tp->ack.ato = TCP_ATO_MIN;
+ }
+}
+
+extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+
+static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+{
+ memset(&tp->ack, 0, sizeof(tp->ack));
+}
+
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
+{
+ rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
+}
+
+enum tcp_tw_status
+{
+ TCP_TW_SUCCESS = 0,
+ TCP_TW_RST = 1,
+ TCP_TW_ACK = 2,
+ TCP_TW_SYN = 3
+};
+
+
+extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
+ struct sk_buff *skb,
+ struct tcphdr *th,
+ unsigned len);
+
+extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
+ struct open_request *req,
+ struct open_request **prev);
+extern int tcp_child_process(struct sock *parent,
+ struct sock *child,
+ struct sk_buff *skb);
+extern void tcp_enter_frto(struct sock *sk);
+extern void tcp_enter_loss(struct sock *sk, int how);
+extern void tcp_clear_retrans(struct tcp_sock *tp);
+extern void tcp_update_metrics(struct sock *sk);
+
+extern void tcp_close(struct sock *sk,
+ long timeout);
+extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
+extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
+
+extern int tcp_getsockopt(struct sock *sk, int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+extern int tcp_setsockopt(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int optlen);
+extern void tcp_set_keepalive(struct sock *sk, int val);
+extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg,
+ size_t len, int nonblock,
+ int flags, int *addr_len);
+
+extern int tcp_listen_start(struct sock *sk);
+
+extern void tcp_parse_options(struct sk_buff *skb,
+ struct tcp_options_received *opt_rx,
+ int estab);
+
+/*
+ * TCP v4 functions exported for the inet6 API
+ */
+
+extern int tcp_v4_rebuild_header(struct sock *sk);
+
+extern int tcp_v4_build_header(struct sock *sk,
+ struct sk_buff *skb);
+
+extern void tcp_v4_send_check(struct sock *sk,
+ struct tcphdr *th, int len,
+ struct sk_buff *skb);
+
+extern int tcp_v4_conn_request(struct sock *sk,
+ struct sk_buff *skb);
+
+extern struct sock * tcp_create_openreq_child(struct sock *sk,
+ struct open_request *req,
+ struct sk_buff *skb);
+
+extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
+ struct sk_buff *skb,
+ struct open_request *req,
+ struct dst_entry *dst);
+
+extern int tcp_v4_do_rcv(struct sock *sk,
+ struct sk_buff *skb);
+
+extern int tcp_v4_connect(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
+
+extern int tcp_connect(struct sock *sk);
+
+extern struct sk_buff * tcp_make_synack(struct sock *sk,
+ struct dst_entry *dst,
+ struct open_request *req);
+
+extern int tcp_disconnect(struct sock *sk, int flags);
+
+extern void tcp_unhash(struct sock *sk);
+
+extern int tcp_v4_hash_connecting(struct sock *sk);
+
+
+/* From syncookies.c */
+extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ struct ip_options *opt);
+extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
+ __u16 *mss);
+
+/* tcp_output.c */
+
+extern int tcp_write_xmit(struct sock *, int nonagle);
+extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+extern void tcp_xmit_retransmit_queue(struct sock *);
+extern void tcp_simple_retransmit(struct sock *);
+extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+
+extern void tcp_send_probe0(struct sock *);
+extern void tcp_send_partial(struct sock *);
+extern int tcp_write_wakeup(struct sock *);
+extern void tcp_send_fin(struct sock *sk);
+extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern int tcp_send_synack(struct sock *);
+extern void tcp_push_one(struct sock *, unsigned mss_now);
+extern void tcp_send_ack(struct sock *sk);
+extern void tcp_send_delayed_ack(struct sock *sk);
+
+/* tcp_timer.c */
+extern void tcp_init_xmit_timers(struct sock *);
+extern void tcp_clear_xmit_timers(struct sock *);
+
+extern void tcp_delete_keepalive_timer(struct sock *);
+extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+extern unsigned int tcp_current_mss(struct sock *sk, int large);
+
+#ifdef TCP_DEBUG
+extern const char tcp_timer_bug_msg[];
+#endif
+
+/* tcp_diag.c */
+extern void tcp_get_info(struct sock *, struct tcp_info *);
+
+/* Read 'sendfile()'-style from a TCP socket */
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
+ unsigned int, size_t);
+extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
+
+static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ switch (what) {
+ case TCP_TIME_RETRANS:
+ case TCP_TIME_PROBE0:
+ tp->pending = 0;
+
+#ifdef TCP_CLEAR_TIMERS
+ sk_stop_timer(sk, &tp->retransmit_timer);
+#endif
+ break;
+ case TCP_TIME_DACK:
+ tp->ack.blocked = 0;
+ tp->ack.pending = 0;
+
+#ifdef TCP_CLEAR_TIMERS
+ sk_stop_timer(sk, &tp->delack_timer);
+#endif
+ break;
+ default:
+#ifdef TCP_DEBUG
+ printk(tcp_timer_bug_msg);
+#endif
+ return;
+ };
+
+}
+
+/*
+ * Reset the retransmission timer
+ */
+static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (when > TCP_RTO_MAX) {
+#ifdef TCP_DEBUG
+ printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#endif
+ when = TCP_RTO_MAX;
+ }
+
+ switch (what) {
+ case TCP_TIME_RETRANS:
+ case TCP_TIME_PROBE0:
+ tp->pending = what;
+ tp->timeout = jiffies+when;
+ sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
+ break;
+
+ case TCP_TIME_DACK:
+ tp->ack.pending |= TCP_ACK_TIMER;
+ tp->ack.timeout = jiffies+when;
+ sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
+ break;
+
+ default:
+#ifdef TCP_DEBUG
+ printk(tcp_timer_bug_msg);
+#endif
+ return;
+ };
+}
+
+/* Initialize RCV_MSS value.
+ * RCV_MSS is our guess about the MSS used by the peer.
+ * We have no direct information about the MSS.
+ * It's better to underestimate RCV_MSS than to overestimate it:
+ * overestimation makes us ACK less often than needed, while
+ * underestimation is easier to detect and fix by tcp_measure_rcv_mss().
+ */
+
+static inline void tcp_initialize_rcv_mss(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned int hint = min(tp->advmss, tp->mss_cache_std);
+
+ hint = min(hint, tp->rcv_wnd/2);
+ hint = min(hint, TCP_MIN_RCVMSS);
+ hint = max(hint, TCP_MIN_MSS);
+
+ tp->ack.rcv_mss = hint;
+}
+
+static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+{
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
+}
+
+static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
+{
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
+}
+
+static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
+{
+ if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+ tp->rcv_wnd &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ !tp->urg_data)
+ tcp_fast_path_on(tp);
+}
+
+/* Compute the actual receive window we are currently advertising.
+ * Rcv_nxt can be after the window if our peer pushes more data
+ * than the offered window.
+ */
+static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
+{
+ s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
+
+ if (win < 0)
+ win = 0;
+ return (u32) win;
+}
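+
+/* Numeric sketch, not part of the original interface: hypothetical values
+ * showing how the advertised window shrinks as rcv_nxt advances, and why
+ * the result is clamped at zero when the peer overruns the offer.
+ */
+#if 0
+static u32 receive_window_example(void)
+{
+	u32 rcv_wup = 1000, rcv_wnd = 500, rcv_nxt = 1300;
+
+	/* 1000 + 500 - 1300 = 200 bytes may still be advertised; had
+	 * rcv_nxt passed 1500 the difference would go negative and
+	 * tcp_receive_window() would clamp it to 0.
+	 */
+	return rcv_wup + rcv_wnd - rcv_nxt;
+}
+#endif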
+
+/* Choose a new window, without checks for shrinking, and without
+ * scaling applied to the result. The caller does these things
+ * if necessary. This is a "raw" window selection.
+ */
+extern u32 __tcp_select_window(struct sock *sk);
+
+/* TCP timestamps are only 32 bits, which causes a slight
+ * complication on 64-bit systems since we store a snapshot
+ * of jiffies in the buffer control blocks below. We deliberately
+ * use only the low 32 bits of jiffies and hide the ugly
+ * casts with the following macro.
+ */
+#define tcp_time_stamp ((__u32)(jiffies))
+
+/* This is what the send packet queueing engine uses to pass
+ * TCP per-packet control information to the transmission
+ * code. We also store the host-order sequence numbers
+ * here. This is 36 bytes on 32-bit architectures and
+ * 40 bytes on 64-bit machines; if this grows, please adjust
+ * skbuff.h:skbuff->cb[xxx] size appropriately.
+ */
+struct tcp_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ struct inet6_skb_parm h6;
+#endif
+ } header; /* For incoming frames */
+ __u32 seq; /* Starting sequence number */
+ __u32 end_seq; /* SEQ + FIN + SYN + datalen */
+ __u32 when; /* used to compute rtt's */
+ __u8 flags; /* TCP header flags. */
+
+ /* NOTE: These must match up to the flags byte in a
+ * real TCP header.
+ */
+#define TCPCB_FLAG_FIN 0x01
+#define TCPCB_FLAG_SYN 0x02
+#define TCPCB_FLAG_RST 0x04
+#define TCPCB_FLAG_PSH 0x08
+#define TCPCB_FLAG_ACK 0x10
+#define TCPCB_FLAG_URG 0x20
+#define TCPCB_FLAG_ECE 0x40
+#define TCPCB_FLAG_CWR 0x80
+
+ __u8 sacked; /* State flags for SACK/FACK. */
+#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
+#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
+#define TCPCB_LOST 0x04 /* SKB is lost */
+#define TCPCB_TAGBITS 0x07 /* All tag bits */
+
+#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
+#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
+
+#define TCPCB_URG 0x20 /* Urgent pointer advanced here */
+
+#define TCPCB_AT_TAIL (TCPCB_URG)
+
+ __u16 urg_ptr; /* Valid when the URG flag is set. */
+ __u32 ack_seq; /* Sequence number ACK'd */
+};
+
+#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+
+#include <net/tcp_ecn.h>
+
+/* Due to TSO, an SKB can be composed of multiple actual
+ * packets. To keep these tracked properly, we use this.
+ */
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tso_segs;
+}
+
+/* This is valid iff tcp_skb_pcount() > 1. */
+static inline int tcp_skb_mss(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tso_size;
+}
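+
+/* Illustrative sketch, not part of the original interface: how the two
+ * helpers above combine for a TSO skb. The shared-info values are
+ * hypothetical.
+ */
+#if 0
+static unsigned int tso_payload_example(const struct sk_buff *skb)
+{
+	/* e.g. tso_segs == 4 and tso_size == 1460 means this single skb
+	 * stands for four wire packets of at most 1460 payload bytes each.
+	 */
+	return tcp_skb_pcount(skb) * tcp_skb_mss(skb);
+}
+#endif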
+
+static inline void tcp_dec_pcount_approx(__u32 *count,
+ const struct sk_buff *skb)
+{
+ if (*count) {
+ *count -= tcp_skb_pcount(skb);
+ if ((int)*count < 0)
+ *count = 0;
+ }
+}
+
+static inline void tcp_packets_out_inc(struct sock *sk,
+ struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ int orig = tp->packets_out;
+
+ tp->packets_out += tcp_skb_pcount(skb);
+ if (!orig)
+ tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+}
+
+static inline void tcp_packets_out_dec(struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ tp->packets_out -= tcp_skb_pcount(skb);
+}
+
+/* This determines how many packets are "in the network" to the best
+ * of our knowledge. In many cases it is conservative, but where
+ * detailed information is available from the receiver (via SACK
+ * blocks etc.) we can make more aggressive calculations.
+ *
+ * Use this for decisions involving congestion control, use just
+ * tp->packets_out to determine if the send queue is empty or not.
+ *
+ * Read this equation as:
+ *
+ * "Packets sent once on transmission queue" MINUS
+ * "Packets left network, but not honestly ACKed yet" PLUS
+ * "Packets fast retransmitted"
+ */
+static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+{
+ return (tp->packets_out - tp->left_out + tp->retrans_out);
+}
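+
+/* Numeric sketch, not part of the original interface: the equation above
+ * with hypothetical counters.
+ */
+#if 0
+static unsigned int packets_in_flight_example(void)
+{
+	unsigned int packets_out = 10;	/* sent once */
+	unsigned int left_out = 3;	/* sacked_out + lost_out */
+	unsigned int retrans_out = 2;	/* fast retransmitted */
+
+	return packets_out - left_out + retrans_out;	/* 9 in flight */
+}
+#endif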
+
+/*
+ * Which congestion algorithm is in use on the connection.
+ */
+#define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS)
+#define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD)
+#define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC)
+
+/* Recalculate snd_ssthresh, we want to set it to:
+ *
+ * Reno:
+ * one half the current congestion window, but no
+ * less than two segments
+ *
+ * BIC:
+ * behave like Reno until low_window is reached,
+ * then increase congestion window slowly
+ */
+static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
+{
+ if (tcp_is_bic(tp)) {
+ if (sysctl_tcp_bic_fast_convergence &&
+ tp->snd_cwnd < tp->bictcp.last_max_cwnd)
+ tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
+ (BICTCP_BETA_SCALE
+ + sysctl_tcp_bic_beta))
+ / (2 * BICTCP_BETA_SCALE);
+ else
+ tp->bictcp.last_max_cwnd = tp->snd_cwnd;
+
+ if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
+ return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
+ / BICTCP_BETA_SCALE, 2U);
+ }
+
+ return max(tp->snd_cwnd >> 1U, 2U);
+}
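+
+/* Numeric sketch, not part of the original interface: the Reno branch of
+ * tcp_recalc_ssthresh() above, assuming BIC is not in use.
+ */
+#if 0
+static __u32 reno_ssthresh_example(__u32 snd_cwnd)
+{
+	/* half the congestion window, but never below two segments:
+	 * cwnd 10 -> ssthresh 5, cwnd 3 -> ssthresh 2.
+	 */
+	return max(snd_cwnd >> 1U, 2U);
+}
+#endif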
+
+/* Stop taking Vegas samples for now. */
+#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
+
+static inline void tcp_vegas_enable(struct tcp_sock *tp)
+{
+ /* There are several situations when we must "re-start" Vegas:
+ *
+ * o when a connection is established
+ * o after an RTO
+ * o after fast recovery
+ * o when we send a packet and there is no outstanding
+ * unacknowledged data (restarting an idle connection)
+ *
+ * In these circumstances we cannot do a Vegas calculation at the
+ * end of the first RTT, because any calculation we do is using
+ * stale info -- both the saved cwnd and congestion feedback are
+ * stale.
+ *
+ * Instead we must wait until the completion of an RTT during
+ * which we actually receive ACKs.
+ */
+
+ /* Begin taking Vegas samples next time we send something. */
+ tp->vegas.doing_vegas_now = 1;
+
+ /* Set the beginning of the next send window. */
+ tp->vegas.beg_snd_nxt = tp->snd_nxt;
+
+ tp->vegas.cntRTT = 0;
+ tp->vegas.minRTT = 0x7fffffff;
+}
+
+/* Should we be taking Vegas samples right now? */
+#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
+
+extern void tcp_ca_init(struct tcp_sock *tp);
+
+static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
+{
+ if (tcp_is_vegas(tp)) {
+ if (ca_state == TCP_CA_Open)
+ tcp_vegas_enable(tp);
+ else
+ tcp_vegas_disable(tp);
+ }
+ tp->ca_state = ca_state;
+}
+
+/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
+ * The exception is the rate-halving phase, when cwnd is decreasing towards
+ * ssthresh.
+ */
+static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
+{
+ if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
+ return tp->snd_ssthresh;
+ else
+ return max(tp->snd_ssthresh,
+ ((tp->snd_cwnd >> 1) +
+ (tp->snd_cwnd >> 2)));
+}
+
+static inline void tcp_sync_left_out(struct tcp_sock *tp)
+{
+ if (tp->rx_opt.sack_ok &&
+ (tp->sacked_out >= tp->packets_out - tp->lost_out))
+ tp->sacked_out = tp->packets_out - tp->lost_out;
+ tp->left_out = tp->sacked_out + tp->lost_out;
+}
+
+extern void tcp_cwnd_application_limited(struct sock *sk);
+
+/* Congestion window validation. (RFC2861) */
+
+static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+{
+ __u32 packets_out = tp->packets_out;
+
+ if (packets_out >= tp->snd_cwnd) {
+ /* Network is fully fed. */
+ tp->snd_cwnd_used = 0;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ } else {
+ /* Network starves. */
+ if (tp->packets_out > tp->snd_cwnd_used)
+ tp->snd_cwnd_used = tp->packets_out;
+
+ if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+ tcp_cwnd_application_limited(sk);
+ }
+}
+
+/* Set the slow start threshold and cwnd, without falling back to slow start */
+static inline void __tcp_enter_cwr(struct tcp_sock *tp)
+{
+ tp->undo_marker = 0;
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ tcp_packets_in_flight(tp) + 1U);
+ tp->snd_cwnd_cnt = 0;
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ TCP_ECN_queue_cwr(tp);
+}
+
+static inline void tcp_enter_cwr(struct tcp_sock *tp)
+{
+ tp->prior_ssthresh = 0;
+ if (tp->ca_state < TCP_CA_CWR) {
+ __tcp_enter_cwr(tp);
+ tcp_set_ca_state(tp, TCP_CA_CWR);
+ }
+}
+
+extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
+
+/* Slow start with delack produces 3 packets of burst, so that
+ * it is safe "de facto".
+ */
+static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
+{
+ return 3;
+}
+
+static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
+{
+ return after(tp->snd_sml,tp->snd_una) &&
+ !after(tp->snd_sml, tp->snd_nxt);
+}
+
+static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
+ const struct sk_buff *skb)
+{
+ if (skb->len < mss)
+ tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
+}
+
+/* Return 0 if the packet can be sent now without violating Nagle's rules:
+ 1. It is full sized.
+ 2. Or it contains FIN.
+ 3. Or TCP_NODELAY was set.
+ 4. Or TCP_CORK is not set, and all sent packets are ACKed.
+ With Minshall's modification: all sent small packets are ACKed.
+ */
+
+static __inline__ int
+tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
+ unsigned mss_now, int nonagle)
+{
+ return (skb->len < mss_now &&
+ !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ ((nonagle&TCP_NAGLE_CORK) ||
+ (!nonagle &&
+ tp->packets_out &&
+ tcp_minshall_check(tp))));
+}
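+
+/* Decision sketch, not part of the original interface: a sub-MSS,
+ * non-FIN segment is held back while the socket is corked. The flag
+ * values below are hypothetical.
+ */
+#if 0
+static int nagle_defer_example(void)
+{
+	unsigned mss_now = 1460;
+	unsigned skb_len = 100;		/* a small segment */
+	int has_fin = 0;
+	int corked = 1;			/* nonagle & TCP_NAGLE_CORK */
+
+	/* mirrors the first three conditions of tcp_nagle_check() */
+	return skb_len < mss_now && !has_fin && corked;	/* 1: defer */
+}
+#endif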
+
+extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);
+
+/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
+ * should be put on the wire right now.
+ */
+static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
+ struct sk_buff *skb,
+ unsigned cur_mss, int nonagle)
+{
+ int pkts = tcp_skb_pcount(skb);
+
+ if (!pkts) {
+ tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+ pkts = tcp_skb_pcount(skb);
+ }
+
+ /* RFC 1122 - section 4.2.3.4
+ *
+ * We must queue if
+ *
+ * a) The right edge of this frame exceeds the window
+ * b) There are packets in flight and we have a small segment
+ * [SWS avoidance and Nagle algorithm]
+ * (part of SWS is done on packetization)
+ * The Minshall version reads: there are no _small_
+ * segments in flight. (tcp_nagle_check)
+ * c) We have too many packets 'in flight'
+ *
+ * Don't use the nagle rule for urgent data (or
+ * for the final FIN -DaveM).
+ *
+ * Also, the Nagle rule does not apply to frames that
+ * sit in the middle of the queue (they have no chance
+ * to get new data), nor when the room at the tail of the
+ * skb is too small to save anything worthwhile (<32 for now).
+ */
+
+ /* Don't be strict about the congestion window for the
+ * final FIN frame. -DaveM
+ */
+ return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
+ || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
+ (((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
+ (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
+ !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
+}
+
+static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+{
+ if (!tp->packets_out && !tp->pending)
+ tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+}
+
+static __inline__ int tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
+/* Push out any pending frames which were held back due to
+ * TCP_CORK or attempt at coalescing tiny packets.
+ * The socket must be locked by the caller.
+ */
+static __inline__ void __tcp_push_pending_frames(struct sock *sk,
+ struct tcp_sock *tp,
+ unsigned cur_mss,
+ int nonagle)
+{
+ struct sk_buff *skb = sk->sk_send_head;
+
+ if (skb) {
+ if (!tcp_skb_is_last(sk, skb))
+ nonagle = TCP_NAGLE_PUSH;
+ if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
+ tcp_write_xmit(sk, nonagle))
+ tcp_check_probe_timer(sk, tp);
+ }
+ tcp_cwnd_validate(sk, tp);
+}
+
+static __inline__ void tcp_push_pending_frames(struct sock *sk,
+ struct tcp_sock *tp)
+{
+ __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
+}
+
+static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+{
+ struct sk_buff *skb = sk->sk_send_head;
+
+ return (skb &&
+ tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
+ tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
+}
+
+static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+{
+ tp->snd_wl1 = seq;
+}
+
+static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+{
+ tp->snd_wl1 = seq;
+}
+
+extern void tcp_destroy_sock(struct sock *sk);
+
+
+/*
+ * Calculate(/check) TCP checksum
+ */
+static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr,
+ unsigned long base)
+{
+ return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+}
+
+static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
+{
+ return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+}
+
+static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
+{
+ return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+ __tcp_checksum_complete(skb);
+}
+
+/* Prequeue for VJ style copy to user, combined with checksumming. */
+
+static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
+{
+ tp->ucopy.task = NULL;
+ tp->ucopy.len = 0;
+ tp->ucopy.memory = 0;
+ skb_queue_head_init(&tp->ucopy.prequeue);
+}
+
+/* Packet is added to VJ-style prequeue for processing in process
+ * context, if a reader task is waiting. Apparently, this exciting
+ * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+ * failed somewhere. Latency? Burstiness? Well, at least now we will
+ * see why it failed. 8)8) --ANK
+ *
+ * NOTE: is this not too big to inline?
+ */
+static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!sysctl_tcp_low_latency && tp->ucopy.task) {
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+ struct sk_buff *skb1;
+
+ BUG_ON(sock_owned_by_user(sk));
+
+ while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+ sk->sk_backlog_rcv(sk, skb1);
+ NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+ }
+
+ tp->ucopy.memory = 0;
+ } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+ wake_up_interruptible(sk->sk_sleep);
+ if (!tcp_ack_scheduled(tp))
+ tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+
+#undef STATE_TRACE
+
+#ifdef STATE_TRACE
+static const char *statename[]={
+ "Unused","Established","Syn Sent","Syn Recv",
+ "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
+ "Close Wait","Last ACK","Listen","Closing"
+};
+#endif
+
+static __inline__ void tcp_set_state(struct sock *sk, int state)
+{
+ int oldstate = sk->sk_state;
+
+ switch (state) {
+ case TCP_ESTABLISHED:
+ if (oldstate != TCP_ESTABLISHED)
+ TCP_INC_STATS(TCP_MIB_CURRESTAB);
+ break;
+
+ case TCP_CLOSE:
+ if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
+ TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+
+ sk->sk_prot->unhash(sk);
+ if (tcp_sk(sk)->bind_hash &&
+ !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
+ tcp_put_port(sk);
+ /* fall through */
+ default:
+ if (oldstate==TCP_ESTABLISHED)
+ TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+ }
+
+ /* Change state AFTER socket is unhashed to avoid closed
+ * socket sitting in hash tables.
+ */
+ sk->sk_state = state;
+
+#ifdef STATE_TRACE
+ SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+#endif
+}
+
+static __inline__ void tcp_done(struct sock *sk)
+{
+ tcp_set_state(sk, TCP_CLOSE);
+ tcp_clear_xmit_timers(sk);
+
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ else
+ tcp_destroy_sock(sk);
+}
+
+static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
+{
+ rx_opt->dsack = 0;
+ rx_opt->eff_sacks = 0;
+ rx_opt->num_sacks = 0;
+}
+
+static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
+{
+ if (tp->rx_opt.tstamp_ok) {
+ *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ *ptr++ = htonl(tstamp);
+ *ptr++ = htonl(tp->rx_opt.ts_recent);
+ }
+ if (tp->rx_opt.eff_sacks) {
+ struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
+ int this_sack;
+
+ *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_SACK << 8) |
+ (TCPOLEN_SACK_BASE +
+ (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
+ for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
+ *ptr++ = htonl(sp[this_sack].start_seq);
+ *ptr++ = htonl(sp[this_sack].end_seq);
+ }
+ if (tp->rx_opt.dsack) {
+ tp->rx_opt.dsack = 0;
+ tp->rx_opt.eff_sacks--;
+ }
+ }
+}
+
+/* Construct a tcp options header for a SYN or SYN_ACK packet.
+ * If this is ever changed, make sure to change the definition of
+ * MAX_SYN_SIZE to match the new maximum number of options that you
+ * can generate.
+ */
+static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
+ int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
+{
+ /* We always get an MSS option.
+ * If timestamps are used, the option bytes that will appear
+ * in normal data packets must be counted in the advertised
+ * MSS. But we subtract them from tp->mss_cache so
+ * that calculations in tcp_sendmsg are simpler etc.
+ * So account for this fact here if necessary. If we
+ * don't do this correctly, as a receiver we won't
+ * recognize data packets as being full sized when we
+ * should, and thus we won't abide by the delayed ACK
+ * rules correctly.
+ * SACKs don't matter, we never delay an ACK when we
+ * have any of those going out.
+ */
+ *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
+ if (ts) {
+ if(sack)
+ *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ else
+ *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ *ptr++ = htonl(tstamp); /* TSVAL */
+ *ptr++ = htonl(ts_recent); /* TSECR */
+ } else if(sack)
+ *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
+ if (offer_wscale)
+ *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
+}
+
+/* Determine a window scaling and initial window to offer. */
+extern void tcp_select_initial_window(int __space, __u32 mss,
+ __u32 *rcv_wnd, __u32 *window_clamp,
+ int wscale_ok, __u8 *rcv_wscale);
+
+static inline int tcp_win_from_space(int space)
+{
+ return sysctl_tcp_adv_win_scale<=0 ?
+ (space>>(-sysctl_tcp_adv_win_scale)) :
+ space - (space>>sysctl_tcp_adv_win_scale);
+}
+
+/* Note: caller must be prepared to deal with negative returns */
+static inline int tcp_space(const struct sock *sk)
+{
+ return tcp_win_from_space(sk->sk_rcvbuf -
+ atomic_read(&sk->sk_rmem_alloc));
+}
+
+static inline int tcp_full_space(const struct sock *sk)
+{
+ return tcp_win_from_space(sk->sk_rcvbuf);
+}
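+
+/* Numeric sketch, not part of the original interface: what
+ * tcp_win_from_space() yields for the default tcp_adv_win_scale of 2
+ * and a hypothetical receive buffer.
+ */
+#if 0
+static int win_from_space_example(void)
+{
+	int space = 87380;	/* a typical sk_rcvbuf */
+
+	/* a positive scale reserves space/2^scale for application slack,
+	 * so 87380 - 87380/4 = 65535 bytes are offered as window.
+	 */
+	return space - (space >> 2);
+}
+#endif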
+
+static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+ struct sock *child)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ req->sk = child;
+ sk_acceptq_added(sk);
+
+ if (!tp->accept_queue_tail) {
+ tp->accept_queue = req;
+ } else {
+ tp->accept_queue_tail->dl_next = req;
+ }
+ tp->accept_queue_tail = req;
+ req->dl_next = NULL;
+}
+
+struct tcp_listen_opt
+{
+ u8 max_qlen_log; /* log_2 of maximal queued SYNs */
+ int qlen;
+ int qlen_young;
+ int clock_hand;
+ u32 hash_rnd;
+ struct open_request *syn_table[TCP_SYNQ_HSIZE];
+};
+
+static inline void
+tcp_synq_removed(struct sock *sk, struct open_request *req)
+{
+ struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
+
+ if (--lopt->qlen == 0)
+ tcp_delete_keepalive_timer(sk);
+ if (req->retrans == 0)
+ lopt->qlen_young--;
+}
+
+static inline void tcp_synq_added(struct sock *sk)
+{
+ struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
+
+ if (lopt->qlen++ == 0)
+ tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+ lopt->qlen_young++;
+}
+
+static inline int tcp_synq_len(struct sock *sk)
+{
+ return tcp_sk(sk)->listen_opt->qlen;
+}
+
+static inline int tcp_synq_young(struct sock *sk)
+{
+ return tcp_sk(sk)->listen_opt->qlen_young;
+}
+
+static inline int tcp_synq_is_full(struct sock *sk)
+{
+ return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+}
+
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
+ struct open_request **prev)
+{
+ write_lock(&tp->syn_wait_lock);
+ *prev = req->dl_next;
+ write_unlock(&tp->syn_wait_lock);
+}
+
+static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
+ struct open_request **prev)
+{
+ tcp_synq_unlink(tcp_sk(sk), req, prev);
+ tcp_synq_removed(sk, req);
+ tcp_openreq_free(req);
+}
+
+static __inline__ void tcp_openreq_init(struct open_request *req,
+ struct tcp_options_received *rx_opt,
+ struct sk_buff *skb)
+{
+ req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
+ req->rcv_isn = TCP_SKB_CB(skb)->seq;
+ req->mss = rx_opt->mss_clamp;
+ req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+ req->tstamp_ok = rx_opt->tstamp_ok;
+ req->sack_ok = rx_opt->sack_ok;
+ req->snd_wscale = rx_opt->snd_wscale;
+ req->wscale_ok = rx_opt->wscale_ok;
+ req->acked = 0;
+ req->ecn_ok = 0;
+ req->rmt_port = skb->h.th->source;
+}
+
+extern void tcp_enter_memory_pressure(void);
+
+extern void tcp_listen_wlock(void);
+
+/* - We may sleep inside this lock.
+ * - If sleeping is not required (or called from BH),
+ * use plain read_(un)lock(&tcp_lhash_lock).
+ */
+
+static inline void tcp_listen_lock(void)
+{
+ /* The read_lock synchronizes us with candidate writers */
+ read_lock(&tcp_lhash_lock);
+ atomic_inc(&tcp_lhash_users);
+ read_unlock(&tcp_lhash_lock);
+}
+
+static inline void tcp_listen_unlock(void)
+{
+ if (atomic_dec_and_test(&tcp_lhash_users))
+ wake_up(&tcp_lhash_wait);
+}
+
+static inline int keepalive_intvl_when(const struct tcp_sock *tp)
+{
+ return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
+}
+
+static inline int keepalive_time_when(const struct tcp_sock *tp)
+{
+ return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
+}
+
+static inline int tcp_fin_time(const struct tcp_sock *tp)
+{
+ int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+
+ if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
+ fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+
+ return fin_timeout;
+}
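+
+/* Numeric sketch, not part of the original interface: the lower bound
+ * applied above works out to 3.5 * RTO.
+ */
+#if 0
+static int fin_time_floor_example(int rto)
+{
+	/* (rto << 2) - (rto >> 1) == 4*rto - rto/2 == 3.5 * rto */
+	return (rto << 2) - (rto >> 1);
+}
+#endif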
+
+static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
+{
+ if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
+ return 0;
+ if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+ return 0;
+
+ /* RST segments are not recommended to carry a timestamp,
+ and, if they do, it is recommended to ignore PAWS because
+ "their cleanup function should take precedence over timestamps."
+ Certainly, this is a mistake. It is necessary to understand the
+ reason for this constraint before relaxing it: if a peer reboots,
+ its clock may go out of sync and half-open connections will not
+ be reset. Actually, the problem would not exist if all
+ implementations followed the draft about maintaining clocks
+ across reboots. Linux-2.2 DOES NOT!
+
+ However, we can relax time bounds for RST segments to MSL.
+ */
+ if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ return 0;
+ return 1;
+}
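+
+/* Illustrative sketch, not part of the original interface: the first
+ * freshness test in tcp_paws_check() with hypothetical timestamps.
+ */
+#if 0
+static int paws_fresh_example(void)
+{
+	__u32 ts_recent = 1000;	/* last timestamp we accepted */
+	__u32 rcv_tsval = 1005;	/* the peer's clock moved forward */
+
+	/* (s32)(rcv_tsval - ts_recent) >= 0, so the segment is considered
+	 * fresh and tcp_paws_check() returns 0.
+	 */
+	return (s32)(rcv_tsval - ts_recent) >= 0;
+}
+#endif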
+
+static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
+{
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ }
+}
+
+#define TCP_CHECK_TIMER(sk) do { } while (0)
+
+static inline int tcp_use_frto(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* F-RTO must be activated in sysctl and there must be some
+ * unsent new data, and the advertised window should allow
+ * sending it.
+ */
+ return (sysctl_tcp_frto && sk->sk_send_head &&
+ !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+ tp->snd_una + tp->snd_wnd));
+}
+
+static inline void tcp_mib_init(void)
+{
+ /* See RFC 2012 */
+ TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+}
+
+/* /proc */
+enum tcp_seq_states {
+ TCP_SEQ_STATE_LISTENING,
+ TCP_SEQ_STATE_OPENREQ,
+ TCP_SEQ_STATE_ESTABLISHED,
+ TCP_SEQ_STATE_TIME_WAIT,
+};
+
+struct tcp_seq_afinfo {
+ struct module *owner;
+ char *name;
+ sa_family_t family;
+ int (*seq_show) (struct seq_file *m, void *v);
+ struct file_operations *seq_fops;
+};
+
+struct tcp_iter_state {
+ sa_family_t family;
+ enum tcp_seq_states state;
+ struct sock *syn_wait_sk;
+ int bucket, sbucket, num, uid;
+ struct seq_operations seq_ops;
+};
+
+extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
+extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
+
+/* TCP Westwood functions and constants */
+
+#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
+#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
+
+static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
+{
+ if (tcp_is_westwood(tp))
+ tp->westwood.rtt = rtt_seq;
+}
+
+static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
+{
+ return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
+ (__u32) (tp->mss_cache_std),
+ 2U);
+}
+
+static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
+{
+ return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
+}
+
+static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
+{
+ __u32 ssthresh = 0;
+
+ if (tcp_is_westwood(tp)) {
+ ssthresh = __tcp_westwood_bw_rttmin(tp);
+ if (ssthresh)
+ tp->snd_ssthresh = ssthresh;
+ }
+
+ return (ssthresh != 0);
+}
+
+static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
+{
+ __u32 cwnd = 0;
+
+ if (tcp_is_westwood(tp)) {
+ cwnd = __tcp_westwood_bw_rttmin(tp);
+ if (cwnd)
+ tp->snd_cwnd = cwnd;
+ }
+
+ return (cwnd != 0);
+}
+#endif /* _TCP_H */
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
new file mode 100644
index 000000000000..dc1456389a97
--- /dev/null
+++ b/include/net/tcp_ecn.h
@@ -0,0 +1,126 @@
+#ifndef _NET_TCP_ECN_H_
+#define _NET_TCP_ECN_H_ 1
+
+#include <net/inet_ecn.h>
+
+#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
+
+#define TCP_ECN_OK 1
+#define TCP_ECN_QUEUE_CWR 2
+#define TCP_ECN_DEMAND_CWR 4
+
+static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
+{
+ if (tp->ecn_flags&TCP_ECN_OK)
+ tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
+}
+
+
+/* Output functions */
+
+static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
+ struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+ if (!(tp->ecn_flags&TCP_ECN_OK))
+ TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+}
+
+static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
+ struct sk_buff *skb)
+{
+ tp->ecn_flags = 0;
+ if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
+ TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
+ tp->ecn_flags = TCP_ECN_OK;
+ sock_set_flag(sk, SOCK_NO_LARGESEND);
+ }
+}
+
+static __inline__ void
+TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
+{
+ if (req->ecn_ok)
+ th->ece = 1;
+}
+
+static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
+ struct sk_buff *skb, int tcp_header_len)
+{
+ if (tp->ecn_flags & TCP_ECN_OK) {
+ /* Not-retransmitted data segment: set ECT and inject CWR. */
+ if (skb->len != tcp_header_len &&
+ !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
+ INET_ECN_xmit(sk);
+ if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+ skb->h.th->cwr = 1;
+ }
+ } else {
+ /* ACK or retransmitted segment: clear ECT|CE */
+ INET_ECN_dontxmit(sk);
+ }
+ if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
+ skb->h.th->ece = 1;
+ }
+}
+
+/* Input functions */
+
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
+{
+ if (skb->h.th->cwr)
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+}
+
+static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
+{
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+}
+
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
+{
+ if (tp->ecn_flags&TCP_ECN_OK) {
+ if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ /* Funny extension: if ECT is not set on a segment,
+ * it is surely a retransmit. This is not in the ECN RFC,
+ * but Linux follows this rule. */
+ else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
+ tcp_enter_quickack_mode(tp);
+ }
+}
+
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
+{
+ if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr))
+ tp->ecn_flags &= ~TCP_ECN_OK;
+}
+
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
+{
+ if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr))
+ tp->ecn_flags &= ~TCP_ECN_OK;
+}
+
+static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
+{
+ if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK))
+ return 1;
+ return 0;
+}
+
+static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
+ struct open_request *req)
+{
+ tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
+}
+
+static __inline__ void
+TCP_ECN_create_request(struct open_request *req, struct tcphdr *th)
+{
+ if (sysctl_tcp_ecn && th->ece && th->cwr)
+ req->ecn_ok = 1;
+}
+
+#endif
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
new file mode 100644
index 000000000000..8b075ab7a26c
--- /dev/null
+++ b/include/net/transp_v6.h
@@ -0,0 +1,53 @@
+#ifndef _TRANSP_V6_H
+#define _TRANSP_V6_H
+
+#include <net/checksum.h>
+
+/*
+ * IPv6 transport protocols
+ */
+
+#ifdef __KERNEL__
+
+extern struct proto rawv6_prot;
+extern struct proto udpv6_prot;
+extern struct proto tcpv6_prot;
+
+struct flowi;
+
+/* extension headers */
+extern void ipv6_rthdr_init(void);
+extern void ipv6_frag_init(void);
+extern void ipv6_nodata_init(void);
+extern void ipv6_destopt_init(void);
+
+/* transport protocols */
+extern void rawv6_init(void);
+extern void udpv6_init(void);
+extern void tcpv6_init(void);
+
+extern int udpv6_connect(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
+
+extern int datagram_recv_ctl(struct sock *sk,
+ struct msghdr *msg,
+ struct sk_buff *skb);
+
+extern int datagram_send_ctl(struct msghdr *msg,
+ struct flowi *fl,
+ struct ipv6_txoptions *opt,
+ int *hlimit);
+
+#define LOOPBACK4_IPV6 __constant_htonl(0x7f000006)
+
+/*
+ * address family specific functions
+ */
+extern struct tcp_func ipv4_specific;
+
+extern int inet6_destroy_sock(struct sock *sk);
+
+#endif
+
+#endif
diff --git a/include/net/udp.h b/include/net/udp.h
new file mode 100644
index 000000000000..c496d10101db
--- /dev/null
+++ b/include/net/udp.h
@@ -0,0 +1,99 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <linux/seq_file.h>
+
+#define UDP_HTABLE_SIZE 128
+
+/* udp.c: This needs to be shared by v4 and v6 because the lookup
+ * and hashing code needs to work with different AF's yet
+ * the port space is shared.
+ */
+extern struct hlist_head udp_hash[UDP_HTABLE_SIZE];
+extern rwlock_t udp_hash_lock;
+
+extern int udp_port_rover;
+
+static inline int udp_lport_inuse(u16 num)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)])
+ if (inet_sk(sk)->num == num)
+ return 1;
+ return 0;
+}
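+
+/* Numeric sketch, not part of the original interface: how the hash chain
+ * walked by udp_lport_inuse() is selected. UDP_HTABLE_SIZE is a power of
+ * two, so masking with (UDP_HTABLE_SIZE - 1) is a cheap modulo.
+ */
+#if 0
+static int udp_bucket_example(void)
+{
+	u16 num = 50000;	/* a hypothetical local port */
+
+	return num & (UDP_HTABLE_SIZE - 1);	/* 50000 & 127 == 80 */
+}
+#endif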
+
+/* Note: this must match 'valbool' in sock_setsockopt */
+#define UDP_CSUM_NOXMIT 1
+
+/* Used by SunRPC/xprt layer. */
+#define UDP_CSUM_NORCV 2
+
+/* Default, as per the RFC, is to always do csums. */
+#define UDP_CSUM_DEFAULT 0
+
+extern struct proto udp_prot;
+
+
+extern void udp_err(struct sk_buff *, u32);
+
+extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len);
+
+extern int udp_rcv(struct sk_buff *skb);
+extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern int udp_disconnect(struct sock *sk, int flags);
+extern unsigned int udp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+
+DECLARE_SNMP_STAT(struct udp_mib, udp_statistics);
+#define UDP_INC_STATS(field) SNMP_INC_STATS(udp_statistics, field)
+#define UDP_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_statistics, field)
+#define UDP_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_statistics, field)
+
+/* /proc */
+struct udp_seq_afinfo {
+ struct module *owner;
+ char *name;
+ sa_family_t family;
+ int (*seq_show) (struct seq_file *m, void *v);
+ struct file_operations *seq_fops;
+};
+
+struct udp_iter_state {
+ sa_family_t family;
+ int bucket;
+ struct seq_operations seq_ops;
+};
+
+extern int udp_proc_register(struct udp_seq_afinfo *afinfo);
+extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo);
+#endif /* _UDP_H */
diff --git a/include/net/x25.h b/include/net/x25.h
new file mode 100644
index 000000000000..7a1ba5bbb868
--- /dev/null
+++ b/include/net/x25.h
@@ -0,0 +1,273 @@
+/*
+ * Declarations of X.25 Packet Layer type objects.
+ *
+ * History
+ * nov/17/96 Jonathan Naylor Initial version.
+ * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ */
+
+#ifndef _X25_H
+#define _X25_H
+#include <linux/x25.h>
+#include <net/sock.h>
+
+#define X25_ADDR_LEN 16
+
+#define X25_MAX_L2_LEN 18 /* 802.2 LLC */
+
+#define X25_STD_MIN_LEN 3
+#define X25_EXT_MIN_LEN 4
+
+#define X25_GFI_SEQ_MASK 0x30
+#define X25_GFI_STDSEQ 0x10
+#define X25_GFI_EXTSEQ 0x20
+
+#define X25_Q_BIT 0x80
+#define X25_D_BIT 0x40
+#define X25_STD_M_BIT 0x10
+#define X25_EXT_M_BIT 0x01
+
+#define X25_CALL_REQUEST 0x0B
+#define X25_CALL_ACCEPTED 0x0F
+#define X25_CLEAR_REQUEST 0x13
+#define X25_CLEAR_CONFIRMATION 0x17
+#define X25_DATA 0x00
+#define X25_INTERRUPT 0x23
+#define X25_INTERRUPT_CONFIRMATION 0x27
+#define X25_RR 0x01
+#define X25_RNR 0x05
+#define X25_REJ 0x09
+#define X25_RESET_REQUEST 0x1B
+#define X25_RESET_CONFIRMATION 0x1F
+#define X25_REGISTRATION_REQUEST 0xF3
+#define X25_REGISTRATION_CONFIRMATION 0xF7
+#define X25_RESTART_REQUEST 0xFB
+#define X25_RESTART_CONFIRMATION 0xFF
+#define X25_DIAGNOSTIC 0xF1
+#define X25_ILLEGAL 0xFD
+
+/* Define the various conditions that may exist */
+
+#define X25_COND_ACK_PENDING 0x01
+#define X25_COND_OWN_RX_BUSY 0x02
+#define X25_COND_PEER_RX_BUSY 0x04
+
+/* Define Link State constants. */
+enum {
+ X25_STATE_0, /* Ready */
+ X25_STATE_1, /* Awaiting Call Accepted */
+ X25_STATE_2, /* Awaiting Clear Confirmation */
+ X25_STATE_3, /* Data Transfer */
+ X25_STATE_4 /* Awaiting Reset Confirmation */
+};
+
+enum {
+ X25_LINK_STATE_0,
+ X25_LINK_STATE_1,
+ X25_LINK_STATE_2,
+ X25_LINK_STATE_3
+};
+
+#define X25_DEFAULT_T20 (180 * HZ) /* Default T20 value */
+#define X25_DEFAULT_T21 (200 * HZ) /* Default T21 value */
+#define X25_DEFAULT_T22 (180 * HZ) /* Default T22 value */
+#define X25_DEFAULT_T23 (180 * HZ) /* Default T23 value */
+#define X25_DEFAULT_T2 (3 * HZ) /* Default ack holdback value */
+
+#define X25_DEFAULT_WINDOW_SIZE 2 /* Default Window Size */
+#define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */
+#define X25_DEFAULT_THROUGHPUT 0x0A /* Default Throughput */
+#define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */
+
+#define X25_SMODULUS 8
+#define X25_EMODULUS 128
+
+/*
+ * X.25 Facilities constants.
+ */
+
+#define X25_FAC_CLASS_MASK 0xC0
+
+#define X25_FAC_CLASS_A 0x00
+#define X25_FAC_CLASS_B 0x40
+#define X25_FAC_CLASS_C 0x80
+#define X25_FAC_CLASS_D 0xC0
+
+#define X25_FAC_REVERSE 0x01
+#define X25_FAC_THROUGHPUT 0x02
+#define X25_FAC_PACKET_SIZE 0x42
+#define X25_FAC_WINDOW_SIZE 0x43
+
+#define X25_MAX_FAC_LEN 20 /* Plenty to spare */
+#define X25_MAX_CUD_LEN 128
+
+/**
+ * struct x25_route - x25 routing entry
+ * @node: entry in x25_route_list, protected by x25_route_list_lock
+ * @address: start of the address range
+ * @sigdigits: number of significant digits
+ * @dev: device used; there may be more than one for MLP
+ * @refcnt: reference counter
+ */
+struct x25_route {
+ struct list_head node;
+ struct x25_address address;
+ unsigned int sigdigits;
+ struct net_device *dev;
+ atomic_t refcnt;
+};
+
+struct x25_neigh {
+ struct list_head node;
+ struct net_device *dev;
+ unsigned int state;
+ unsigned int extended;
+ struct sk_buff_head queue;
+ unsigned long t20;
+ struct timer_list t20timer;
+ unsigned long global_facil_mask;
+ atomic_t refcnt;
+};
+
+struct x25_sock {
+ struct sock sk;
+ struct x25_address source_addr, dest_addr;
+ struct x25_neigh *neighbour;
+ unsigned int lci;
+ unsigned char state, condition, qbitincl, intflag;
+ unsigned short vs, vr, va, vl;
+ unsigned long t2, t21, t22, t23;
+ unsigned short fraglen;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head fragment_queue;
+ struct sk_buff_head interrupt_in_queue;
+ struct sk_buff_head interrupt_out_queue;
+ struct timer_list timer;
+ struct x25_causediag causediag;
+ struct x25_facilities facilities;
+ struct x25_calluserdata calluserdata;
+ unsigned long vc_facil_mask; /* inc_call facilities mask */
+};
+
+static inline struct x25_sock *x25_sk(const struct sock *sk)
+{
+ return (struct x25_sock *)sk;
+}
+
+/* af_x25.c */
+extern int sysctl_x25_restart_request_timeout;
+extern int sysctl_x25_call_request_timeout;
+extern int sysctl_x25_reset_request_timeout;
+extern int sysctl_x25_clear_request_timeout;
+extern int sysctl_x25_ack_holdback_timeout;
+
+extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
+ struct x25_address *);
+extern int x25_addr_aton(unsigned char *, struct x25_address *,
+ struct x25_address *);
+extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+extern void x25_destroy_socket(struct sock *);
+extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+extern void x25_kill_by_neigh(struct x25_neigh *);
+
+/* x25_dev.c */
+extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *);
+extern void x25_establish_link(struct x25_neigh *);
+extern void x25_terminate_link(struct x25_neigh *);
+
+/* x25_facilities.c */
+extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *, unsigned long *);
+extern int x25_create_facilities(unsigned char *, struct x25_facilities *, unsigned long);
+extern int x25_negotiate_facilities(struct sk_buff *, struct sock *, struct x25_facilities *);
+extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+
+/* x25_in.c */
+extern int x25_process_rx_frame(struct sock *, struct sk_buff *);
+extern int x25_backlog_rcv(struct sock *, struct sk_buff *);
+
+/* x25_link.c */
+extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+extern void x25_link_device_up(struct net_device *);
+extern void x25_link_device_down(struct net_device *);
+extern void x25_link_established(struct x25_neigh *);
+extern void x25_link_terminated(struct x25_neigh *);
+extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
+extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+extern int x25_subscr_ioctl(unsigned int, void __user *);
+extern struct x25_neigh *x25_get_neigh(struct net_device *);
+extern void x25_link_free(void);
+
+/* x25_neigh.c */
+static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
+{
+ atomic_inc(&nb->refcnt);
+}
+
+static __inline__ void x25_neigh_put(struct x25_neigh *nb)
+{
+ if (atomic_dec_and_test(&nb->refcnt))
+ kfree(nb);
+}
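+
+/* Usage sketch, not part of the original interface: the usual hold/put
+ * pairing around a neighbour, with a hypothetical caller.
+ */
+#if 0
+static void x25_neigh_use_example(struct x25_neigh *nb)
+{
+	x25_neigh_hold(nb);	/* take a reference while we use it */
+	/* ... use nb->dev, nb->state, ... */
+	x25_neigh_put(nb);	/* drop it; kfree()s nb when the count hits 0 */
+}
+#endif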
+
+/* x25_out.c */
+extern int x25_output(struct sock *, struct sk_buff *);
+extern void x25_kick(struct sock *);
+extern void x25_enquiry_response(struct sock *);
+
+/* x25_route.c */
+extern struct x25_route *x25_get_route(struct x25_address *addr);
+extern struct net_device *x25_dev_get(char *);
+extern void x25_route_device_down(struct net_device *dev);
+extern int x25_route_ioctl(unsigned int, void __user *);
+extern void x25_route_free(void);
+
+static __inline__ void x25_route_hold(struct x25_route *rt)
+{
+ atomic_inc(&rt->refcnt);
+}
+
+static __inline__ void x25_route_put(struct x25_route *rt)
+{
+ if (atomic_dec_and_test(&rt->refcnt))
+ kfree(rt);
+}
+
+/* x25_subr.c */
+extern void x25_clear_queues(struct sock *);
+extern void x25_frames_acked(struct sock *, unsigned short);
+extern void x25_requeue_frames(struct sock *);
+extern int x25_validate_nr(struct sock *, unsigned short);
+extern void x25_write_internal(struct sock *, int);
+extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
+extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+extern int x25_check_calluserdata(struct x25_calluserdata *,struct x25_calluserdata *);
+
+/* x25_timer.c */
+extern void x25_start_heartbeat(struct sock *);
+extern void x25_start_t2timer(struct sock *);
+extern void x25_start_t21timer(struct sock *);
+extern void x25_start_t22timer(struct sock *);
+extern void x25_start_t23timer(struct sock *);
+extern void x25_stop_heartbeat(struct sock *);
+extern void x25_stop_timer(struct sock *);
+extern unsigned long x25_display_timer(struct sock *);
+extern void x25_check_rbuf(struct sock *);
+
+/* sysctl_net_x25.c */
+extern void x25_register_sysctl(void);
+extern void x25_unregister_sysctl(void);
+struct x25_skb_cb {
+ unsigned flags;
+};
+#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
+
+extern struct hlist_head x25_list;
+extern rwlock_t x25_list_lock;
+extern struct list_head x25_route_list;
+extern rwlock_t x25_route_list_lock;
+
+extern int x25_proc_init(void);
+extern void x25_proc_exit(void);
+#endif
diff --git a/include/net/x25device.h b/include/net/x25device.h
new file mode 100644
index 000000000000..cf36a20ea3c5
--- /dev/null
+++ b/include/net/x25device.h
@@ -0,0 +1,17 @@
+#ifndef _X25DEVICE_H
+#define _X25DEVICE_H
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/skbuff.h>
+
+static inline unsigned short x25_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ skb->mac.raw = skb->data;
+ skb->input_dev = skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+
+ return htons(ETH_P_X25);
+}
+#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
new file mode 100644
index 000000000000..73e9a8ca3d3b
--- /dev/null
+++ b/include/net/xfrm.h
@@ -0,0 +1,905 @@
+#ifndef _NET_XFRM_H
+#define _NET_XFRM_H
+
+#include <linux/xfrm.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/crypto.h>
+#include <linux/pfkeyv2.h>
+#include <linux/in6.h>
+
+#include <net/sock.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+
+#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
+
+extern struct semaphore xfrm_cfg_sem;
+
+/* Organization of SPD aka "XFRM rules"
+ ------------------------------------
+
+ Basic objects:
+ - policy rule, struct xfrm_policy (=SPD entry)
+ - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
+ - instance of a transformer, struct xfrm_state (=SA)
+ - template to clone xfrm_state, struct xfrm_tmpl
+
+ SPD is plain linear list of xfrm_policy rules, ordered by priority.
+ (To be compatible with existing pfkeyv2 implementations,
+ many rules with priority of 0x7fffffff are allowed to exist and
+ such rules are ordered in an unpredictable way, thanks to bsd folks.)
+
+ Lookup is plain linear search until the first match with selector.
+
+ If "action" is "block", then we prohibit the flow, otherwise:
+ if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
+ policy entry has list of up to XFRM_MAX_DEPTH transformations,
+ described by templates xfrm_tmpl. Each template is resolved
+ to a complete xfrm_state (see below) and we pack bundle of transformations
+ to a dst_entry returned to requestor.
+
+ dst -. xfrm .-> xfrm_state #1
+ |---. child .-> dst -. xfrm .-> xfrm_state #2
+ |---. child .-> dst -. xfrm .-> xfrm_state #3
+ |---. child .-> NULL
+
+ Bundles are cached in the xfrm_policy struct (field ->bundles).
+
+
+ Resolution of xfrm_tmpl
+ -----------------------
+ Template contains:
+ 1. ->mode Mode: transport or tunnel
+ 2. ->id.proto Protocol: AH/ESP/IPCOMP
+ 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
+ Q: allow to resolve security gateway?
+ 4. ->id.spi If not zero, static SPI.
+ 5. ->saddr Local tunnel endpoint, ignored for transport mode.
+ 6. ->algos List of allowed algos. Plain bitmask now.
+ Q: ealgos, aalgos, calgos. What a mess...
+ 7. ->share Sharing mode.
+ Q: how to implement private sharing mode? To add struct sock* to
+ flow id?
+
+   Given this template, we search the SAD for entries with an
+   appropriate mode/proto/algo that are permitted by the selector.
+   If no appropriate entry is found, one is requested from the key manager.
+
+   PROBLEMS:
+   Q: How do we find all the bundles referring to a physical path for
+      PMTU discovery? It seems each dst would have to keep a list of all
+      its parents... and we descend into a locking-hierarchy disaster.
+      No! It is easier: we do not search for them, we let them find us.
+      We add a genid to each dst plus a pointer to the genid of the raw
+      IP route; PMTU discovery updates the pmtu on the raw IP route and
+      increases its genid. dst_check() sees this at the top level and
+      triggers a metrics resync. Plus, it is done via sk->sk_dst_cache. Solved.
+ */
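
To make the linear-search description above concrete, the lookup it sketches would look roughly like this (an illustrative fragment only: dir, fl and family are assumed inputs, the helpers used are defined later in this header, and the real code also takes a read lock and handles per-socket policies):

        /* Sketch of the SPD lookup described in the comment above. */
        struct xfrm_policy *pol;

        for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
                if (xfrm_selector_match(&pol->selector, fl, family)) {
                        xfrm_pol_hold(pol);     /* first match wins: list is priority ordered */
                        break;
                }
        }
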
+
+/* Full description of the state of a transformer. */
+struct xfrm_state
+{
+ /* Note: bydst is re-used during gc */
+ struct list_head bydst;
+ struct list_head byspi;
+
+ atomic_t refcnt;
+ spinlock_t lock;
+
+ struct xfrm_id id;
+ struct xfrm_selector sel;
+
+	/* Key manager bits */
+ struct {
+ u8 state;
+ u8 dying;
+ u32 seq;
+ } km;
+
+ /* Parameters of this state. */
+ struct {
+ u32 reqid;
+ u8 mode;
+ u8 replay_window;
+ u8 aalgo, ealgo, calgo;
+ u8 flags;
+ u16 family;
+ xfrm_address_t saddr;
+ int header_len;
+ int trailer_len;
+ } props;
+
+ struct xfrm_lifetime_cfg lft;
+
+ /* Data for transformer */
+ struct xfrm_algo *aalg;
+ struct xfrm_algo *ealg;
+ struct xfrm_algo *calg;
+
+ /* Data for encapsulator */
+ struct xfrm_encap_tmpl *encap;
+
+ /* IPComp needs an IPIP tunnel for handling uncompressed packets */
+ struct xfrm_state *tunnel;
+
+ /* If a tunnel, number of users + 1 */
+ atomic_t tunnel_users;
+
+ /* State for replay detection */
+ struct xfrm_replay_state replay;
+
+ /* Statistics */
+ struct xfrm_stats stats;
+
+ struct xfrm_lifetime_cur curlft;
+ struct timer_list timer;
+
+ /* Reference to data common to all the instances of this
+ * transformer. */
+ struct xfrm_type *type;
+
+ /* Private data of this transformer, format is opaque,
+ * interpreted by xfrm_type methods. */
+ void *data;
+};
+
+enum {
+ XFRM_STATE_VOID,
+ XFRM_STATE_ACQ,
+ XFRM_STATE_VALID,
+ XFRM_STATE_ERROR,
+ XFRM_STATE_EXPIRED,
+ XFRM_STATE_DEAD
+};
+
+struct xfrm_type;
+struct xfrm_dst;
+struct xfrm_policy_afinfo {
+ unsigned short family;
+ rwlock_t lock;
+ struct xfrm_type_map *type_map;
+ struct dst_ops *dst_ops;
+ void (*garbage_collect)(void);
+ int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
+ struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
+ int (*bundle_create)(struct xfrm_policy *policy,
+ struct xfrm_state **xfrm,
+ int nx,
+ struct flowi *fl,
+ struct dst_entry **dst_p);
+ void (*decode_session)(struct sk_buff *skb,
+ struct flowi *fl);
+};
+
+extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+
+#define XFRM_ACQ_EXPIRES 30
+
+struct xfrm_tmpl;
+struct xfrm_state_afinfo {
+ unsigned short family;
+ rwlock_t lock;
+ struct list_head *state_bydst;
+ struct list_head *state_byspi;
+ void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr);
+ struct xfrm_state *(*state_lookup)(xfrm_address_t *daddr, u32 spi, u8 proto);
+ struct xfrm_state *(*find_acq)(u8 mode, u32 reqid, u8 proto,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ int create);
+};
+
+extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+
+extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+
+struct xfrm_decap_state;
+struct xfrm_type
+{
+ char *description;
+ struct module *owner;
+ __u8 proto;
+
+ int (*init_state)(struct xfrm_state *x, void *args);
+ void (*destructor)(struct xfrm_state *);
+ int (*input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb);
+ int (*post_input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb);
+ int (*output)(struct xfrm_state *, struct sk_buff *pskb);
+	/* Estimate the maximal size of the result of transforming a datagram */
+ u32 (*get_max_size)(struct xfrm_state *, int size);
+};
+
+struct xfrm_type_map {
+ rwlock_t lock;
+ struct xfrm_type *map[256];
+};
+
+extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
+extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
+extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family);
+extern void xfrm_put_type(struct xfrm_type *type);
+
+struct xfrm_tmpl
+{
+/* The id in a template is interpreted as:
+ * daddr - destination of the tunnel; may be zero for transport mode.
+ * spi   - zero to acquire an SPI; non-zero if the SPI is static, in
+ *         which case daddr must be fixed too.
+ * proto - AH/ESP/IPCOMP
+ */
+ struct xfrm_id id;
+
+/* Source address of the tunnel. Ignored if it is not a tunnel. */
+ xfrm_address_t saddr;
+
+ __u32 reqid;
+
+/* Mode: transport/tunnel */
+ __u8 mode;
+
+/* Sharing mode: unique, this session only, this user only etc. */
+ __u8 share;
+
+/* This transformation may be skipped if no SA is found */
+ __u8 optional;
+
+/* Bit mask of algos allowed for acquisition */
+ __u32 aalgos;
+ __u32 ealgos;
+ __u32 calgos;
+};
+
+#define XFRM_MAX_DEPTH 4
+
+struct xfrm_policy
+{
+ struct xfrm_policy *next;
+ struct list_head list;
+
+	/* This lock protects all the fields except for the list linkage. */
+ rwlock_t lock;
+ atomic_t refcnt;
+ struct timer_list timer;
+
+ u32 priority;
+ u32 index;
+ struct xfrm_selector selector;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_lifetime_cur curlft;
+ struct dst_entry *bundles;
+ __u16 family;
+ __u8 action;
+ __u8 flags;
+ __u8 dead;
+ __u8 xfrm_nr;
+ struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+};
+
+#define XFRM_KM_TIMEOUT 30
+
+struct xfrm_mgr
+{
+ struct list_head list;
+ char *id;
+ int (*notify)(struct xfrm_state *x, int event);
+ int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+ struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
+ int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, int event);
+};
+
+extern int xfrm_register_km(struct xfrm_mgr *km);
+extern int xfrm_unregister_km(struct xfrm_mgr *km);
+
+
+extern struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
+
+static inline void xfrm_pol_hold(struct xfrm_policy *policy)
+{
+ if (likely(policy != NULL))
+ atomic_inc(&policy->refcnt);
+}
+
+extern void __xfrm_policy_destroy(struct xfrm_policy *policy);
+
+static inline void xfrm_pol_put(struct xfrm_policy *policy)
+{
+ if (atomic_dec_and_test(&policy->refcnt))
+ __xfrm_policy_destroy(policy);
+}
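
xfrm_pol_hold() and xfrm_pol_put() implement the usual hold/put reference-counting convention: take a reference while a policy is used outside the lookup lock, drop it when done, and the final put frees the object. A hedged usage sketch:

        xfrm_pol_hold(pol);     /* keep pol alive after the lookup lock is dropped */
        /* ... use pol->selector, pol->xfrm_vec, ... */
        xfrm_pol_put(pol);      /* the last put calls __xfrm_policy_destroy() */
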
+
+#define XFRM_DST_HSIZE 1024
+
+static __inline__
+unsigned __xfrm4_dst_hash(xfrm_address_t *addr)
+{
+ unsigned h;
+ h = ntohl(addr->a4);
+ h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
+ return h;
+}
+
+static __inline__
+unsigned __xfrm6_dst_hash(xfrm_address_t *addr)
+{
+ unsigned h;
+ h = ntohl(addr->a6[2]^addr->a6[3]);
+ h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
+ return h;
+}
+
+static __inline__
+unsigned xfrm_dst_hash(xfrm_address_t *addr, unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_dst_hash(addr);
+ case AF_INET6:
+ return __xfrm6_dst_hash(addr);
+ }
+ return 0;
+}
+
+static __inline__
+unsigned __xfrm4_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
+{
+ unsigned h;
+ h = ntohl(addr->a4^spi^proto);
+ h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
+ return h;
+}
+
+static __inline__
+unsigned __xfrm6_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
+{
+ unsigned h;
+ h = ntohl(addr->a6[2]^addr->a6[3]^spi^proto);
+ h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
+ return h;
+}
+
+static __inline__
+unsigned xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_spi_hash(addr, spi, proto);
+ case AF_INET6:
+ return __xfrm6_spi_hash(addr, spi, proto);
+ }
+ return 0; /*XXX*/
+}
+
+extern void __xfrm_state_destroy(struct xfrm_state *);
+
+static inline void xfrm_state_put(struct xfrm_state *x)
+{
+ if (atomic_dec_and_test(&x->refcnt))
+ __xfrm_state_destroy(x);
+}
+
+static inline void xfrm_state_hold(struct xfrm_state *x)
+{
+ atomic_inc(&x->refcnt);
+}
+
+static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
+{
+ __u32 *a1 = token1;
+ __u32 *a2 = token2;
+ int pdw;
+ int pbi;
+
+ pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
+ pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
+
+ if (pdw)
+ if (memcmp(a1, a2, pdw << 2))
+ return 0;
+
+ if (pbi) {
+ __u32 mask;
+
+ mask = htonl((0xffffffff) << (32 - pbi));
+
+ if ((a1[pdw] ^ a2[pdw]) & mask)
+ return 0;
+ }
+
+ return 1;
+}
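
addr_match() compares whole 32-bit words of the prefix first and then masks the remaining high-order bits of the last partial word. For IPv4 with prefixlen 20, pdw is 0 and pbi is 20, so only the top 20 bits of the single word are compared under htonl(0xfffff000). A small worked example (the concrete addresses are illustrative):

        /* Does 192.168.17.5 fall inside 192.168.16.0/20?  Yes: the top 20 bits agree. */
        __u32 pkt = htonl(0xc0a81105);  /* 192.168.17.5 */
        __u32 net = htonl(0xc0a81000);  /* 192.168.16.0 */

        if (addr_match(&pkt, &net, 20))
                /* treated as a match by the selector code */ ;
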
+
+static __inline__
+u16 xfrm_flowi_sport(struct flowi *fl)
+{
+ u16 port;
+ switch(fl->proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ port = fl->fl_ip_sport;
+ break;
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ port = htons(fl->fl_icmp_type);
+ break;
+ default:
+ port = 0; /*XXX*/
+ }
+ return port;
+}
+
+static __inline__
+u16 xfrm_flowi_dport(struct flowi *fl)
+{
+ u16 port;
+ switch(fl->proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ port = fl->fl_ip_dport;
+ break;
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ port = htons(fl->fl_icmp_code);
+ break;
+ default:
+ port = 0; /*XXX*/
+ }
+ return port;
+}
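
Note how these two helpers fold ICMP into the port-based selector model: the ICMP type is reported in the source-port slot and the ICMP code in the destination-port slot, so selectors can match on them. As a hedged illustration (sel is an assumed struct xfrm_selector pointer, and the values are only an example), a selector intended to catch ICMP echo requests could be set up as:

        sel->proto      = IPPROTO_ICMP;
        sel->sport      = htons(8);             /* ICMP type 8 lands in the sport slot */
        sel->sport_mask = htons(0xffff);
        sel->dport_mask = 0;                    /* any ICMP code */
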
+
+static inline int
+__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+{
+ return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
+ addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+ (fl->proto == sel->proto || !sel->proto) &&
+ (fl->oif == sel->ifindex || !sel->ifindex);
+}
+
+static inline int
+__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+{
+ return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
+ addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+ (fl->proto == sel->proto || !sel->proto) &&
+ (fl->oif == sel->ifindex || !sel->ifindex);
+}
+
+static inline int
+xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+ unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_selector_match(sel, fl);
+ case AF_INET6:
+ return __xfrm6_selector_match(sel, fl);
+ }
+ return 0;
+}
+
+/* A struct encoding a bundle of transformations to apply to some set of flows.
+ *
+ * dst->child points to the next element of the bundle.
+ * dst->xfrm points to an instance of a transformer.
+ *
+ * Due to unfortunate limitations of the current routing cache, which
+ * we have no time to fix, it mirrors struct rtable and is bound to the
+ * same routing key, including saddr and daddr. However, we can have
+ * many bundles differing by session id. All the bundles grow from a
+ * parent policy rule.
+ */
+struct xfrm_dst
+{
+ union {
+ struct xfrm_dst *next;
+ struct dst_entry dst;
+ struct rtable rt;
+ struct rt6_info rt6;
+ } u;
+ struct dst_entry *route;
+ u32 route_mtu_cached;
+ u32 child_mtu_cached;
+};
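
Per the comment above, a bundle is a chain of dst_entry objects linked through ->child, each optionally carrying the xfrm_state it applies in ->xfrm. Walking a bundle therefore looks roughly like this (a sketch only; real consumers such as the MTU and validity checks do more work per hop):

        /* Visit every transform state attached to a bundle rooted at xdst. */
        struct dst_entry *dst;

        for (dst = &xdst->u.dst; dst; dst = dst->child) {
                if (dst->xfrm)
                        /* dst->xfrm is the xfrm_state applied at this hop */ ;
        }
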
+
+/* Decapsulation state, used by the input path to store data during
+ * the decapsulation procedure, to be used later (during the policy
+ * check).
+ */
+struct xfrm_decap_state {
+ char decap_data[20];
+ __u16 decap_type;
+};
+
+struct sec_decap_state {
+ struct xfrm_state *xvec;
+ struct xfrm_decap_state decap;
+};
+
+struct sec_path
+{
+ atomic_t refcnt;
+ int len;
+ struct sec_decap_state x[XFRM_MAX_DEPTH];
+};
+
+static inline struct sec_path *
+secpath_get(struct sec_path *sp)
+{
+ if (sp)
+ atomic_inc(&sp->refcnt);
+ return sp;
+}
+
+extern void __secpath_destroy(struct sec_path *sp);
+
+static inline void
+secpath_put(struct sec_path *sp)
+{
+ if (sp && atomic_dec_and_test(&sp->refcnt))
+ __secpath_destroy(sp);
+}
+
+extern struct sec_path *secpath_dup(struct sec_path *src);
+
+static inline void
+secpath_reset(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ secpath_put(skb->sp);
+ skb->sp = NULL;
+#endif
+}
+
+static inline int
+__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+{
+ return (tmpl->saddr.a4 &&
+ tmpl->saddr.a4 != x->props.saddr.a4);
+}
+
+static inline int
+__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+{
+ return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
+ ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
+}
+
+static inline int
+xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_state_addr_cmp(tmpl, x);
+ case AF_INET6:
+ return __xfrm6_state_addr_cmp(tmpl, x);
+ }
+ return !0;
+}
+
+#ifdef CONFIG_XFRM
+
+extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+
+static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+{
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
+ return __xfrm_policy_check(sk, dir, skb, family);
+
+ return (!xfrm_policy_list[dir] && !skb->sp) ||
+ (skb->dst->flags & DST_NOPOLICY) ||
+ __xfrm_policy_check(sk, dir, skb, family);
+}
+
+static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return xfrm_policy_check(sk, dir, skb, AF_INET);
+}
+
+static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return xfrm_policy_check(sk, dir, skb, AF_INET6);
+}
+
+
+extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+
+static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+{
+ return !xfrm_policy_list[XFRM_POLICY_OUT] ||
+ (skb->dst->flags & DST_NOXFRM) ||
+ __xfrm_route_forward(skb, family);
+}
+
+static inline int xfrm4_route_forward(struct sk_buff *skb)
+{
+ return xfrm_route_forward(skb, AF_INET);
+}
+
+static inline int xfrm6_route_forward(struct sk_buff *skb)
+{
+ return xfrm_route_forward(skb, AF_INET6);
+}
+
+extern int __xfrm_sk_clone_policy(struct sock *sk);
+
+static inline int xfrm_sk_clone_policy(struct sock *sk)
+{
+ if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
+ return __xfrm_sk_clone_policy(sk);
+ return 0;
+}
+
+extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+
+static inline void xfrm_sk_free_policy(struct sock *sk)
+{
+ if (unlikely(sk->sk_policy[0] != NULL)) {
+ xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
+ sk->sk_policy[0] = NULL;
+ }
+ if (unlikely(sk->sk_policy[1] != NULL)) {
+ xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
+ sk->sk_policy[1] = NULL;
+ }
+}
+
+#else
+
+static inline void xfrm_sk_free_policy(struct sock *sk) {}
+static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
+static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return 1;
+}
+static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+{
+ return 1;
+}
+static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+{
+ return 1;
+}
+#endif
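
The checks above are meant to be called from the transport protocols' input paths: with CONFIG_XFRM enabled the inline fast path skips the full check when there are no policies and no sec_path, and with it disabled the stubs simply return 1. A hedged sketch of a receive handler using it (example_rcv is illustrative, not a real handler; real callers pass the socket when they have one):

        static int example_rcv(struct sk_buff *skb)
        {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                        kfree_skb(skb);         /* packet not permitted by the SPD */
                        return 0;
                }
                /* ... normal protocol processing ... */
                return 0;
        }
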
+
+static __inline__
+xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
+{
+ switch (family){
+ case AF_INET:
+ return (xfrm_address_t *)&fl->fl4_dst;
+ case AF_INET6:
+ return (xfrm_address_t *)&fl->fl6_dst;
+ }
+ return NULL;
+}
+
+static __inline__
+xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
+{
+ switch (family){
+ case AF_INET:
+ return (xfrm_address_t *)&fl->fl4_src;
+ case AF_INET6:
+ return (xfrm_address_t *)&fl->fl6_src;
+ }
+ return NULL;
+}
+
+static __inline__ int
+__xfrm4_state_addr_check(struct xfrm_state *x,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
+ if (daddr->a4 == x->id.daddr.a4 &&
+ (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
+ return 1;
+ return 0;
+}
+
+static __inline__ int
+__xfrm6_state_addr_check(struct xfrm_state *x,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
+ if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
+ (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
+ ipv6_addr_any((struct in6_addr *)saddr) ||
+ ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
+ return 1;
+ return 0;
+}
+
+static __inline__ int
+xfrm_state_addr_check(struct xfrm_state *x,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_state_addr_check(x, daddr, saddr);
+ case AF_INET6:
+ return __xfrm6_state_addr_check(x, daddr, saddr);
+ }
+ return 0;
+}
+
+static inline int xfrm_state_kern(struct xfrm_state *x)
+{
+ return atomic_read(&x->tunnel_users);
+}
+
+/*
+ * xfrm algorithm information
+ */
+struct xfrm_algo_auth_info {
+ u16 icv_truncbits;
+ u16 icv_fullbits;
+};
+
+struct xfrm_algo_encr_info {
+ u16 blockbits;
+ u16 defkeybits;
+};
+
+struct xfrm_algo_comp_info {
+ u16 threshold;
+};
+
+struct xfrm_algo_desc {
+ char *name;
+ u8 available:1;
+ union {
+ struct xfrm_algo_auth_info auth;
+ struct xfrm_algo_encr_info encr;
+ struct xfrm_algo_comp_info comp;
+ } uinfo;
+ struct sadb_alg desc;
+};
+
+/* XFRM tunnel handlers. */
+struct xfrm_tunnel {
+ int (*handler)(struct sk_buff *skb);
+ void (*err_handler)(struct sk_buff *skb, void *info);
+};
+
+struct xfrm6_tunnel {
+ int (*handler)(struct sk_buff **pskb, unsigned int *nhoffp);
+ void (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ int type, int code, int offset, __u32 info);
+};
+
+extern void xfrm_init(void);
+extern void xfrm4_init(void);
+extern void xfrm6_init(void);
+extern void xfrm6_fini(void);
+extern void xfrm_state_init(void);
+extern void xfrm4_state_init(void);
+extern void xfrm4_state_fini(void);
+extern void xfrm6_state_init(void);
+extern void xfrm6_state_fini(void);
+
+extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
+extern struct xfrm_state *xfrm_state_alloc(void);
+extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
+ struct flowi *fl, struct xfrm_tmpl *tmpl,
+ struct xfrm_policy *pol, int *err,
+ unsigned short family);
+extern int xfrm_state_check_expire(struct xfrm_state *x);
+extern void xfrm_state_insert(struct xfrm_state *x);
+extern int xfrm_state_add(struct xfrm_state *x);
+extern int xfrm_state_update(struct xfrm_state *x);
+extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
+extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
+extern void xfrm_state_delete(struct xfrm_state *x);
+extern void xfrm_state_flush(u8 proto);
+extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
+extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
+extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
+extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+extern int xfrm4_rcv(struct sk_buff *skb);
+extern int xfrm4_output(struct sk_buff *skb);
+extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
+extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
+extern int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi);
+extern int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
+extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
+extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
+extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
+extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
+extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
+extern int xfrm6_output(struct sk_buff *skb);
+
+#ifdef CONFIG_XFRM
+extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
+extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
+#else
+static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
+{
+ /* should not happen */
+ kfree_skb(skb);
+ return 0;
+}
+static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
+{
+ return -EINVAL;
+}
+#endif
+
+struct xfrm_policy *xfrm_policy_alloc(int gfp);
+extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
+int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
+struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
+ int delete);
+struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete);
+void xfrm_policy_flush(void);
+u32 xfrm_get_acqseq(void);
+void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ int create, unsigned short family);
+extern void xfrm_policy_flush(void);
+extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+extern int xfrm_flush_bundles(void);
+extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family);
+extern void xfrm_init_pmtu(struct dst_entry *dst);
+
+extern wait_queue_head_t km_waitq;
+extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
+extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard);
+
+extern void xfrm_input_init(void);
+extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq);
+
+extern void xfrm_probe_algs(void);
+extern int xfrm_count_auth_supported(void);
+extern int xfrm_count_enc_supported(void);
+extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
+
+struct crypto_tfm;
+typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int);
+
+extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
+ int offset, int len, icv_update_fn_t icv_update);
+
+static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
+ int family)
+{
+ switch (family) {
+ default:
+ case AF_INET:
+ return a->a4 - b->a4;
+ case AF_INET6:
+ return ipv6_addr_cmp((struct in6_addr *)a,
+ (struct in6_addr *)b);
+ }
+}
+
+#endif /* _NET_XFRM_H */