author     Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 14:45:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 14:45:08 -0700
commit     aae3dbb4776e7916b6cd442d00159bea27a695c1 (patch)
tree       d074c5d783a81e7e2e084b1eba77f57459da7e37 /drivers/staging
parent     ec3604c7a5aae8953545b0d05495357009a960e5 (diff)
parent     66bed8465a808400eb14562510e26c8818082cb8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support ipv6 checksum offload in sunvnet driver, from Shannon Nelson.
 2) Move to RB-tree instead of custom AVL code in inetpeer, from Eric Dumazet.
 3) Allow generic XDP to work on virtual devices, from John Fastabend.
 4) Add bpf device maps and XDP_REDIRECT, which can be used to build arbitrary switching frameworks using XDP. From John Fastabend.
 5) Remove UFO offloads from the tree, gave us little other than bugs.
 6) Remove the IPSEC flow cache, from Florian Westphal.
 7) Support ipv6 route offload in mlxsw driver.
 8) Support VF representors in bnxt_en, from Sathya Perla.
 9) Add support for forward error correction modes to ethtool, from Vidya Sagar Ravipati.
10) Add time filter for packet scheduler action dumping, from Jamal Hadi Salim.
11) Extend the zerocopy sendmsg() used by virtio and tap to regular sockets via MSG_ZEROCOPY. From Willem de Bruijn.
12) Significantly rework value tracking in the BPF verifier, from Edward Cree.
13) Add new jump instructions to eBPF, from Daniel Borkmann.
14) Rework rtnetlink plumbing so that operations can be run without taking the RTNL semaphore. From Florian Westphal.
15) Support XDP in tap driver, from Jason Wang.
16) Add 32-bit eBPF JIT for ARM, from Shubham Bansal.
17) Add Huawei hinic ethernet driver.
18) Allow to report MD5 keys in TCP inet_diag dumps, from Ivan Delalande.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1780 commits)
  i40e: point wb_desc at the nvm_wb_desc during i40e_read_nvm_aq
  i40e: avoid NVM acquire deadlock during NVM update
  drivers: net: xgene: Remove return statement from void function
  drivers: net: xgene: Configure tx/rx delay for ACPI
  drivers: net: xgene: Read tx/rx delay for ACPI
  rocker: fix kcalloc parameter order
  rds: Fix non-atomic operation on shared flag variable
  net: sched: don't use GFP_KERNEL under spin lock
  vhost_net: correctly check tx avail during rx busy polling
  net: mdio-mux: add mdio_mux parameter to mdio_mux_init()
  rxrpc: Make service connection lookup always check for retry
  net: stmmac: Delete dead code for MDIO registration
  gianfar: Fix Tx flow control deactivation
  cxgb4: Ignore MPS_TX_INT_CAUSE[Bubble] for T6
  cxgb4: Fix pause frame count in t4_get_port_stats
  cxgb4: fix memory leak
  tun: rename generic_xdp to skb_xdp
  tun: reserve extra headroom only when XDP is set
  net: dsa: bcm_sf2: Configure IMP port TC2QOS mapping
  net: dsa: bcm_sf2: Advertise number of egress queues
  ...
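A note on item 11: MSG_ZEROCOPY extends the existing zerocopy sendmsg() path to ordinary sockets. The fragment below is a minimal userspace sketch of that flow, not part of this merge; it assumes an already-connected TCP socket fd, and the send_zerocopy() helper name, the fallback #defines and the trimmed error handling are illustrative only.

/*
 * Minimal MSG_ZEROCOPY sketch (illustrative only, not from this merge).
 * Assumes fd is an already-connected TCP socket.
 */
#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60           /* fallback if libc headers predate v4.14 */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000   /* fallback if libc headers predate v4.14 */
#endif

static int send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;

	/* Opt in once per socket before the flag can take effect. */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
		return -errno;

	/* On the zerocopy path the pages behind buf are pinned, not copied. */
	if (send(fd, buf, len, MSG_ZEROCOPY) < 0)
		return -errno;

	/*
	 * buf must not be reused until the completion notification for this
	 * send is read from the error queue: recvmsg(fd, &msg, MSG_ERRQUEUE).
	 */
	return 0;
}

Completion notifications arrive on the socket error queue (origin SO_EE_ORIGIN_ZEROCOPY); the kernel may silently fall back to copying, in which case the notification is still delivered, so the same buffer-reclaim logic works either way.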
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 2
-rw-r--r--  drivers/staging/irda/TODO | 4
-rw-r--r--  drivers/staging/irda/drivers/Kconfig | 398
-rw-r--r--  drivers/staging/irda/drivers/Makefile | 44
-rw-r--r--  drivers/staging/irda/drivers/act200l-sir.c | 250
-rw-r--r--  drivers/staging/irda/drivers/actisys-sir.c | 245
-rw-r--r--  drivers/staging/irda/drivers/ali-ircc.c | 2218
-rw-r--r--  drivers/staging/irda/drivers/ali-ircc.h | 227
-rw-r--r--  drivers/staging/irda/drivers/au1k_ir.c | 989
-rw-r--r--  drivers/staging/irda/drivers/bfin_sir.c | 817
-rw-r--r--  drivers/staging/irda/drivers/bfin_sir.h | 93
-rw-r--r--  drivers/staging/irda/drivers/donauboe.c | 1732
-rw-r--r--  drivers/staging/irda/drivers/donauboe.h | 362
-rw-r--r--  drivers/staging/irda/drivers/esi-sir.c | 157
-rw-r--r--  drivers/staging/irda/drivers/girbil-sir.c | 252
-rw-r--r--  drivers/staging/irda/drivers/irda-usb.c | 1914
-rw-r--r--  drivers/staging/irda/drivers/irda-usb.h | 174
-rw-r--r--  drivers/staging/irda/drivers/irtty-sir.c | 570
-rw-r--r--  drivers/staging/irda/drivers/irtty-sir.h | 34
-rw-r--r--  drivers/staging/irda/drivers/kingsun-sir.c | 634
-rw-r--r--  drivers/staging/irda/drivers/ks959-sir.c | 912
-rw-r--r--  drivers/staging/irda/drivers/ksdazzle-sir.c | 813
-rw-r--r--  drivers/staging/irda/drivers/litelink-sir.c | 199
-rw-r--r--  drivers/staging/irda/drivers/ma600-sir.c | 253
-rw-r--r--  drivers/staging/irda/drivers/mcp2120-sir.c | 224
-rw-r--r--  drivers/staging/irda/drivers/mcs7780.c | 987
-rw-r--r--  drivers/staging/irda/drivers/mcs7780.h | 165
-rw-r--r--  drivers/staging/irda/drivers/nsc-ircc.c | 2410
-rw-r--r--  drivers/staging/irda/drivers/nsc-ircc.h | 281
-rw-r--r--  drivers/staging/irda/drivers/old_belkin-sir.c | 146
-rw-r--r--  drivers/staging/irda/drivers/pxaficp_ir.c | 1076
-rw-r--r--  drivers/staging/irda/drivers/sa1100_ir.c | 1150
-rw-r--r--  drivers/staging/irda/drivers/sh_sir.c | 810
-rw-r--r--  drivers/staging/irda/drivers/sir-dev.h | 191
-rw-r--r--  drivers/staging/irda/drivers/sir_dev.c | 987
-rw-r--r--  drivers/staging/irda/drivers/sir_dongle.c | 133
-rw-r--r--  drivers/staging/irda/drivers/smsc-ircc2.c | 3026
-rw-r--r--  drivers/staging/irda/drivers/smsc-ircc2.h | 191
-rw-r--r--  drivers/staging/irda/drivers/smsc-sio.h | 100
-rw-r--r--  drivers/staging/irda/drivers/stir4200.c | 1134
-rw-r--r--  drivers/staging/irda/drivers/tekram-sir.c | 225
-rw-r--r--  drivers/staging/irda/drivers/toim3232-sir.c | 358
-rw-r--r--  drivers/staging/irda/drivers/via-ircc.c | 1593
-rw-r--r--  drivers/staging/irda/drivers/via-ircc.h | 846
-rw-r--r--  drivers/staging/irda/drivers/vlsi_ir.c | 1872
-rw-r--r--  drivers/staging/irda/drivers/vlsi_ir.h | 757
-rw-r--r--  drivers/staging/irda/drivers/w83977af.h | 53
-rw-r--r--  drivers/staging/irda/drivers/w83977af_ir.c | 1285
-rw-r--r--  drivers/staging/irda/drivers/w83977af_ir.h | 198
-rw-r--r--  drivers/staging/irda/include/net/irda/af_irda.h | 87
-rw-r--r--  drivers/staging/irda/include/net/irda/crc.h | 29
-rw-r--r--  drivers/staging/irda/include/net/irda/discovery.h | 95
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_core.h | 106
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_event.h | 83
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_lmp.h | 36
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_param.h | 147
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_ttp.h | 37
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_tty.h | 121
-rw-r--r--  drivers/staging/irda/include/net/irda/ircomm_tty_attach.h | 92
-rw-r--r--  drivers/staging/irda/include/net/irda/irda.h | 115
-rw-r--r--  drivers/staging/irda/include/net/irda/irda_device.h | 285
-rw-r--r--  drivers/staging/irda/include/net/irda/iriap.h | 108
-rw-r--r--  drivers/staging/irda/include/net/irda/iriap_event.h | 85
-rw-r--r--  drivers/staging/irda/include/net/irda/irias_object.h | 108
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_client.h | 42
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_common.h | 230
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_eth.h | 32
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_event.h | 81
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_filter.h | 35
-rw-r--r--  drivers/staging/irda/include/net/irda/irlan_provider.h | 52
-rw-r--r--  drivers/staging/irda/include/net/irda/irlap.h | 311
-rw-r--r--  drivers/staging/irda/include/net/irda/irlap_event.h | 129
-rw-r--r--  drivers/staging/irda/include/net/irda/irlap_frame.h | 167
-rw-r--r--  drivers/staging/irda/include/net/irda/irlmp.h | 295
-rw-r--r--  drivers/staging/irda/include/net/irda/irlmp_event.h | 98
-rw-r--r--  drivers/staging/irda/include/net/irda/irlmp_frame.h | 62
-rw-r--r--  drivers/staging/irda/include/net/irda/irmod.h | 109
-rw-r--r--  drivers/staging/irda/include/net/irda/irqueue.h | 96
-rw-r--r--  drivers/staging/irda/include/net/irda/irttp.h | 210
-rw-r--r--  drivers/staging/irda/include/net/irda/parameters.h | 100
-rw-r--r--  drivers/staging/irda/include/net/irda/qos.h | 101
-rw-r--r--  drivers/staging/irda/include/net/irda/timer.h | 105
-rw-r--r--  drivers/staging/irda/include/net/irda/wrapper.h | 58
-rw-r--r--  drivers/staging/irda/net/Kconfig | 96
-rw-r--r--  drivers/staging/irda/net/Makefile | 17
-rw-r--r--  drivers/staging/irda/net/af_irda.c | 2695
-rw-r--r--  drivers/staging/irda/net/discovery.c | 417
-rw-r--r--  drivers/staging/irda/net/ircomm/Kconfig | 12
-rw-r--r--  drivers/staging/irda/net/ircomm/Makefile | 8
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_core.c | 563
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_event.c | 246
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_lmp.c | 350
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_param.c | 501
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_ttp.c | 350
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_tty.c | 1329
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_tty_attach.c | 987
-rw-r--r--  drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c | 291
-rw-r--r--  drivers/staging/irda/net/irda_device.c | 316
-rw-r--r--  drivers/staging/irda/net/iriap.c | 1085
-rw-r--r--  drivers/staging/irda/net/iriap_event.c | 496
-rw-r--r--  drivers/staging/irda/net/irias_object.c | 555
-rw-r--r--  drivers/staging/irda/net/irlan/Kconfig | 14
-rw-r--r--  drivers/staging/irda/net/irlan/Makefile | 7
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_client.c | 559
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_client_event.c | 511
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_common.c | 1176
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_eth.c | 340
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_event.c | 60
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_filter.c | 240
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_provider.c | 408
-rw-r--r--  drivers/staging/irda/net/irlan/irlan_provider_event.c | 233
-rw-r--r--  drivers/staging/irda/net/irlap.c | 1207
-rw-r--r--  drivers/staging/irda/net/irlap_event.c | 2316
-rw-r--r--  drivers/staging/irda/net/irlap_frame.c | 1407
-rw-r--r--  drivers/staging/irda/net/irlmp.c | 1996
-rw-r--r--  drivers/staging/irda/net/irlmp_event.c | 886
-rw-r--r--  drivers/staging/irda/net/irlmp_frame.c | 476
-rw-r--r--  drivers/staging/irda/net/irmod.c | 199
-rw-r--r--  drivers/staging/irda/net/irnet/Kconfig | 13
-rw-r--r--  drivers/staging/irda/net/irnet/Makefile | 7
-rw-r--r--  drivers/staging/irda/net/irnet/irnet.h | 522
-rw-r--r--  drivers/staging/irda/net/irnet/irnet_irda.c | 1885
-rw-r--r--  drivers/staging/irda/net/irnet/irnet_irda.h | 178
-rw-r--r--  drivers/staging/irda/net/irnet/irnet_ppp.c | 1189
-rw-r--r--  drivers/staging/irda/net/irnet/irnet_ppp.h | 116
-rw-r--r--  drivers/staging/irda/net/irnetlink.c | 162
-rw-r--r--  drivers/staging/irda/net/irproc.c | 96
-rw-r--r--  drivers/staging/irda/net/irqueue.c | 911
-rw-r--r--  drivers/staging/irda/net/irsysctl.c | 258
-rw-r--r--  drivers/staging/irda/net/irttp.c | 1891
-rw-r--r--  drivers/staging/irda/net/parameters.c | 584
-rw-r--r--  drivers/staging/irda/net/qos.c | 771
-rw-r--r--  drivers/staging/irda/net/timer.c | 231
-rw-r--r--  drivers/staging/irda/net/wrapper.c | 492
135 files changed, 68995 insertions, 0 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 1b0a1bed8e11..554683912cff 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,6 +24,8 @@ menuconfig STAGING
if STAGING
+source "drivers/staging/irda/net/Kconfig"
+
source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2b61cbd44d13..8951c37d8d80 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -2,6 +2,8 @@
obj-y += media/
obj-y += typec/
+obj-$(CONFIG_IRDA) += irda/net/
+obj-$(CONFIG_IRDA) += irda/drivers/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
diff --git a/drivers/staging/irda/TODO b/drivers/staging/irda/TODO
new file mode 100644
index 000000000000..7d98a5cffaff
--- /dev/null
+++ b/drivers/staging/irda/TODO
@@ -0,0 +1,4 @@
+The irda code will be removed soon from the kernel tree as it is old and
+obsolete and broken.
+
+Don't worry about fixing up anything here, it's not needed.
diff --git a/drivers/staging/irda/drivers/Kconfig b/drivers/staging/irda/drivers/Kconfig
new file mode 100644
index 000000000000..e070e1222733
--- /dev/null
+++ b/drivers/staging/irda/drivers/Kconfig
@@ -0,0 +1,398 @@
+menu "Infrared-port device drivers"
+ depends on IRDA!=n
+
+comment "SIR device drivers"
+
+config IRTTY_SIR
+ tristate "IrTTY (uses Linux serial driver)"
+ depends on IRDA && TTY
+ help
+ Say Y here if you want to build support for the IrTTY line
+ discipline. To compile it as a module, choose M here: the module
+ will be called irtty-sir. IrTTY makes it possible to use Linux's
+ own serial driver for all IrDA ports that are 16550 compatible.
+ Most IrDA chips are 16550 compatible so you should probably say Y
+ to this option. Using IrTTY will however limit the speed of the
+ connection to 115200 bps (IrDA SIR mode).
+
+ If unsure, say Y.
+
+config BFIN_SIR
+ tristate "Blackfin SIR on UART"
+ depends on BLACKFIN && IRDA
+ default n
+ help
+ Say Y here if you want to enable the SIR function on Blackfin UART
+ devices.
+
+ To activate this driver you can start irattach like:
+ "irattach irda0 -s"
+
+ If you say M, the driver will be built as a module named bfin_sir.
+
+ Note that you need to turn off one of the serial drivers for SIR
+ to use that UART.
+
+config BFIN_SIR0
+ bool "Blackfin SIR on UART0"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART0
+
+config BFIN_SIR1
+ bool "Blackfin SIR on UART1"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART1 && (!BF531 && !BF532 && !BF533 && !BF561)
+
+config BFIN_SIR2
+ bool "Blackfin SIR on UART2"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART2 && (BF54x || BF538 || BF539)
+
+config BFIN_SIR3
+ bool "Blackfin SIR on UART3"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART3 && (BF54x)
+
+choice
+ prompt "SIR Mode"
+ depends on BFIN_SIR
+ default SIR_BFIN_DMA
+
+config SIR_BFIN_DMA
+ bool "DMA mode"
+ depends on !DMA_UNCACHED_NONE
+
+config SIR_BFIN_PIO
+ bool "PIO mode"
+endchoice
+
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable the SIR function on SuperH UART
+ devices.
+
+comment "Dongle support"
+
+config DONGLE
+ bool "Serial dongle support"
+ depends on IRTTY_SIR
+ help
+ Say Y here if you have an infrared device that connects to your
+ computer's serial port. These devices are called dongles. Then say Y
+ or M to the driver for your particular dongle below.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial dongles.
+
+config ESI_DONGLE
+ tristate "ESI JetEye PC dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Extended Systems
+ JetEye PC dongle. To compile it as a module, choose M here. The ESI
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for ESI
+ dongles you will have to start irattach like this:
+ "irattach -d esi".
+
+config ACTISYS_DONGLE
+ tristate "ACTiSYS IR-220L and IR220L+ dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-220L and
+ IR220L+ dongles. To compile it as a module, choose M here. The
+ ACTiSYS dongles attach to the normal 9-pin serial port connector,
+ and can currently only be used by IrTTY. To activate support for
+ ACTiSYS dongles you will have to start irattach like this:
+ "irattach -d actisys" or "irattach -d actisys+".
+
+config TEKRAM_DONGLE
+ tristate "Tekram IrMate 210B dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Tekram IrMate 210B
+ dongle. To compile it as a module, choose M here. The Tekram dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Tekram
+ dongles you will have to start irattach like this:
+ "irattach -d tekram".
+
+config TOIM3232_DONGLE
+ tristate "TOIM3232 IrDa dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Vishay/Temic
+ TOIM3232 and TOIM4232 based dongles.
+ To compile it as a module, choose M here.
+
+config LITELINK_DONGLE
+ tristate "Parallax LiteLink dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Parallax Litelink
+ dongle. To compile it as a module, choose M here. The Parallax
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Parallax
+ dongles you will have to start irattach like this:
+ "irattach -d litelink".
+
+config MA600_DONGLE
+ tristate "Mobile Action MA600 dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Mobile Action MA600
+ dongle. To compile it as a module, choose M here. The MA600 dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. The driver should also support
+ the MA620 USB version of the dongle, if the integrated USB-to-RS232
+ converter is supported by usbserial. To activate support for
+ MA600 dongle you will have to start irattach like this:
+ "irattach -d ma600".
+
+config GIRBIL_DONGLE
+ tristate "Greenwich GIrBIL dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Greenwich GIrBIL
+ dongle. If you want to compile it as a module, choose M here.
+ The Greenwich dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for Greenwich dongles you will have to start irattach
+ like this: "irattach -d girbil".
+
+config MCP2120_DONGLE
+ tristate "Microchip MCP2120"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Microchip MCP2120
+ dongle. If you want to compile it as a module, choose M here.
+ The MCP2120 dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for MCP2120 dongles you will have to start irattach
+ like this: "irattach -d mcp2120".
+
+ You must build this dongle yourself. For more information see:
+ <http://www.eyetap.org/~tangf/irda_sir_linux.html>
+
+config OLD_BELKIN_DONGLE
+ tristate "Old Belkin dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Adaptec Airport 1000
+ and 2000 dongles. If you want to compile it as a module, choose
+ M here. Some information is contained in the comments
+ at the top of <file:drivers/net/irda/old_belkin-sir.c>.
+
+config ACT200L_DONGLE
+ tristate "ACTiSYS IR-200L dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-200L
+ dongle. If you want to compile it as a module, choose M here.
+ The ACTiSYS IR-200L dongle attaches to the normal 9-pin serial
+ port connector, and can currently only be used by IrTTY.
+ To activate support for ACTiSYS IR-200L dongle you will have to
+ start irattach like this: "irattach -d act200l".
+
+config KINGSUN_DONGLE
+ tristate "KingSun/DonShine DS-620 IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun/DonShine
+ DS-620 IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speed only (9600 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ kingsun-sir.
+
+config KSDAZZLE_DONGLE
+ tristate "KingSun Dazzle IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun Dazzle
+ IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speeds only (9600 through 115200 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ ksdazzle-sir.
+
+config KS959_DONGLE
+ tristate "KingSun KS-959 IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun KS-959
+ IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speeds only (9600 through 57600 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ ks959-sir.
+
+comment "FIR device drivers"
+
+config USB_IRDA
+ tristate "IrDA USB dongles"
+ depends on IRDA && USB
+ select FW_LOADER
+ ---help---
+ Say Y here if you want to build support for the USB IrDA FIR Dongle
+ device driver. To compile it as a module, choose M here: the module
+ will be called irda-usb. IrDA-USB supports the various IrDA USB
+ dongles available and most of their peculiarities. Those dongles
+ plug in the USB port of your computer, are plug and play, and
+ support SIR and FIR (4Mbps) speeds. On the other hand, those
+ dongles tend to be less efficient than a FIR chipset.
+
+ Please note that the driver is still experimental. And of course,
+ you will need both USB and IrDA support in your kernel...
+
+config SIGMATEL_FIR
+ tristate "SigmaTel STIr4200 bridge"
+ depends on IRDA && USB
+ select CRC32
+ ---help---
+ Say Y here if you want to build support for the SigmaTel STIr4200
+ USB IrDA FIR bridge device driver.
+
+ USB bridges based on the SigmaTel STIr4200 don't conform to the
+ IrDA-USB device class specification, and therefore need their
+ own specific driver. Those dongles support SIR and FIR (4Mbps)
+ speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ stir4200.
+
+config NSC_FIR
+ tristate "NSC PC87108/PC87338"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the NSC PC87108 and
+ PC87338 IrDA chipsets. This driver supports SIR,
+ MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ nsc-ircc.
+
+config WINBOND_FIR
+ tristate "Winbond W83977AF (IR)"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build IrDA support for the Winbond
+ W83977AF super-io chipset. This driver should be used for the IrDA
+ chipset in the Corel NetWinder. The driver supports SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ w83977af_ir.
+
+config TOSHIBA_FIR
+ tristate "Toshiba Type-O IR Port"
+ depends on IRDA && PCI && !64BIT && VIRT_TO_BUS
+ help
+ Say Y here if you want to build support for the Toshiba Type-O IR
+ and Donau oboe chipsets. These chipsets are used by the Toshiba
+ Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
+ To compile it as a module, choose M here: the module will be called
+ donauboe.
+
+config AU1000_FIR
+ tristate "Alchemy IrDA SIR/FIR"
+ depends on IRDA && MIPS_ALCHEMY
+ help
+ Say Y/M here to build support for the IrDA peripheral on the
+ Alchemy Au1000 and Au1100 SoCs.
+ Say M to build a module; it will be called au1k_ir.ko
+
+config SMC_IRCC_FIR
+ tristate "SMSC IrCC"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the SMC Infrared
+ Communications Controller. It is used in a wide variety of
+ laptops (Fujitsu, Sony, Compaq and some Toshiba).
+ To compile it as a module, choose M here: the module will be called
+ smsc-ircc2.o.
+
+config ALI_FIR
+ tristate "ALi M5123 FIR"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the ALi M5123 FIR
+ Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
+ M1535, M1535D, M1535+, M1535D South Bridge. This driver supports
+ SIR, MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ ali-ircc.
+
+config VLSI_FIR
+ tristate "VLSI 82C147 SIR/MIR/FIR"
+ depends on IRDA && PCI
+ help
+ Say Y here if you want to build support for the VLSI 82C147
+ PCI-IrDA Controller. This controller is used by the HP OmniBook 800
+ and 5500 notebooks. The driver provides support for SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ vlsi_ir.
+
+config SA1100_FIR
+ tristate "SA1100 Internal IR"
+ depends on ARCH_SA1100 && IRDA && DMA_SA11X0
+
+config VIA_FIR
+ tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the VIA VT8231
+ and VIA VT1211 IrDA controllers, found on the motherboards using
+ those VIA chipsets. To use this controller, you will need
+ to plug a specific 5-pin FIR IrDA dongle into the specific
+ motherboard connector. The driver provides support for SIR, MIR
+ and FIR (4Mbps) speeds.
+
+ You will need to specify the 'dongle_id' module parameter to
+ indicate the FIR dongle attached to the controller.
+
+ To compile it as a module, choose M here: the module will be called
+ via-ircc.
+
+config PXA_FICP
+ tristate "Intel PXA2xx Internal FICP"
+ depends on ARCH_PXA && IRDA
+ help
+ Say Y or M here if you want to build support for the PXA2xx
+ built-in IRDA interface which can support both SIR and FIR.
+ This driver relies on platform specific helper routines so
+ available capabilities may vary from one PXA2xx target to
+ another.
+
+config MCS_FIR
+ tristate "MosChip MCS7780 IrDA-USB dongle"
+ depends on IRDA && USB
+ select CRC32
+ help
+ Say Y or M here if you want to build support for the MosChip
+ MCS7780 IrDA-USB bridge device driver.
+
+ USB bridges based on the MosChip MCS7780 don't conform to the
+ IrDA-USB device class specification, and therefore need their
+ own specific driver. Those dongles support SIR and FIR (4Mbps)
+ speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ mcs7780.
+
+endmenu
+
diff --git a/drivers/staging/irda/drivers/Makefile b/drivers/staging/irda/drivers/Makefile
new file mode 100644
index 000000000000..e2901b135528
--- /dev/null
+++ b/drivers/staging/irda/drivers/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile for the Linux IrDA infrared port device drivers.
+#
+# 9 Aug 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+subdir-ccflags-y += -I$(srctree)/drivers/staging/irda/include
+
+# FIR drivers
+obj-$(CONFIG_USB_IRDA) += irda-usb.o
+obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o
+obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
+obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
+obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
+obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
+obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
+obj-$(CONFIG_ALI_FIR) += ali-ircc.o
+obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
+obj-$(CONFIG_VIA_FIR) += via-ircc.o
+obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
+obj-$(CONFIG_MCS_FIR) += mcs7780.o
+obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+# SIR drivers
+obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
+obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
+# dongle drivers for SIR drivers
+obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
+obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
+obj-$(CONFIG_ACTISYS_DONGLE) += actisys-sir.o
+obj-$(CONFIG_LITELINK_DONGLE) += litelink-sir.o
+obj-$(CONFIG_GIRBIL_DONGLE) += girbil-sir.o
+obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
+obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
+obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
+obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
+obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
+obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
+obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
+obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
+
+# The SIR helper module
+sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/staging/irda/drivers/act200l-sir.c b/drivers/staging/irda/drivers/act200l-sir.c
new file mode 100644
index 000000000000..e8917511e1aa
--- /dev/null
+++ b/drivers/staging/irda/drivers/act200l-sir.c
@@ -0,0 +1,250 @@
+/*********************************************************************
+ *
+ * Filename: act200l.c
+ * Version: 0.8
+ * Description: Implementation for the ACTiSYS ACT-IR200L dongle
+ * Status: Experimental.
+ * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ * Created at: Fri Aug 3 17:35:42 2001
+ * Modified at: Fri Aug 17 10:22:40 2001
+ * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ *
+ * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int act200l_reset(struct sir_dev *dev);
+static int act200l_open(struct sir_dev *dev);
+static int act200l_close(struct sir_dev *dev);
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Register 0: Control register #1 */
+#define ACT200L_REG0 0x00
+#define ACT200L_TXEN 0x01 /* Enable transmitter */
+#define ACT200L_RXEN 0x02 /* Enable receiver */
+
+/* Register 1: Control register #2 */
+#define ACT200L_REG1 0x10
+#define ACT200L_LODB 0x01 /* Load new baud rate count value */
+#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
+
+/* Register 4: Output Power register */
+#define ACT200L_REG4 0x40
+#define ACT200L_OP0 0x01 /* Enable LED1C output */
+#define ACT200L_OP1 0x02 /* Enable LED2C output */
+#define ACT200L_BLKR 0x04
+
+/* Register 5: Receive Mode register */
+#define ACT200L_REG5 0x50
+#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
+
+/* Register 6: Receive Sensitivity register #1 */
+#define ACT200L_REG6 0x60
+#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
+#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
+
+/* Register 7: Receive Sensitivity register #2 */
+#define ACT200L_REG7 0x70
+#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
+
+/* Register 8,9: Baud Rate Divider register #1,#2 */
+#define ACT200L_REG8 0x80
+#define ACT200L_REG9 0x90
+
+#define ACT200L_2400 0x5f
+#define ACT200L_9600 0x17
+#define ACT200L_19200 0x0b
+#define ACT200L_38400 0x05
+#define ACT200L_57600 0x03
+#define ACT200L_115200 0x01
+
+/* Register 13: Control register #3 */
+#define ACT200L_REG13 0xd0
+#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
+
+/* Register 15: Status register */
+#define ACT200L_REG15 0xf0
+
+/* Register 21: Control register #4 */
+#define ACT200L_REG21 0x50
+#define ACT200L_EXCK 0x02 /* Disable clock output driver */
+#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
+
+static struct dongle_driver act200l = {
+ .owner = THIS_MODULE,
+ .driver_name = "ACTiSYS ACT-IR200L",
+ .type = IRDA_ACT200L_DONGLE,
+ .open = act200l_open,
+ .close = act200l_close,
+ .reset = act200l_reset,
+ .set_speed = act200l_change_speed,
+};
+
+static int __init act200l_sir_init(void)
+{
+ return irda_register_dongle(&act200l);
+}
+
+static void __exit act200l_sir_cleanup(void)
+{
+ irda_unregister_dongle(&act200l);
+}
+
+static int act200l_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on the dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int act200l_close(struct sir_dev *dev)
+{
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function act200l_change_speed (dev, speed)
+ *
+ * Set the speed for the ACTiSYS ACT-IR200L type dongle.
+ *
+ */
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 control[3];
+ int ret = 0;
+
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
+ break;
+ case 19200:
+ control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
+ break;
+ case 38400:
+ control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
+ break;
+ case 57600:
+ control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
+ break;
+ case 115200:
+ control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
+ break;
+ }
+ control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 3);
+ msleep(5);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = speed;
+ return ret;
+}
+
+/*
+ * Function act200l_reset (driver)
+ *
+ * Reset the ACTiSYS ACT-IR200L type dongle.
+ */
+
+#define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int act200l_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ static const u8 control[9] = {
+ ACT200L_REG15,
+ ACT200L_REG13 | ACT200L_SHDW,
+ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
+ ACT200L_REG13,
+ ACT200L_REG7 | ACT200L_ENPOS,
+ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
+ ACT200L_REG5 | ACT200L_RWIDL,
+ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
+ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
+ };
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset the dongle : set RTS low for 25 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ state = ACT200L_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case ACT200L_STATE_WAIT1_RESET:
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better wait for some short while */
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, sizeof(control));
+ state = ACT200L_STATE_WAIT2_RESET;
+ delay = 15;
+ break;
+
+ case ACT200L_STATE_WAIT2_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+ default:
+ net_err_ratelimited("%s(), unknown state %d\n",
+ __func__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
+MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
+
+module_init(act200l_sir_init);
+module_exit(act200l_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/actisys-sir.c b/drivers/staging/irda/drivers/actisys-sir.c
new file mode 100644
index 000000000000..e224b8b99517
--- /dev/null
+++ b/drivers/staging/irda/drivers/actisys-sir.c
@@ -0,0 +1,245 @@
+/*********************************************************************
+ *
+ * Filename: actisys.c
+ * Version: 1.1
+ * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
+ * dongles
+ * Status: Beta.
+ * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
+ * Jean Tourrilhes <jt@hpl.hp.com> (new version)
+ * Martin Diehl <mad@mdiehl.de> (new version for sir_dev)
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:13 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999 Jean Tourrilhes
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * Changelog
+ *
+ * 0.8 -> 0.9999 - Jean
+ * o New initialisation procedure : much safer and correct
+ * o New procedure to change the speed : much faster and simpler
+ * o Other cleanups & comments
+ * Thanks to Lichen Wang @ Actisys for his excellent help...
+ *
+ * 1.0 -> 1.1 - Martin Diehl
+ * modified for new sir infrastructure
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/*
+ * Define the timing of the pulses we send to the dongle (to reset it, and
+ * to toggle speeds). Basically, the limit here is the propagation speed of
+ * the signals through the serial port, the dongle being much faster. Any
+ * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
+ * go through cleanly. If you are on the wild side, you can try to lower
+ * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
+ */
+#define MIN_DELAY 10 /* 10 us to be on the conservative side */
+
+static int actisys_open(struct sir_dev *);
+static int actisys_close(struct sir_dev *);
+static int actisys_change_speed(struct sir_dev *, unsigned);
+static int actisys_reset(struct sir_dev *);
+
+/* These are the baudrates supported, in the order available */
+/* Note : the 220L doesn't support 38400, but we will fix that below */
+static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
+
+#define MAX_SPEEDS ARRAY_SIZE(baud_rates)
+
+static struct dongle_driver act220l = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L",
+ .type = IRDA_ACTISYS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static struct dongle_driver act220l_plus = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L+",
+ .type = IRDA_ACTISYS_PLUS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static int __init actisys_sir_init(void)
+{
+ int ret;
+
+ /* First, register an Actisys 220L dongle */
+ ret = irda_register_dongle(&act220l);
+ if (ret < 0)
+ return ret;
+
+ /* Now, register an Actisys 220L+ dongle */
+ ret = irda_register_dongle(&act220l_plus);
+ if (ret < 0) {
+ irda_unregister_dongle(&act220l);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit actisys_sir_cleanup(void)
+{
+ /* We have to remove both dongles */
+ irda_unregister_dongle(&act220l_plus);
+ irda_unregister_dongle(&act220l);
+}
+
+static int actisys_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* Remove support for 38400 if this is not a 220L+ dongle */
+ if (dev->dongle_drv->type == IRDA_ACTISYS_DONGLE)
+ qos->baud_rate.bits &= ~IR_38400;
+
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int actisys_close(struct sir_dev *dev)
+{
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function actisys_change_speed (task)
+ *
+ * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
+ * To cycle through the available baud rates, pulse RTS low for a few us.
+ *
+ * First, we reset the dongle to always start from a known state.
+ * Then, we cycle through the speeds by pulsing RTS low and then up.
+ * The dongle allows us to pulse quite fast, so we can set the speed in one go,
+ * which is much faster (< 100 us) and less complex than what is found
+ * in some other dongle drivers...
+ * Note that even if the new speed is the same as the current speed,
+ * we reassert the speed. This makes sure that things are all right,
+ * and it's fast anyway...
+ * By the way, this function will work for both type of dongles,
+ * because the additional speed is at the end of the sequence...
+ */
+static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int i = 0;
+
+ pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed);
+
+ /* dongle was already reset by the irda_request state machine,
+ * we are in known state (dongle default)
+ */
+
+ /*
+ * Now, we can set the speed requested. Send RTS pulses until we
+ * reach the target speed
+ */
+ for (i = 0; i < MAX_SPEEDS; i++) {
+ if (speed == baud_rates[i]) {
+ dev->speed = speed;
+ break;
+ }
+ /* Set RTS low for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(MIN_DELAY);
+
+ /* Set RTS high for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(MIN_DELAY);
+ }
+
+ /* Check if life is sweet... */
+ if (i >= MAX_SPEEDS) {
+ actisys_reset(dev);
+ ret = -EINVAL; /* This should not happen */
+ }
+
+ /* Enough work, let's get out of here... */
+ return ret;
+}
+
+/*
+ * Function actisys_reset (task)
+ *
+ * Reset the Actisys type dongle. Warning, this function must only be
+ * called with a process context!
+ *
+ * We need to do two things in this function :
+ * o first make sure that the dongle is in a state where it can operate
+ * o second put the dongle in a known state
+ *
+ * The dongle is powered from the RTS and DTR lines. In the dongle, there
+ * is a big capacitor to accommodate the current spikes. This capacitor
+ * takes at least 50 ms to be charged. In theory, the BIOS sets those lines
+ * up, so by the time we arrive here we should be set. It doesn't hurt
+ * to be on the conservative side, so we will wait...
+ * <Martin : move above comment to irda_config_fsm>
+ * Then, we set the speed to 9600 b/s to get in a known state (see
+ * change_speed for details). It is needed because the IrDA stack
+ * tries to set the speed immediately after our first return,
+ * that is, before we can be sure the dongle is up and running.
+ */
+
+static int actisys_reset(struct sir_dev *dev)
+{
+ /* Reset the dongle : set DTR low for 10 us */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = 9600; /* That's the default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
+MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
+
+module_init(actisys_sir_init);
+module_exit(actisys_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/ali-ircc.c b/drivers/staging/irda/drivers/ali-ircc.c
new file mode 100644
index 000000000000..35f198d83701
--- /dev/null
+++ b/drivers/staging/irda/drivers/ali-ircc.c
@@ -0,0 +1,2218 @@
+/*********************************************************************
+ *
+ * Filename: ali-ircc.c
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:55PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563
+ * Modified by: Clear Zhang <clear_zhang@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "ali-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+#define ALI_IRCC_DRIVER_NAME "ali-ircc"
+
+/* Power Management */
+static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int ali_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver ali_ircc_driver = {
+ .suspend = ali_ircc_suspend,
+ .resume = ali_ircc_resume,
+ .driver = {
+ .name = ALI_IRCC_DRIVER_NAME,
+ },
+};
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+/* Use BIOS settings by default, but the user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0 };
+
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
+
+/* These are the currently known ALi south-bridge chipsets; the only difference
+ * is that the M1543C doesn't support the HP HSDL-3600
+ */
+static ali_chip_t chips[] =
+{
+ { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
+ { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
+ { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
+ { NULL }
+};
+
+/* Max 4 instances for now */
+static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+
+/* Dongle Types */
+static char *dongle_types[] = {
+ "TFDS6000",
+ "HP HSDL-3600",
+ "HP HSDL-1100",
+ "No dongle connected",
+};
+
+/* Some prototypes */
+static int ali_ircc_open(int i, chipio_t *info);
+
+static int ali_ircc_close(struct ali_ircc_cb *self);
+
+static int ali_ircc_setup(chipio_t *info);
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
+static int ali_ircc_net_open(struct net_device *dev);
+static int ali_ircc_net_close(struct net_device *dev);
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
+
+/* SIR function */
+static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+
+/* FIR function */
+static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
+
+/* My Function */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info);
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
+
+/* ALi chip function */
+static void SIR2FIR(int iobase);
+static void FIR2SIR(int iobase);
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
+
+/*
+ * Function ali_ircc_init ()
+ *
+ * Initialize chip. Find out what kind of chips we are dealing with
+ * and their configuration registers address
+ */
+static int __init ali_ircc_init(void)
+{
+ ali_chip_t *chip;
+ chipio_t info;
+ int ret;
+ int cfg, cfg_base;
+ int reg, revision;
+ int i = 0;
+
+ ret = platform_driver_register(&ali_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ ALI_IRCC_DRIVER_NAME);
+ return ret;
+ }
+
+ ret = -ENODEV;
+
+ /* Probe for all the ALi chipsets we know about */
+ for (chip= chips; chip->name; chip++, i++)
+ {
+ pr_debug("%s(), Probing for %s ...\n", __func__, chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg=0; cfg<2; cfg++)
+ {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Chip Identification Register */
+ outb(chip->cid_index, cfg_base);
+ reg = inb(cfg_base+1);
+
+ if (reg == chip->cid_value)
+ {
+ pr_debug("%s(), Chip found at 0x%03x\n",
+ __func__, cfg_base);
+
+ outb(0x1F, cfg_base);
+ revision = inb(cfg_base+1);
+ pr_debug("%s(), Found %s chip, revision=%d\n",
+ __func__, chip->name, revision);
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 2000)
+ {
+ chip->init(chip, &info);
+ }
+ else
+ {
+ chip->probe(chip, &info);
+ }
+
+ if (ali_ircc_open(i, &info) == 0)
+ ret = 0;
+ i++;
+ }
+ else
+ {
+ pr_debug("%s(), No %s chip at 0x%03x\n",
+ __func__, chip->name, cfg_base);
+ }
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+ }
+ }
+
+ if (ret)
+ platform_driver_unregister(&ali_ircc_driver);
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit ali_ircc_cleanup(void)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ ali_ircc_close(dev_self[i]);
+ }
+
+ platform_driver_unregister(&ali_ircc_driver);
+
+}
+
+static const struct net_device_ops ali_ircc_sir_ops = {
+ .ndo_open = ali_ircc_net_open,
+ .ndo_stop = ali_ircc_net_close,
+ .ndo_start_xmit = ali_ircc_sir_hard_xmit,
+ .ndo_do_ioctl = ali_ircc_net_ioctl,
+};
+
+static const struct net_device_ops ali_ircc_fir_ops = {
+ .ndo_open = ali_ircc_net_open,
+ .ndo_stop = ali_ircc_net_close,
+ .ndo_start_xmit = ali_ircc_fir_hard_xmit,
+ .ndo_do_ioctl = ali_ircc_net_ioctl,
+};
+
+/*
+ * Function ali_ircc_open (int i, chipio_t *inf)
+ *
+ * Open driver instance
+ *
+ */
+static int ali_ircc_open(int i, chipio_t *info)
+{
+ struct net_device *dev;
+ struct ali_ircc_cb *self;
+ int dongle_id;
+ int err;
+
+ if (i >= ARRAY_SIZE(dev_self)) {
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Set FIR FIFO and DMA Threshold */
+ if ((ali_ircc_setup(info)) == -1)
+ return -1;
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (dev == NULL) {
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->index = i;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */
+ self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
+ self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext,
+ ALI_IRCC_DRIVER_NAME)) {
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* The only value we must override is the baudrate */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &ali_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto err_out4;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Check dongle id */
+ dongle_id = ali_ircc_read_dongle_id(i, info);
+ net_info_ratelimited("%s(), %s, Found dongle: %s\n",
+ __func__, ALI_IRCC_DRIVER_NAME,
+ dongle_types[dongle_id]);
+
+ self->io.dongle_id = dongle_id;
+
+
+ return 0;
+
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ dev_self[i] = NULL;
+ free_netdev(dev);
+ return err;
+}
+
+
+/*
+ * Function ali_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit ali_ircc_close(struct ali_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_43 (chip, info)
+ *
+ * Initialize the ALi M1543 chip.
+ */
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
+{
+ /* All controller information like I/O address, DMA channel, IRQ
+ * is set by the BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_53 (chip, info)
+ *
+ * Initialize the ALi M1535 chip.
+ */
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
+{
+ /* All controller information like I/O address, DMA channel, IRQ
+ * is set by the BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_probe_53 (chip, info)
+ *
+ * Probes for the ALi M1535D or M1535
+ */
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int hi, low, reg;
+
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read address control register */
+ outb(0x60, cfg_base);
+ hi = inb(cfg_base+1);
+ outb(0x61, cfg_base);
+ low = inb(cfg_base+1);
+ info->fir_base = (hi<<8) + low;
+
+ info->sir_base = info->fir_base;
+
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
+
+ /* Read IRQ control register */
+ outb(0x70, cfg_base);
+ reg = inb(cfg_base+1);
+ info->irq = reg & 0x0f;
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
+
+ /* Read DMA channel */
+ outb(0x74, cfg_base);
+ reg = inb(cfg_base+1);
+ info->dma = reg & 0x07;
+
+ if(info->dma == 0x04)
+ net_warn_ratelimited("%s(), No DMA channel assigned !\n",
+ __func__);
+ else
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
+
+ /* Read Enabled Status */
+ outb(0x30, cfg_base);
+ reg = inb(cfg_base+1);
+ info->enabled = (reg & 0x80) && (reg & 0x01);
+ pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled);
+
+ /* Read Power Status */
+ outb(0x22, cfg_base);
+ reg = inb(cfg_base+1);
+ info->suspended = (reg & 0x20);
+ pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_setup (info)
+ *
+ * Set FIR FIFO and DMA Threshold
+ * Returns non-negative on success.
+ *
+ */
+static int ali_ircc_setup(chipio_t *info)
+{
+ unsigned char tmp;
+ int version;
+ int iobase = info->fir_base;
+
+
+ /* Locking comments :
+ * Most operations here need to be protected. We are called before
+ * the device instance is created in ali_ircc_open(), therefore
+ * nobody can bother us - Jean II */
+
+ /* Switch to FIR space */
+ SIR2FIR(iobase);
+
+ /* Master Reset */
+ outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
+
+ /* Read FIR ID Version Register */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+FIR_ID_VR);
+
+ /* Should be 0x00 in the M1535/M1535D */
+ if(version != 0x00)
+ {
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ ALI_IRCC_DRIVER_NAME, version);
+ return -1;
+ }
+
+ /* Set FIR FIFO Threshold Register */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+
+ /* Set FIR DMA Threshold Register */
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* CRC enable */
+ switch_bank(iobase, BANK2);
+ outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
+
+ /* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/
+
+ /* Switch to Bank 0 */
+ switch_bank(iobase, BANK0);
+
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &=~0x20; // disable SIP
+ tmp |= 0x80; // these two steps make RX mode
+ tmp &= 0xbf;
+ outb(tmp, iobase+FIR_LCR_B);
+
+ /* Disable Interrupt */
+ outb(0x00, iobase+FIR_IER);
+
+
+ /* Switch to SIR space */
+ FIR2SIR(iobase);
+
+ net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n",
+ ALI_IRCC_DRIVER_NAME);
+
+ /* Enable receive interrupts */
+ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
+ // Turn on the interrupts in ali_ircc_net_open
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_read_dongle_id (int index, info)
+ *
+ * Try to read dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info)
+{
+ int dongle_id, reg;
+ int cfg_base = info->cfg_base;
+
+
+ /* Enter Configuration */
+ outb(chips[i].entr1, cfg_base);
+ outb(chips[i].entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Dongle ID */
+ outb(0xf0, cfg_base);
+ reg = inb(cfg_base+1);
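+	/* The 2-bit dongle ID is assembled from bits 7 and 5 of the
+	 * configuration register read above. */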
+ dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
+ pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n",
+ __func__, dongle_id, dongle_types[dongle_id]);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+
+ return dongle_id;
+}
+
+/*
+ * Function ali_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ali_ircc_cb *self;
+ int ret;
+
+
+ self = netdev_priv(dev);
+
+ spin_lock(&self->lock);
+
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ ret = ali_ircc_fir_interrupt(self);
+ else
+ ret = ali_ircc_sir_interrupt(self);
+
+ spin_unlock(&self->lock);
+
+ return ret;
+}
+/*
+ * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
+{
+ __u8 eir, OldMessageCount;
+ int iobase, tmp;
+
+
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ self->InterruptID = inb(iobase+FIR_IIR);
+ self->BusStatus = inb(iobase+FIR_BSR);
+
+ OldMessageCount = (self->LineStatus + 1) & 0x07;
+ self->LineStatus = inb(iobase+FIR_LSR);
+ //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
+ eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
+
+ pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID);
+ pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus);
+ pr_debug("%s(), self->ier = %x\n", __func__, self->ier);
+ pr_debug("%s(), eir = %x\n", __func__, eir);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ /* Tx or Rx Interrupt */
+
+ if (eir & IIR_EOM)
+ {
+ if (self->io.direction == IO_XMIT) /* TX */
+ {
+ pr_debug("%s(), ******* IIR_EOM (Tx) *******\n",
+ __func__);
+
+ if(ali_ircc_dma_xmit_complete(self))
+ {
+ if (irda_device_txqueue_empty(self->netdev))
+ {
+ /* Prepare for receive */
+ ali_ircc_dma_receive(self);
+ self->ier = IER_EOM;
+ }
+ }
+ else
+ {
+ self->ier = IER_EOM;
+ }
+
+ }
+ else /* RX */
+ {
+ pr_debug("%s(), ******* IIR_EOM (Rx) *******\n",
+ __func__);
+
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n",
+ __func__);
+ }
+
+ if (ali_ircc_dma_receive_complete(self))
+ {
+ pr_debug("%s(), ******* receive complete ********\n",
+ __func__);
+
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ pr_debug("%s(), ******* Not receive complete ********\n",
+ __func__);
+
+ self->ier = IER_EOM | IER_TIMER;
+ }
+
+ }
+ }
+ /* Timer Interrupt */
+ else if (eir & IIR_TIMER)
+ {
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n",
+ __func__);
+ }
+ /* Disable Timer */
+ switch_bank(iobase, BANK1);
+ tmp = inb(iobase+FIR_CR);
+ outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Interrupt on EOM */
+ self->ier = IER_EOM;
+
+ }
+ else /* Rx */
+ {
+ if(ali_ircc_dma_receive_complete(self))
+ {
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ self->ier = IER_EOM | IER_TIMER;
+ }
+ }
+ }
+
+ /* Restore Interrupt */
+ SetCOMInterrupts(self, TRUE);
+
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function ali_ircc_sir_interrupt (irq, self, eir)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int iir, lsr;
+
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase+UART_IIR) & UART_IIR_ID;
+ if (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase+UART_LSR);
+
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
+
+ switch (iir)
+ {
+ case UART_IIR_RLSI:
+ pr_debug("%s(), RLSI\n", __func__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ ali_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ {
+ /* Transmitter ready for data */
+ ali_ircc_sir_write_wakeup(self);
+ }
+ break;
+ default:
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
+ break;
+ }
+
+ }
+
+
+ return IRQ_RETVAL(iir);
+}
+
+
+/*
+ * Function ali_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ inb(iobase+UART_RX));
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ pr_debug("%s(), breaking!\n", __func__);
+ break;
+ }
+ } while (inb(iobase+UART_LSR) & UART_LSR_DR);
+
+}
+
+/*
+ * Function ali_ircc_sir_write_wakeup (self)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0)
+ {
+ /* Write data left in transmit buffer */
+ actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ }
+ else
+ {
+ if (self->new_speed)
+ {
+ /* We must wait until all data are gone */
+ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
+ pr_debug("%s(), UART_LSR_THRE\n", __func__);
+
+ pr_debug("%s(), Changing speed! self->new_speed = %d\n",
+ __func__, self->new_speed);
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+
+ // benjamin 2000/11/10 06:32PM
+ if (self->io.speed > 115200)
+ {
+ pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n",
+ __func__);
+
+ self->ier = IER_EOM;
+ // SetCOMInterrupts(self, TRUE);
+ return;
+ }
+ }
+ else
+ {
+ netif_wake_queue(self->netdev);
+ }
+
+ self->netdev->stats.tx_packets++;
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase+UART_IER);
+ }
+
+}
+
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
+{
+ struct net_device *dev = self->netdev;
+ int iobase;
+
+
+ pr_debug("%s(), setting speed = %d\n", __func__, baud);
+
+ /* This function *must* be called with irq off and spin-lock.
+ * - Jean II */
+
+ iobase = self->io.fir_base;
+
+ SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
+
+ /* Go to MIR, FIR Speed */
+ if (baud > 115200)
+ {
+
+
+ ali_ircc_fir_change_speed(self, baud);
+
+ /* Install FIR xmit handler*/
+ dev->netdev_ops = &ali_ircc_fir_ops;
+
+		/* Enable interrupt */
+ self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
+
+ /* Be ready for incoming frames */
+		ali_ircc_dma_receive(self); // benjamin 2000/11/8 07:46PM not complete
+ }
+ /* Go to SIR Speed */
+ else
+ {
+ ali_ircc_sir_change_speed(self, baud);
+
+ /* Install SIR xmit handler*/
+ dev->netdev_ops = &ali_ircc_sir_ops;
+ }
+
+
+ SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
+
+ netif_wake_queue(self->netdev);
+
+}
+
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
+{
+
+ int iobase;
+ struct ali_ircc_cb *self = priv;
+ struct net_device *dev;
+
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ pr_debug("%s(), self->io.speed = %d, change to speed = %d\n",
+ __func__, self->io.speed, baud);
+
+ /* Come from SIR speed */
+ if(self->io.speed <=115200)
+ {
+ SIR2FIR(iobase);
+ }
+
+ /* Update accounting for new speed */
+ self->io.speed = baud;
+
+ // Set Dongle Speed mode
+ ali_ircc_change_dongle_speed(self, baud);
+
+}
+
+/*
+ * Function ali_ircc_sir_change_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
+{
+ struct ali_ircc_cb *self = priv;
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /* Come from MIR or FIR speed */
+ if(self->io.speed >115200)
+ {
+ // Set Dongle Speed mode first
+ ali_ircc_change_dongle_speed(self, speed);
+
+ FIR2SIR(iobase);
+ }
+
+	// Clear Line and Auxiliary status registers 2000/11/24 11:47AM
+
+ inb(iobase+UART_LSR);
+ inb(iobase+UART_SCR);
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
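+	/* Standard 16550-style divisor from the 115200 bps base rate,
+	 * e.g. 9600 bps -> 12 and 115200 bps -> 1. */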
+ divisor = 115200/speed;
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
+	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ if (self->io.speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase+UART_DLM);
+ outb(lcr, iobase+UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
+
+	/* Without this, the connection breaks after coming back from FIR speed,
+	   but with it, the SIR connection is harder to establish */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
+}
+
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
+{
+
+ struct ali_ircc_cb *self = priv;
+ int iobase,dongle_id;
+ int tmp = 0;
+
+
+ iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
+ dongle_id = self->io.dongle_id;
+
+ /* We are already locked, no need to do it again */
+
+ pr_debug("%s(), Set Speed for %s , Speed = %d\n",
+ __func__, dongle_types[dongle_id], speed);
+
+ switch_bank(iobase, BANK2);
+ tmp = inb(iobase+FIR_IRDA_CR);
+
+ /* IBM type dongle */
+ if(dongle_id == 0)
+ {
+ if(speed == 4000000)
+ {
+ // __ __
+ // SD/MODE __| |__ __
+ // __ __
+ // IRTX __ __| |__
+ // T1 T2 T3 T4 T5
+
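+			/* In the sequence below, SD/MODE is driven through bit 3
+			 * (IRDA_CR_SD_ST) and IRTX is forced low/high with the
+			 * 0x02/0x03 patterns (IRDA_CR_ITTX_*); each T step is
+			 * separated by a ~2 us delay. */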
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T3 -> SD/MODE:1 IRTX:1
+ tmp |= 0x0b;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T4 -> SD/MODE:0 IRTX:1
+ tmp &= ~0x08;
+ tmp |= 0x03;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T5 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ else /* speed <=1152000 */
+ {
+ // __
+ // SD/MODE __| |__
+ //
+ // IRTX ________
+ // T1 T2 T3
+
+ /* MIR 115200, 57600 */
+ if (speed==1152000)
+ {
+ tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
+ }
+ else
+ {
+ tmp &=~0x80; //HDLC 0.576Mbps
+ tmp |= 0x20; //HDLC=1,
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ /* MIR 115200, 57600 */
+
+ //switch_bank(iobase, BANK2);
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T3 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ }
+ else if (dongle_id == 1) /* HP HDSL-3600 */
+ {
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ else /* HP HDSL-1100 */
+ {
+ if(speed <= 115200) /* SIR */
+ {
+
+ tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ else /* MIR FIR */
+ {
+
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+ tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+}
+
+/*
+ * Function ali_ircc_sir_write (driver)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ pr_debug("%s(), failed, fifo not empty!\n", __func__);
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+
+ actual++;
+ }
+
+ return actual;
+}
+
+/*
+ * Function ali_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int ali_ircc_net_open(struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Request IRQ and install Interrupt Handler */
+ if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
+ {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.irq);
+ return -EAGAIN;
+ }
+
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+	/* Turn on interrupts */
+ outb(UART_IER_RDI , iobase+UART_IER);
+
+ /* Ready to play! */
+ netif_start_queue(dev); //benjamin by irport
+
+ /* Give self a hardware name */
+ sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int ali_ircc_net_close(struct net_device *dev)
+{
+
+ struct ali_ircc_cb *self;
+ //int iobase;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_fir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame
+ *
+ */
+static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+ int mtt, diff;
+
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed changes are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ dev->stats.tx_bytes += skb->len;
+
+ skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+ skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1)
+ {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+
+ if (mtt)
+ {
+ /* Check how much time we have used already */
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+ /* self->stamp is set from ali_ircc_dma_receive_complete() */
+
+ pr_debug("%s(), ******* diff = %d *******\n",
+ __func__, diff);
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff)
+ {
+ mtt -= diff;
+
+ /*
+				 * Use the hardware timer if the remaining delay is
+				 * larger than 500 us, and busy-wait with udelay()
+				 * for smaller values, which should be acceptable.
+ */
+ if (mtt > 500)
+ {
+ /* Adjust for timer resolution */
+					mtt = (mtt+250) / 500;	/* Convert to 500 us timer units, rounding half up */
+
+ pr_debug("%s(), ************** mtt = %d ***********\n",
+ __func__, mtt);
+
+ /* Setup timer */
+ if (mtt == 1) /* 500 us */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
+ }
+ else if (mtt == 2) /* 1 ms */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
+ }
+					else /* longer delays -> 2 ms timer */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
+ }
+
+
+ /* Start timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ self->ier = IER_TIMER;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Timer will take care of the rest */
+ goto out;
+ }
+ else
+ udelay(mtt);
+			} // if (mtt > diff)
+ }// if (mtt)
+
+ /* Enable EOM interrupt */
+ self->ier = IER_EOM;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Transmit frame */
+ ali_ircc_dma_xmit(self);
+ } // if (self->tx_fifo.len == 1)
+
+ out:
+
+ /* Not busy transmitting anymore if window is not full */
+ if (self->tx_fifo.free < MAX_TX_WINDOW)
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ switch_bank(iobase, BANK0);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+ unsigned char FIFO_OPTI, Hi, Lo;
+
+
+
+ iobase = self->io.fir_base;
+
+ /* FIFO threshold , this method comes from NDIS5 code */
+
+ if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
+ FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
+ else
+ FIFO_OPTI = TX_FIFO_Threshold;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ self->io.direction = IO_XMIT;
+
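+	/* The DMA address is derived by offsetting this queue entry's start
+	 * pointer from the head of the coherent TX buffer. */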
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Reset Tx FIFO */
+ switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ /* Set Tx FIFO threshold */
+ if (self->fifo_opti_buf!=FIFO_OPTI)
+ {
+ switch_bank(iobase, BANK1);
+ outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
+ self->fifo_opti_buf=FIFO_OPTI;
+ }
+
+ /* Set Tx DMA threshold */
+ switch_bank(iobase, BANK1);
+ outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Set max Tx frame size */
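+	/* The 12-bit frame length is split across the high/low TX data
+	 * size registers. */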
+ Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
+ Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
+ switch_bank(iobase, BANK2);
+ outb(Hi, iobase+FIR_TX_DSR_HI);
+ outb(Lo, iobase+FIR_TX_DSR_LO);
+
+	/* Disable SIP, disable Brick Wall (not supported in TX mode), change to TX mode */
+ switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &= ~0x20; // Disable SIP
+ outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
+ pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
+
+ outb(0, iobase+FIR_LSR);
+
+ /* Enable DMA and Burst Mode */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+
+}
+
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int ret = TRUE;
+
+
+ iobase = self->io.fir_base;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Check for underrun! */
+ switch_bank(iobase, BANK0);
+ if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
+
+ {
+ net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n",
+ __func__);
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+ }
+ else
+ {
+ self->netdev->stats.tx_packets++;
+ }
+
+ /* Check if we need to change the speed */
+ if (self->new_speed)
+ {
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ }
+ else
+ { /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames */
+ if (self->tx_fifo.free < MAX_TX_WINDOW) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ switch_bank(iobase, BANK0);
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Reset Message Count */
+ switch_bank(iobase, BANK0);
+ outb(0x07, iobase+FIR_LSR);
+
+ self->rcvFramesOverflow = FALSE;
+
+ self->LineStatus = inb(iobase+FIR_LSR) ;
+
+ /* Reset Rx FIFO info */
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO */
+ // switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Set Receive Mode,Brick Wall */
+ //switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
+ pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
+
+ /* Set Rx Threshold */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Enable DMA and Burst Mode */
+ // switch_bank(iobase, BANK1);
+ outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+ return 0;
+}
+
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status, MessageCount;
+ int len, i, iobase, val;
+
+ st_fifo = &self->st_fifo;
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ MessageCount = inb(iobase+ FIR_LSR)&0x07;
+
+ if (MessageCount > 0)
+ pr_debug("%s(), Message count = %d\n", __func__, MessageCount);
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Bank 0 */
+ switch_bank(iobase, BANK0);
+ status = inb(iobase+FIR_LSR);
+
+ switch_bank(iobase, BANK2);
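+		/* Reassemble the 12-bit receive length from the high/low
+		 * RX data size registers. */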
+ len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
+ len = len << 8;
+ len |= inb(iobase+FIR_RX_DSR_LO);
+
+ pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len);
+ pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ pr_debug("%s(), window is full!\n", __func__);
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
+ {
+ pr_debug("%s(), ************* RX Errors ************\n",
+ __func__);
+
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & LSR_FIFO_UR)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* FIFO Errors ************\n",
+ __func__);
+ }
+ if (status & LSR_FRAME_ERROR)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* FRAME Errors ************\n",
+ __func__);
+ }
+
+ if (status & LSR_CRC_ERROR)
+ {
+ self->netdev->stats.rx_crc_errors++;
+ pr_debug("%s(), ************* CRC Errors ************\n",
+ __func__);
+ }
+
+ if(self->rcvFramesOverflow)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* Overran DMA buffer ************\n",
+ __func__);
+ }
+ if(len == 0)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ********** Receive Frame Size = 0 *********\n",
+ __func__);
+ }
+ }
+ else
+ {
+
+ if (st_fifo->pending_bytes < 32)
+ {
+ switch_bank(iobase, BANK0);
+ val = inb(iobase+FIR_BSR);
+ if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
+ {
+ pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n",
+ __func__);
+
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 500 us
+ */
+
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM
+
+ /* Enable Timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL)
+ {
+ self->netdev->stats.rx_dropped++;
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC, CRC is removed by hardware*/
+ skb_put(skb, len);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len);
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+ return TRUE;
+}
+
+
+
+/*
+ * Function ali_ircc_sir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ iobase = self->io.sir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed changes are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->netdev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, iobase+UART_IER);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Function ali_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ pr_debug("%s(), SIOCSBANDWIDTH\n", __func__);
+ /*
+ * This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+ if (!in_interrupt() && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ali_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ pr_debug("%s(), SIOCSMEDIABUSY\n", __func__);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ pr_debug("%s(), SIOCGRECEIVING\n", __func__);
+ /* This is protected */
+ irq->ifr_receiving = ali_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200)
+ {
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK1);
+ if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
+ {
+ /* We are receiving something */
+ pr_debug("%s(), We are receiving something\n",
+ __func__);
+ status = TRUE;
+ }
+ switch_bank(iobase, BANK0);
+ }
+ else
+ {
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+
+ return status;
+}
+
+static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct ali_ircc_cb *self = platform_get_drvdata(dev);
+
+ net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
+
+ if (self->io.suspended)
+ return 0;
+
+ ali_ircc_net_close(self->netdev);
+
+ self->io.suspended = 1;
+
+ return 0;
+}
+
+static int ali_ircc_resume(struct platform_device *dev)
+{
+ struct ali_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (!self->io.suspended)
+ return 0;
+
+ ali_ircc_net_open(self->netdev);
+
+ net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
+
+ self->io.suspended = 0;
+
+ return 0;
+}
+
+/* ALi Chip Function */
+
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
+{
+
+ unsigned char newMask;
+
+ int iobase = self->io.fir_base; /* or sir_base */
+
+ pr_debug("%s(), -------- Start -------- ( Enable = %d )\n",
+ __func__, enable);
+
+ /* Enable the interrupt which we wish to */
+ if (enable){
+ if (self->io.direction == IO_XMIT)
+ {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_THRI | UART_IER_RDI;
+ }
+ }
+ else {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_RDI;
+ }
+ }
+ }
+ else /* Disable all the interrupts */
+ {
+ newMask = 0x00;
+
+ }
+
+	//SIR and FIR have different registers
+ if (self->io.speed > 115200)
+ {
+ switch_bank(iobase, BANK0);
+ outb(newMask, iobase+FIR_IER);
+ }
+ else
+ outb(newMask, iobase+UART_IER);
+
+}
+
+static void SIR2FIR(int iobase)
+{
+ //unsigned char tmp;
+
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x28, iobase+UART_MCR);
+ outb(0x68, iobase+UART_MCR);
+ outb(0x88, iobase+UART_MCR);
+
+ outb(0x60, iobase+FIR_MCR); /* Master Reset */
+ outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
+
+ //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */
+ //tmp |= 0x20;
+ //outb(tmp, iobase+FIR_LCR_B);
+
+}
+
+static void FIR2SIR(int iobase)
+{
+ unsigned char val;
+
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x20, iobase+FIR_MCR); /* IRQ to low */
+ outb(0x00, iobase+UART_IER);
+
+ outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */
+ outb(0x00, iobase+UART_FCR);
+ outb(0x07, iobase+UART_FCR);
+
+ val = inb(iobase+UART_RX);
+ val = inb(iobase+UART_LSR);
+ val = inb(iobase+UART_MSR);
+
+}
+
+MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
+MODULE_DESCRIPTION("ALi FIR Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
+
+
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_hw_array(dma, int, dma, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+
+module_init(ali_ircc_init);
+module_exit(ali_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/ali-ircc.h b/drivers/staging/irda/drivers/ali-ircc.h
new file mode 100644
index 000000000000..c2d9747a5108
--- /dev/null
+++ b/drivers/staging/irda/drivers/ali-ircc.h
@@ -0,0 +1,227 @@
+/*********************************************************************
+ *
+ * Filename: ali-ircc.h
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:56PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef ALI_IRCC_H
+#define ALI_IRCC_H
+
+#include <linux/ktime.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* SIR Register */
+/* Uses the definitions from linux/serial_reg.h */
+
+/* FIR Register */
+#define BANK0 0x20
+#define BANK1 0x21
+#define BANK2 0x22
+#define BANK3 0x23
+
+#define FIR_MCR 0x07 /* Master Control Register */
+
+/* Bank 0 */
+#define FIR_DR 0x00 /* Alias 0, FIR Data Register (R/W) */
+#define FIR_IER 0x01 /* Alias 1, FIR Interrupt Enable Register (R/W) */
+#define FIR_IIR 0x02 /* Alias 2, FIR Interrupt Identification Register (Read only) */
+#define FIR_LCR_A 0x03 /* Alias 3, FIR Line Control Register A (R/W) */
+#define FIR_LCR_B 0x04 /* Alias 4, FIR Line Control Register B (R/W) */
+#define FIR_LSR 0x05 /* Alias 5, FIR Line Status Register (R/W) */
+#define FIR_BSR 0x06 /* Alias 6, FIR Bus Status Register (Read only) */
+
+
+ /* Alias 1 */
+ #define IER_FIFO 0x10 /* FIR FIFO Interrupt Enable */
+ #define IER_TIMER 0x20 /* Timer Interrupt Enable */
+ #define IER_EOM 0x40 /* End of Message Interrupt Enable */
+ #define IER_ACT 0x80 /* Active Frame Interrupt Enable */
+
+ /* Alias 2 */
+ #define IIR_FIFO 0x10 /* FIR FIFO Interrupt */
+ #define IIR_TIMER 0x20 /* Timer Interrupt */
+ #define IIR_EOM 0x40 /* End of Message Interrupt */
+ #define IIR_ACT 0x80 /* Active Frame Interrupt */
+
+ /* Alias 3 */
+ #define LCR_A_FIFO_RESET 0x80 /* FIFO Reset */
+
+ /* Alias 4 */
+ #define LCR_B_BW 0x10 /* Brick Wall */
+ #define LCR_B_SIP 0x20 /* SIP Enable */
+ #define LCR_B_TX_MODE 0x40 /* Transmit Mode */
+ #define LCR_B_RX_MODE 0x80 /* Receive Mode */
+
+ /* Alias 5 */
+ #define LSR_FIR_LSA 0x00 /* FIR Line Status Address */
+ #define LSR_FRAME_ABORT 0x08 /* Frame Abort */
+ #define LSR_CRC_ERROR 0x10 /* CRC Error */
+ #define LSR_SIZE_ERROR 0x20 /* Size Error */
+ #define LSR_FRAME_ERROR 0x40 /* Frame Error */
+ #define LSR_FIFO_UR 0x80 /* FIFO Underrun */
+ #define LSR_FIFO_OR 0x80 /* FIFO Overrun */
+
+ /* Alias 6 */
+ #define BSR_FIFO_NOT_EMPTY 0x80 /* FIFO Not Empty */
+
+/* Bank 1 */
+#define FIR_CR 0x00 /* Alias 0, FIR Configuration Register (R/W) */
+#define FIR_FIFO_TR 0x01 /* Alias 1, FIR FIFO Threshold Register (R/W) */
+#define FIR_DMA_TR 0x02 /* Alias 2, FIR DMA Threshold Register (R/W) */
+#define FIR_TIMER_IIR 0x03 /* Alias 3, FIR Timer interrupt interval register (W/O) */
+#define FIR_FIFO_FR 0x03 /* Alias 3, FIR FIFO Flag register (R/O) */
+#define FIR_FIFO_RAR 0x04 /* Alias 4, FIR FIFO Read Address register (R/O) */
+#define FIR_FIFO_WAR 0x05 /* Alias 5, FIR FIFO Write Address register (R/O) */
+#define FIR_TR		0x06	/* Alias 6, Test Register (W/O) */
+
+ /* Alias 0 */
+ #define CR_DMA_EN 0x01 /* DMA Enable */
+ #define CR_DMA_BURST 0x02 /* DMA Burst Mode */
+ #define CR_TIMER_EN 0x08 /* Timer Enable */
+
+ /* Alias 3 */
+ #define TIMER_IIR_500 0x00 /* 500 us */
+ #define TIMER_IIR_1ms 0x01 /* 1 ms */
+ #define TIMER_IIR_2ms 0x02 /* 2 ms */
+ #define TIMER_IIR_4ms 0x03 /* 4 ms */
+
+/* Bank 2 */
+#define FIR_IRDA_CR 0x00 /* Alias 0, IrDA Control Register (R/W) */
+#define FIR_BOF_CR 0x01 /* Alias 1, BOF Count Register (R/W) */
+#define FIR_BW_CR 0x02 /* Alias 2, Brick Wall Count Register (R/W) */
+#define FIR_TX_DSR_HI 0x03 /* Alias 3, TX Data Size Register (high) (R/W) */
+#define FIR_TX_DSR_LO 0x04 /* Alias 4, TX Data Size Register (low) (R/W) */
+#define FIR_RX_DSR_HI 0x05 /* Alias 5, RX Data Size Register (high) (R/W) */
+#define FIR_RX_DSR_LO 0x06 /* Alias 6, RX Data Size Register (low) (R/W) */
+
+ /* Alias 0 */
+ #define IRDA_CR_HDLC1152 0x80 /* 1.152Mbps HDLC Select */
+	#define IRDA_CR_CRC	0x40	/* CRC Select. */
+ #define IRDA_CR_HDLC 0x20 /* HDLC select. */
+ #define IRDA_CR_HP_MODE 0x10 /* HP mode (read only) */
+ #define IRDA_CR_SD_ST 0x08 /* SD/MODE State. */
+ #define IRDA_CR_FIR_SIN 0x04 /* FIR SIN Select. */
+ #define IRDA_CR_ITTX_0 0x02 /* SOUT State. IRTX force to 0 */
+ #define IRDA_CR_ITTX_1 0x03 /* SOUT State. IRTX force to 1 */
+
+/* Bank 3 */
+#define FIR_ID_VR 0x00 /* Alias 0, FIR ID Version Register (R/O) */
+#define FIR_MODULE_CR 0x01 /* Alias 1, FIR Module Control Register (R/W) */
+#define FIR_IO_BASE_HI 0x02 /* Alias 2, FIR Higher I/O Base Address Register (R/O) */
+#define FIR_IO_BASE_LO 0x03 /* Alias 3, FIR Lower I/O Base Address Register (R/O) */
+#define FIR_IRQ_CR 0x04 /* Alias 4, FIR IRQ Channel Register (R/O) */
+#define FIR_DMA_CR 0x05 /* Alias 5, FIR DMA Channel Register (R/O) */
+
+struct ali_chip {
+ char *name;
+ int cfg[2];
+ unsigned char entr1;
+ unsigned char entr2;
+ unsigned char cid_index;
+ unsigned char cid_value;
+ int (*probe)(struct ali_chip *chip, chipio_t *info);
+ int (*init)(struct ali_chip *chip, chipio_t *info);
+};
+typedef struct ali_chip ali_chip_t;
+
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+#define TX_FIFO_Threshold 8
+#define RX_FIFO_Threshold 1
+#define TX_DMA_Threshold 1
+#define RX_DMA_Threshold 1
+
+/* For storing entries in the status FIFO */
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+/* Private data for each instance */
+struct ali_ircc_cb {
+
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ __u8 InterruptID; /* Interrupt ID */
+ __u8 BusStatus; /* Bus Status */
+ __u8 LineStatus; /* Line Status */
+
+ unsigned char rcvFramesOverflow;
+
+ ktime_t stamp;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ unsigned char fifo_opti_buf;
+};
+
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+FIR_MCR);
+}
+
+#endif /* ALI_IRCC_H */
diff --git a/drivers/staging/irda/drivers/au1k_ir.c b/drivers/staging/irda/drivers/au1k_ir.c
new file mode 100644
index 000000000000..be4ea6aa57a9
--- /dev/null
+++ b/drivers/staging/irda/drivers/au1k_ir.c
@@ -0,0 +1,989 @@
+/*
+ * Alchemy Semi Au1000 IrDA driver
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* registers */
+#define IR_RING_PTR_STATUS 0x00
+#define IR_RING_BASE_ADDR_H 0x04
+#define IR_RING_BASE_ADDR_L 0x08
+#define IR_RING_SIZE 0x0C
+#define IR_RING_PROMPT 0x10
+#define IR_RING_ADDR_CMPR 0x14
+#define IR_INT_CLEAR 0x18
+#define IR_CONFIG_1 0x20
+#define IR_SIR_FLAGS 0x24
+#define IR_STATUS 0x28
+#define IR_READ_PHY_CONFIG 0x2C
+#define IR_WRITE_PHY_CONFIG 0x30
+#define IR_MAX_PKT_LEN 0x34
+#define IR_RX_BYTE_CNT 0x38
+#define IR_CONFIG_2 0x3C
+#define IR_ENABLE 0x40
+
+/* Config1 */
+#define IR_RX_INVERT_LED (1 << 0)
+#define IR_TX_INVERT_LED (1 << 1)
+#define IR_ST (1 << 2)
+#define IR_SF (1 << 3)
+#define IR_SIR (1 << 4)
+#define IR_MIR (1 << 5)
+#define IR_FIR (1 << 6)
+#define IR_16CRC (1 << 7)
+#define IR_TD (1 << 8)
+#define IR_RX_ALL (1 << 9)
+#define IR_DMA_ENABLE (1 << 10)
+#define IR_RX_ENABLE (1 << 11)
+#define IR_TX_ENABLE (1 << 12)
+#define IR_LOOPBACK (1 << 14)
+#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
+ IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
+ IR_16CRC)
+
+/* ir_status */
+#define IR_RX_STATUS (1 << 9)
+#define IR_TX_STATUS (1 << 10)
+#define IR_PHYEN (1 << 15)
+
+/* ir_write_phy_config */
+#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
+#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
+#define IR_P(x) ((x) & 0x1f) /* preamble bits */
+
+/* Config2 */
+#define IR_MODE_INV (1 << 0)
+#define IR_ONE_PIN (1 << 1)
+#define IR_PHYCLK_40MHZ (0 << 2)
+#define IR_PHYCLK_48MHZ (1 << 2)
+#define IR_PHYCLK_56MHZ (2 << 2)
+#define IR_PHYCLK_64MHZ (3 << 2)
+#define IR_DP (1 << 4)
+#define IR_DA (1 << 5)
+#define IR_FLT_HIGH (0 << 6)
+#define IR_FLT_MEDHI (1 << 6)
+#define IR_FLT_MEDLO (2 << 6)
+#define IR_FLT_LO (3 << 6)
+#define IR_IEN (1 << 8)
+
+/* ir_enable */
+#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
+#define IR_CE (1 << 2) /* clock enable */
+#define IR_C (1 << 1) /* coherency bit */
+#define IR_BE (1 << 0) /* set in big endian mode */
+
+#define NUM_IR_DESC 64
+#define RING_SIZE_4 0x0
+#define RING_SIZE_16 0x3
+#define RING_SIZE_64 0xF
+#define MAX_NUM_IR_DESC 64
+#define MAX_BUF_SIZE 2048
+
+/* Ring descriptor flags */
+#define AU_OWN (1 << 7) /* tx,rx */
+#define IR_DIS_CRC (1 << 6) /* tx */
+#define IR_BAD_CRC (1 << 5) /* tx */
+#define IR_NEED_PULSE (1 << 4) /* tx */
+#define IR_FORCE_UNDER (1 << 3) /* tx */
+#define IR_DISABLE_TX (1 << 2) /* tx */
+#define IR_HW_UNDER (1 << 0) /* tx */
+#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
+
+#define IR_PHY_ERROR (1 << 6) /* rx */
+#define IR_CRC_ERROR (1 << 5) /* rx */
+#define IR_MAX_LEN (1 << 4) /* rx */
+#define IR_FIFO_OVER (1 << 3) /* rx */
+#define IR_SIR_ERROR (1 << 2) /* rx */
+#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
+ IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
+
+struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+};
+
+struct ring_dest {
+ u8 count_0; /* 7:0 */
+ u8 count_1; /* 12:8 */
+ u8 reserved;
+ u8 flags;
+ u8 addr_0; /* 7:0 */
+ u8 addr_1; /* 15:8 */
+ u8 addr_2; /* 23:16 */
+ u8 addr_3; /* 31:24 */
+};
+
+/* Private data for each instance */
+struct au1k_private {
+ void __iomem *iobase;
+ int irq_rx, irq_tx;
+
+ struct db_dest *pDBfree;
+ struct db_dest db[2 * NUM_IR_DESC];
+ volatile struct ring_dest *rx_ring[NUM_IR_DESC];
+ volatile struct ring_dest *tx_ring[NUM_IR_DESC];
+ struct db_dest *rx_db_inuse[NUM_IR_DESC];
+ struct db_dest *tx_db_inuse[NUM_IR_DESC];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ iobuff_t rx_buff;
+
+ struct net_device *netdev;
+ struct qos_info qos;
+ struct irlap_cb *irlap;
+
+ u8 open;
+ u32 speed;
+ u32 newspeed;
+
+ struct resource *ioarea;
+ struct au1k_irda_platform_data *platdata;
+ struct clk *irda_clk;
+};
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
+{
+ if (p->platdata && p->platdata->set_phy_mode)
+ p->platdata->set_phy_mode(mode);
+}
+
+static inline unsigned long irda_read(struct au1k_private *p,
+ unsigned long ofs)
+{
+ /*
+ * IrDA peripheral bug. You have to read the register
+ * twice to get the right value.
+ */
+ (void)__raw_readl(p->iobase + ofs);
+ return __raw_readl(p->iobase + ofs);
+}
+
+static inline void irda_write(struct au1k_private *p, unsigned long ofs,
+ unsigned long val)
+{
+ __raw_writel(val, p->iobase + ofs);
+ wmb();
+}
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static struct db_dest *GetFreeDB(struct au1k_private *aup)
+{
+ struct db_dest *db;
+ db = aup->pDBfree;
+
+ if (db)
+ aup->pDBfree = db->pnext;
+ return db;
+}
+
+/*
+ DMA memory allocation, derived from pci_alloc_consistent.
+  However, the Au1000 data cache is coherent (when so programmed),
+  therefore we return a KSEG0 address, not KSEG1.
+*/
+static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ ret = (void *)KSEG0ADDR(ret);
+ }
+ return ret;
+}
+
+static void dma_free(void *vaddr, size_t size)
+{
+ vaddr = (void *)KSEG0ADDR(vaddr);
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+
+static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+{
+ int i;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->rx_ring[i] = (volatile struct ring_dest *)
+ (rx_base + sizeof(struct ring_dest) * i);
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->tx_ring[i] = (volatile struct ring_dest *)
+ (tx_base + sizeof(struct ring_dest) * i);
+ }
+}
+
+static int au1k_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int au1k_irda_set_speed(struct net_device *dev, int speed)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+ unsigned long control;
+ int ret = 0, timeout = 10, i;
+
+ if (speed == aup->speed)
+ return ret;
+
+ /* disable PHY first */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+
+ /* disable RX/TX */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
+ msleep(20);
+ while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
+ msleep(20);
+ if (!timeout--) {
+ printk(KERN_ERR "%s: rx/tx disable timeout\n",
+ dev->name);
+ break;
+ }
+ }
+
+ /* disable DMA */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
+ msleep(20);
+
+	/* After we disable tx/rx, the index pointers go back to zero. */
+ aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->tx_ring[i];
+ ptxd->flags = 0;
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ }
+
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->rx_ring[i];
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ ptxd->flags = AU_OWN;
+ }
+
+ if (speed == 4000000)
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
+ else
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
+
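+	/* For the SIR rates below, the IR_BR field is programmed with
+	 * (115200 / speed) - 1 while the pulse width stays at IR_PW(12);
+	 * 4 Mbps switches to FIR mode with a preamble setting instead. */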
+ switch (speed) {
+ case 9600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 19200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 38400:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 57600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 115200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 4000000:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
+ irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
+ IR_RX_ENABLE);
+ break;
+ default:
+ printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ ret = -EINVAL;
+ break;
+ }
+
+ aup->speed = speed;
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
+
+ control = irda_read(aup, IR_STATUS);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ if (control & (1 << 14)) {
+ printk(KERN_ERR "%s: configuration error\n", dev->name);
+ } else {
+ if (control & (1 << 11))
+ printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ if (control & (1 << 12))
+ printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ if (control & (1 << 13))
+ printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ if (control & (1 << 10))
+ printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ if (control & (1 << 9))
+ printk(KERN_DEBUG "%s RX enabled\n", dev->name);
+ }
+
+ return ret;
+}
+
+static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+
+ if (status & IR_RX_ERROR) {
+ ps->rx_errors++;
+ if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
+ ps->rx_missed_errors++;
+ if (status & IR_MAX_LEN)
+ ps->rx_length_errors++;
+ if (status & IR_CRC_ERROR)
+ ps->rx_crc_errors++;
+ } else
+ ps->rx_bytes += count;
+}
+
+static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & IR_TX_ERROR) {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+}
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+
+ ptxd = aup->tx_ring[aup->tx_tail];
+ while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+ update_tx_stats(dev, ptxd->flags,
+ (ptxd->count_1 << 8) | ptxd->count_0);
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ wmb();
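+		/* NUM_IR_DESC is a power of two, so ring indices wrap with a
+		 * simple mask. */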
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+ ptxd = aup->tx_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+
+ if (aup->tx_tail == aup->tx_head) {
+ if (aup->newspeed) {
+ au1k_irda_set_speed(dev, aup->newspeed);
+ aup->newspeed = 0;
+ } else {
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+ }
+ }
+}
+
+static int au1k_irda_rx(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *prxd;
+ struct sk_buff *skb;
+ struct db_dest *pDB;
+ u32 flags, count;
+
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ while (!(flags & AU_OWN)) {
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ count = (prxd->count_1 << 8) | prxd->count_0;
+ if (!(flags & IR_RX_ERROR)) {
+ /* good frame */
+ update_rx_stats(dev, flags, count);
+ skb = alloc_skb(count + 1, GFP_ATOMIC);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ if (aup->speed == 4000000)
+ skb_put(skb, count);
+ else
+ skb_put(skb, count - 2);
+ skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
+ count - 2);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ prxd->count_0 = 0;
+ prxd->count_1 = 0;
+ }
+ prxd->flags |= AU_OWN;
+ aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ /* next descriptor */
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ }
+ return 0;
+}
+
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
+
+ au1k_irda_rx(dev);
+ au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
+}
+
+static int au1k_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ u32 enable, ring_address, phyck;
+ struct clk *c;
+ int i;
+
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ i = clk_prepare_enable(c);
+ if (i) {
+ clk_put(c);
+ return i;
+ }
+
+ switch (clk_get_rate(c)) {
+ case 40000000:
+ phyck = IR_PHYCLK_40MHZ;
+ break;
+ case 48000000:
+ phyck = IR_PHYCLK_48MHZ;
+ break;
+ case 56000000:
+ phyck = IR_PHYCLK_56MHZ;
+ break;
+ case 64000000:
+ phyck = IR_PHYCLK_64MHZ;
+ break;
+ default:
+ clk_disable_unprepare(c);
+ clk_put(c);
+ return -EINVAL;
+ }
+ aup->irda_clk = c;
+
+ enable = IR_HC | IR_CE | IR_C;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ enable |= IR_BE;
+#endif
+ aup->tx_head = 0;
+ aup->tx_tail = 0;
+ aup->rx_head = 0;
+
+ for (i = 0; i < NUM_IR_DESC; i++)
+ aup->rx_ring[i]->flags = AU_OWN;
+
+ irda_write(aup, IR_ENABLE, enable);
+ msleep(20);
+
+ /* disable PHY */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+ msleep(20);
+
+ irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
+
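+	/*
+	 * The ring base is programmed as two halves: bits 31:26 into
+	 * IR_RING_BASE_ADDR_H and bits 25:10 into IR_RING_BASE_ADDR_L.
+	 * The low 10 bits are implied zero, which is why the descriptor
+	 * ring has to be 1KB aligned.
+	 */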
+ ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
+ irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
+ irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
+
+ irda_write(aup, IR_RING_SIZE,
+ (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
+
+ irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN);
+ irda_write(aup, IR_RING_ADDR_CMPR, 0);
+
+ au1k_irda_set_speed(dev, 9600);
+ return 0;
+}
+
+static int au1k_irda_start(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ char hwname[32];
+ int retval;
+
+ retval = au1k_init(dev);
+ if (retval) {
+ printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
+ return retval;
+ }
+
+ retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+ retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ free_irq(aup->irq_tx, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
+ /* Give self a hardware name */
+ sprintf(hwname, "Au1000 SIR/FIR");
+ aup->irlap = irlap_open(dev, &aup->qos, hwname);
+ netif_start_queue(dev);
+
+ /* int enable */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
+
+ /* power up */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
+
+ return 0;
+}
+
+static int au1k_irda_stop(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+
+ /* disable interrupts */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
+ irda_write(aup, IR_CONFIG_1, 0);
+ irda_write(aup, IR_ENABLE, 0); /* disable clock */
+
+ if (aup->irlap) {
+ irlap_close(aup->irlap);
+ aup->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(aup->irq_tx, dev);
+ free_irq(aup->irq_rx, dev);
+
+ clk_disable_unprepare(aup->irda_clk);
+ clk_put(aup->irda_clk);
+
+ return 0;
+}
+
+/*
+ * Au1000 transmit routine.
+ */
+static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+ volatile struct ring_dest *ptxd;
+ struct db_dest *pDB;
+ u32 len, flags;
+
+ if (speed != aup->speed && speed != -1)
+ aup->newspeed = speed;
+
+ if ((skb->len == 0) && (aup->newspeed)) {
+ if (aup->tx_tail == aup->tx_head) {
+ au1k_irda_set_speed(dev, speed);
+ aup->newspeed = 0;
+ }
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ptxd = aup->tx_ring[aup->tx_head];
+ flags = ptxd->flags;
+
+ if (flags & AU_OWN) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+
+#if 0
+ if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
+ printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ irda_read(aup, IR_RX_BYTE_CNT));
+ }
+#endif
+
+ if (aup->speed == 4000000) {
+ /* FIR */
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
+ ptxd->count_0 = skb->len & 0xff;
+ ptxd->count_1 = (skb->len >> 8) & 0xff;
+ } else {
+ /* SIR */
+ len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
+ ptxd->count_0 = len & 0xff;
+ ptxd->count_1 = (len >> 8) & 0xff;
+ ptxd->flags |= IR_DIS_CRC;
+ }
+ ptxd->flags |= AU_OWN;
+ wmb();
+
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The Tx ring has been full for longer than the watchdog timeout
+ * value; the transmitter is probably hung.
+ */
+static void au1k_tx_timeout(struct net_device *dev)
+{
+ u32 speed;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: tx timeout\n", dev->name);
+ speed = aup->speed;
+ aup->speed = 0;
+ au1k_irda_set_speed(dev, speed);
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+}
+
+static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct au1k_private *aup = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (aup->open)
+ ret = au1k_irda_set_speed(dev,
+ rq->ifr_baudrate);
+ else {
+ printk(KERN_ERR "%s ioctl: !netif_running\n",
+ dev->name);
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static const struct net_device_ops au1k_irda_netdev_ops = {
+ .ndo_open = au1k_irda_start,
+ .ndo_stop = au1k_irda_stop,
+ .ndo_start_xmit = au1k_irda_hard_xmit,
+ .ndo_tx_timeout = au1k_tx_timeout,
+ .ndo_do_ioctl = au1k_irda_ioctl,
+};
+
+static int au1k_irda_net_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct db_dest *pDB, *pDBfree;
+ int i, err, retval = 0;
+ dma_addr_t temp;
+
+ err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+ if (err)
+ goto out1;
+
+ dev->netdev_ops = &au1k_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override is the baudrate */
+ aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
+ IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
+
+ aup->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&aup->qos);
+
+ retval = -ENOMEM;
+
+ /* Tx ring follows rx ring + 512 bytes */
+ /* we need a 1k aligned buffer */
+ aup->rx_ring[0] = (struct ring_dest *)
+ dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
+ &temp);
+ if (!aup->rx_ring[0])
+ goto out2;
+
+ /* allocate the data buffers */
+ aup->db[0].vaddr =
+ dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ if (!aup->db[0].vaddr)
+ goto out3;
+
+ setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
+ pDBfree = NULL;
+ pDB = aup->db;
+ for (i = 0; i < (2 * NUM_IR_DESC); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr =
+ (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ /* attach a data buffer to each descriptor */
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->tx_ring[i]->count_0 = 0;
+ aup->tx_ring[i]->count_1 = 0;
+ aup->tx_ring[i]->flags = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ return 0;
+
+out3:
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+out2:
+ kfree(aup->rx_buff.head);
+out1:
+ printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ return retval;
+}
+
+static int au1k_irda_probe(struct platform_device *pdev)
+{
+ struct au1k_private *aup;
+ struct net_device *dev;
+ struct resource *r;
+ struct clk *c;
+ int err;
+
+ dev = alloc_irdadev(sizeof(struct au1k_private));
+ if (!dev)
+ return -ENOMEM;
+
+ aup = netdev_priv(dev);
+
+ aup->platdata = pdev->dev.platform_data;
+
+ err = -EINVAL;
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r)
+ goto out;
+
+ aup->irq_tx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!r)
+ goto out;
+
+ aup->irq_rx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto out;
+
+ err = -EBUSY;
+ aup->ioarea = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!aup->ioarea)
+ goto out;
+
+ /* bail out early if clock doesn't exist */
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto out;
+ }
+ clk_put(c);
+
+ aup->iobase = ioremap_nocache(r->start, resource_size(r));
+ if (!aup->iobase)
+ goto out2;
+
+ dev->irq = aup->irq_rx;
+
+ err = au1k_irda_net_init(dev);
+ if (err)
+ goto out3;
+ err = register_netdev(dev);
+ if (err)
+ goto out4;
+
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ return 0;
+
+out4:
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+out3:
+ iounmap(aup->iobase);
+out2:
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+out:
+ free_netdev(dev);
+ return err;
+}
+
+static int au1k_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1k_private *aup = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+
+ iounmap(aup->iobase);
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1k_irda_driver = {
+ .driver = {
+ .name = "au1000-irda",
+ },
+ .probe = au1k_irda_probe,
+ .remove = au1k_irda_remove,
+};
+
+module_platform_driver(au1k_irda_driver);
+
+MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
+MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
diff --git a/drivers/staging/irda/drivers/bfin_sir.c b/drivers/staging/irda/drivers/bfin_sir.c
new file mode 100644
index 000000000000..3151b580dbd6
--- /dev/null
+++ b/drivers/staging/irda/drivers/bfin_sir.c
@@ -0,0 +1,817 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+#include "bfin_sir.h"
+
+#ifdef CONFIG_SIR_BFIN_DMA
+#define DMA_SIR_RX_XCNT 10
+#define DMA_SIR_RX_YCNT (PAGE_SIZE / DMA_SIR_RX_XCNT)
+#define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250)
+#endif
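+/*
+ * With the values above the RX DMA runs as a 2D autobuffer:
+ * DMA_SIR_RX_XCNT bytes per row, DMA_SIR_RX_YCNT rows per PAGE_SIZE
+ * buffer, and DMA_SIR_RX_FLUSH_JIFS is roughly 16 ms worth of jiffies
+ * for flushing partially filled rows.
+ */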
+
+#if ANOMALY_05000447
+static int max_rate = 57600;
+#else
+static int max_rate = 115200;
+#endif
+
+static void turnaround_delay(int mtt)
+{
+ long ticks;
+
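+	/*
+	 * Clamp the turnaround time to at least 10 ms and round up to
+	 * scheduler ticks; for example with HZ = 250 a 10000 us mtt
+	 * becomes 1 + 10000 / 4000 = 3 ticks.
+	 */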
+ mtt = mtt < 10000 ? 10000 : mtt;
+ ticks = 1 + mtt / (USEC_PER_SEC / HZ);
+ schedule_timeout_uninterruptible(ticks);
+}
+
+static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
+{
+ int i;
+ struct resource *res;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = &pdev->resource[i];
+ switch (res->flags) {
+ case IORESOURCE_MEM:
+ sp->membase = (void __iomem *)res->start;
+ break;
+ case IORESOURCE_IRQ:
+ sp->irq = res->start;
+ break;
+ case IORESOURCE_DMA:
+ sp->rx_dma_channel = res->start;
+ sp->tx_dma_channel = res->end;
+ break;
+ default:
+ break;
+ }
+ }
+
+ sp->clk = get_sclk();
+#ifdef CONFIG_SIR_BFIN_DMA
+ sp->tx_done = 1;
+ init_timer(&(sp->rx_dma_timer));
+#endif
+}
+
+static void bfin_sir_stop_tx(struct bfin_sir_port *port)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+#endif
+
+ while (!(UART_GET_LSR(port) & THRE)) {
+ cpu_relax();
+ continue;
+ }
+
+ UART_CLEAR_IER(port, ETBEI);
+}
+
+static void bfin_sir_enable_tx(struct bfin_sir_port *port)
+{
+ UART_SET_IER(port, ETBEI);
+}
+
+static void bfin_sir_stop_rx(struct bfin_sir_port *port)
+{
+ UART_CLEAR_IER(port, ERBFI);
+}
+
+static void bfin_sir_enable_rx(struct bfin_sir_port *port)
+{
+ UART_SET_IER(port, ERBFI);
+}
+
+static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
+{
+ int ret = -EINVAL;
+ unsigned int quot;
+ unsigned short val, lsr, lcr;
+ static int utime;
+ int count = 10;
+
+ lcr = WLS(8);
+
+ switch (speed) {
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+
+ /*
+ * IRDA is not affected by anomaly 05000230, so there is no
+			 * need to tweak the divisor like the UART driver (which will
+ * slightly speed up the baud rate on us).
+ */
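+			/*
+			 * The +8*speed term rounds the divisor to the nearest
+			 * integer; assuming, say, a 100 MHz system clock at
+			 * 115200 baud this works out to
+			 * (100000000 + 921600) / 1843200 = 54.
+			 */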
+ quot = (port->clk + (8 * speed)) / (16 * speed);
+
+ do {
+ udelay(utime);
+ lsr = UART_GET_LSR(port);
+ } while (!(lsr & TEMT) && count--);
+
+		/* Microseconds needed to transmit one bit */
+ utime = 1000000 / speed + 1;
+
+ /* Clear UCEN bit to reset the UART state machine
+ * and control registers
+ */
+ val = UART_GET_GCTL(port);
+ val &= ~UCEN;
+ UART_PUT_GCTL(port, val);
+
+ /* Set DLAB in LCR to Access THR RBR IER */
+ UART_SET_DLAB(port);
+ SSYNC();
+
+ UART_PUT_DLL(port, quot & 0xFF);
+ UART_PUT_DLH(port, (quot >> 8) & 0xFF);
+ SSYNC();
+
+ /* Clear DLAB in LCR */
+ UART_CLEAR_DLAB(port);
+ SSYNC();
+
+ UART_PUT_LCR(port, lcr);
+
+ val = UART_GET_GCTL(port);
+ val |= UCEN;
+ UART_PUT_GCTL(port, val);
+
+ ret = 0;
+ break;
+ default:
+ printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
+ break;
+ }
+
+ val = UART_GET_GCTL(port);
+	/* If we do not set RPOLC, we cannot catch the receive interrupt.
+	 * This is related to the HW layout and the IR transceiver.
+ */
+ val |= UMOD_IRDA | RPOLC;
+ UART_PUT_GCTL(port, val);
+ return ret;
+}
+
+static int bfin_sir_is_receiving(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!(UART_GET_IER(port) & ERBFI))
+ return 0;
+ return self->rx_buff.state != OUTSIDE_FRAME;
+}
+
+#ifdef CONFIG_SIR_BFIN_PIO
+static void bfin_sir_tx_chars(struct net_device *dev)
+{
+ unsigned int chr;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (self->tx_buff.len != 0) {
+ chr = *(self->tx_buff.data);
+ UART_PUT_CHAR(port, chr);
+ self->tx_buff.data++;
+ self->tx_buff.len--;
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_stop_tx(port);
+ bfin_sir_enable_rx(port);
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ }
+}
+
+static void bfin_sir_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned char ch;
+
+ UART_CLEAR_LSR(port);
+ ch = UART_GET_CHAR(port);
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
+}
+
+static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ while ((UART_GET_LSR(port) & DR))
+ bfin_sir_rx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (UART_GET_LSR(port) & THRE)
+ bfin_sir_tx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_PIO */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+static void bfin_sir_dma_tx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!port->tx_done)
+ return;
+ port->tx_done = 0;
+
+ if (self->tx_buff.len == 0) {
+ self->stats.tx_packets++;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ port->tx_done = 1;
+ netif_wake_queue(dev);
+ return;
+ }
+
+ blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
+ (unsigned long)(self->tx_buff.data+self->tx_buff.len));
+ set_dma_config(port->tx_dma_channel,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
+ INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_start_addr(port->tx_dma_channel,
+ (unsigned long)(self->tx_buff.data));
+ set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
+ set_dma_x_modify(port->tx_dma_channel, 1);
+ enable_dma(port->tx_dma_channel);
+}
+
+static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
+ clear_dma_irqstat(port->tx_dma_channel);
+ bfin_sir_stop_tx(port);
+
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.len;
+ self->tx_buff.len = 0;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ port->tx_done = 1;
+ }
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void bfin_sir_dma_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int i;
+
+ UART_CLEAR_LSR(port);
+
+ for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
+}
+
+void bfin_sir_rx_dma_timeout(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int x_pos, pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
+ if (x_pos == DMA_SIR_RX_XCNT)
+ x_pos = 0;
+
+ pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;
+
+ if (pos > port->rx_dma_buf.tail) {
+ port->rx_dma_buf.tail = pos;
+ bfin_sir_dma_rx_chars(dev);
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short irqstat;
+
+ spin_lock(&self->lock);
+
+ port->rx_dma_nrows++;
+ port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
+ bfin_sir_dma_rx_chars(dev);
+ if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
+ port->rx_dma_nrows = 0;
+ port->rx_dma_buf.tail = 0;
+ }
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+
+ irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
+ clear_dma_irqstat(port->rx_dma_channel);
+ spin_unlock(&self->lock);
+
+ mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ dma_addr_t dma_handle;
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
+ return -EBUSY;
+ }
+
+ if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
+ free_dma(port->rx_dma_channel);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_SIR_BFIN_DMA
+
+ set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
+ set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
+
+ port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_handle, GFP_DMA);
+ port->rx_dma_buf.head = 0;
+ port->rx_dma_buf.tail = 0;
+ port->rx_dma_nrows = 0;
+
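+	/*
+	 * 2D autobuffered RX: each row is DMA_SIR_RX_XCNT bytes and
+	 * DMA_SIR_RX_YCNT rows wrap around one PAGE_SIZE buffer. An
+	 * interrupt fires per completed row; rx_dma_timer flushes any
+	 * partially filled row.
+	 */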
+ set_dma_config(port->rx_dma_channel,
+ set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
+ INTR_ON_ROW, DIMENSION_2D,
+ DATA_SIZE_8, DMA_SYNC_RESTART));
+ set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
+ set_dma_x_modify(port->rx_dma_channel, 1);
+ set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
+ set_dma_y_modify(port->rx_dma_channel, 1);
+ set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
+ enable_dma(port->rx_dma_channel);
+
+ port->rx_dma_timer.data = (unsigned long)(dev);
+ port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
+
+#else
+
+ if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
+ return -EBUSY;
+ }
+
+ if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
+ free_irq(port->irq, dev);
+ return -EBUSY;
+ }
+#endif
+
+ return 0;
+}
+
+static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
+{
+ unsigned short val;
+
+ bfin_sir_stop_rx(port);
+
+ val = UART_GET_GCTL(port);
+ val &= ~(UCEN | UMOD_MASK | RPOLC);
+ UART_PUT_GCTL(port, val);
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+ disable_dma(port->rx_dma_channel);
+ del_timer(&(port->rx_dma_timer));
+ dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
+#else
+ free_irq(port->irq+1, dev);
+ free_irq(port->irq, dev);
+#endif
+ free_dma(port->tx_dma_channel);
+ free_dma(port->rx_dma_channel);
+}
+
+#ifdef CONFIG_PM
+static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ if (self->open) {
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+ netif_device_detach(dev);
+ }
+
+ return 0;
+}
+static int bfin_sir_resume(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ struct bfin_sir_port *port;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ port = self->sir_port;
+ if (self->open) {
+ if (self->newspeed) {
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_startup(port, dev);
+ bfin_sir_set_speed(port, 9600);
+ bfin_sir_enable_rx(port);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+#else
+#define bfin_sir_suspend NULL
+#define bfin_sir_resume NULL
+#endif
+
+static void bfin_sir_send_work(struct work_struct *work)
+{
+ struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
+ struct net_device *dev = self->sir_port->dev;
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short val;
+ int tx_cnt = 10;
+
+ while (bfin_sir_is_receiving(dev) && --tx_cnt)
+ turnaround_delay(self->mtt);
+
+ bfin_sir_stop_rx(port);
+
+	/* To avoid losing the RX interrupt, we reset the IR function before
+	 * sending data. Setting the speed would also work, since that
+	 * resets the whole UART.
+ */
+ val = UART_GET_GCTL(port);
+ val &= ~(UMOD_MASK | RPOLC);
+ UART_PUT_GCTL(port, val);
+ SSYNC();
+ val |= UMOD_IRDA | RPOLC;
+ UART_PUT_GCTL(port, val);
+ SSYNC();
+ /* bfin_sir_set_speed(port, self->speed); */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ bfin_sir_dma_tx_chars(dev);
+#endif
+ bfin_sir_enable_tx(port);
+ netif_trans_update(dev);
+}
+
+static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ netif_stop_queue(dev);
+
+ self->mtt = irda_get_mtt(skb);
+
+ if (speed != self->speed && speed != -1)
+ self->newspeed = speed;
+
+ self->tx_buff.data = self->tx_buff.head;
+ if (skb->len == 0)
+ self->tx_buff.len = 0;
+ else
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
+
+ schedule_work(&self->work);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ if (self->open) {
+ ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
+ bfin_sir_enable_rx(port);
+ } else {
+ dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = bfin_sir_is_receiving(dev);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ return &self->stats;
+}
+
+static int bfin_sir_open(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int err;
+
+ self->newspeed = 0;
+ self->speed = 9600;
+
+ spin_lock_init(&self->lock);
+
+ err = bfin_sir_startup(port, dev);
+ if (err)
+ goto err_startup;
+
+ bfin_sir_set_speed(port, 9600);
+
+ self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENOMEM;
+ goto err_irlap;
+ }
+
+ INIT_WORK(&self->work, bfin_sir_send_work);
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ self->open = 1;
+ bfin_sir_enable_rx(port);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_irlap:
+ self->open = 0;
+ bfin_sir_shutdown(port, dev);
+err_startup:
+ return err;
+}
+
+static int bfin_sir_stop(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+
+ if (self->rxskb) {
+ dev_kfree_skb(self->rxskb);
+ self->rxskb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+ self->open = 0;
+
+ return 0;
+}
+
+static int bfin_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ return 0;
+}
+
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
+static int bfin_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ unsigned int baudrate_mask;
+ struct bfin_sir_port *sir_port;
+ int err;
+
+ if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \
+ per[pdev->id][3] == pdev->id) {
+ err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
+ if (err)
+ return err;
+ } else {
+ dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
+ return -ENODEV;
+ }
+
+ err = -ENOMEM;
+ sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
+ if (!sir_port)
+ goto err_mem_0;
+
+ bfin_sir_init_ports(sir_port, pdev);
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (!dev)
+ goto err_mem_1;
+
+ self = netdev_priv(dev);
+ self->dev = &pdev->dev;
+ self->sir_port = sir_port;
+ sir_port->dev = dev;
+
+ err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
+ if (err)
+ goto err_mem_2;
+ err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_3;
+
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ baudrate_mask = IR_9600;
+
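+	/* Each case below deliberately falls through, so a given maximum
+	 * rate also enables every slower rate down to 9600.
+	 */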
+ switch (max_rate) {
+ case 115200:
+ baudrate_mask |= IR_115200;
+ case 57600:
+ baudrate_mask |= IR_57600;
+ case 38400:
+ baudrate_mask |= IR_38400;
+ case 19200:
+ baudrate_mask |= IR_19200;
+ case 9600:
+ break;
+ default:
+ dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
+ }
+
+ self->qos.baud_rate.bits &= baudrate_mask;
+
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ kfree(self->tx_buff.head);
+err_mem_3:
+ kfree(self->rx_buff.head);
+err_mem_2:
+ free_netdev(dev);
+err_mem_1:
+ kfree(sir_port);
+err_mem_0:
+ peripheral_free_list(per[pdev->id]);
+ } else
+ platform_set_drvdata(pdev, sir_port);
+
+ return err;
+}
+
+static int bfin_sir_remove(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev = NULL;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(self->tx_buff.head);
+ kfree(self->rx_buff.head);
+ free_netdev(dev);
+ kfree(sir_port);
+
+ return 0;
+}
+
+static struct platform_driver bfin_ir_driver = {
+ .probe = bfin_sir_probe,
+ .remove = bfin_sir_remove,
+ .suspend = bfin_sir_suspend,
+ .resume = bfin_sir_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(bfin_ir_driver);
+
+module_param(max_rate, int, 0);
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
+
+MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
+MODULE_DESCRIPTION("Blackfin IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/bfin_sir.h b/drivers/staging/irda/drivers/bfin_sir.h
new file mode 100644
index 000000000000..d47cf14bb4a5
--- /dev/null
+++ b/drivers/staging/irda/drivers/bfin_sir.h
@@ -0,0 +1,93 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+
+#include <linux/serial.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#undef DRIVER_NAME
+
+#ifdef CONFIG_SIR_BFIN_DMA
+struct dma_rx_buf {
+ char *buf;
+ int head;
+ int tail;
+};
+#endif
+
+struct bfin_sir_port {
+ unsigned char __iomem *membase;
+ unsigned int irq;
+ unsigned int lsr;
+ unsigned long clk;
+ struct net_device *dev;
+#ifdef CONFIG_SIR_BFIN_DMA
+ int tx_done;
+ struct dma_rx_buf rx_dma_buf;
+ struct timer_list rx_dma_timer;
+ int rx_dma_nrows;
+#endif
+ unsigned int tx_dma_channel;
+ unsigned int rx_dma_channel;
+};
+
+struct bfin_sir_port_res {
+ unsigned long base_addr;
+ int irq;
+ unsigned int rx_dma_channel;
+ unsigned int tx_dma_channel;
+};
+
+struct bfin_sir_self {
+ struct bfin_sir_port *sir_port;
+ spinlock_t lock;
+ unsigned int open;
+ int speed;
+ int newspeed;
+
+ struct sk_buff *txskb;
+ struct sk_buff *rxskb;
+ struct net_device_stats stats;
+ struct device *dev;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ struct work_struct work;
+ int mtt;
+};
+
+#define DRIVER_NAME "bfin_sir"
+
+#include <asm/bfin_serial.h>
+
+static const unsigned short per[][4] = {
+ /* rx pin tx pin NULL uart_number */
+ {P_UART0_RX, P_UART0_TX, 0, 0},
+ {P_UART1_RX, P_UART1_TX, 0, 1},
+ {P_UART2_RX, P_UART2_TX, 0, 2},
+ {P_UART3_RX, P_UART3_TX, 0, 3},
+};
diff --git a/drivers/staging/irda/drivers/donauboe.c b/drivers/staging/irda/drivers/donauboe.c
new file mode 100644
index 000000000000..b337e6d23a88
--- /dev/null
+++ b/drivers/staging/irda/drivers/donauboe.c
@@ -0,0 +1,1732 @@
+/*****************************************************************
+ *
+ * Filename: donauboe.c
+ * Version: 2.17
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Documentation: http://libxg.free.fr/irda/lib-irda.html
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: Paul Bristow <paul.bristow@technologist.com>
+ * Modified: Mon Nov 11 19:10:05 1999
+ * Modified: James McKenzie <james@fishsoup.dhs.org>
+ * Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
+ * Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
+ * Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
+ * Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
+ * Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
+ * Modified: 2.15 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
+ * Modified: 2.18 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.18 ven jan 10 03:14:16 2003 Change probe default options
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ ********************************************************************/
+
+/* Look at toshoboe.h (currently in include/net/irda) for details of */
+/* Where to get documentation on the chip */
+
+/* See below for a description of the logic in this driver */
+
+/* User serviceable parts */
+/* USE_PROBE Create the code which probes the chip and does a few tests */
+/* do_probe module parameter Enables this code */
+/* Probe code is very useful for understanding how the hardware works */
+/* Use it with various combinations of TT_LEN, RX_LEN */
+/* Strongly recommended, disable if the probe fails on your machine */
+/* and send me <james@fishsoup.dhs.org> the output of dmesg */
+#define USE_PROBE 1
+#undef USE_PROBE
+
+/* Trace Transmit ring, interrupts, Receive ring or not ? */
+#define PROBE_VERBOSE 1
+
+/* Debug option, examine sent and received raw data */
+/* Irdadump is better, but does not see all packets. enable it if you want. */
+#undef DUMP_PACKETS
+
+/* MIR mode has not been tested. Some behaviour is different */
+/* Seems to work against an Ericsson R520 for me. -Martin */
+#define USE_MIR
+
+/* Schedule back to back hardware transmits wherever possible, otherwise */
+/* we need an interrupt for every frame; unset this if oboe works for a bit and */
+/* then hangs */
+#define OPTIMIZE_TX
+
+/* Set the number of slots in the rings */
+/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
+/* these */
+
+#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
+#define TX_SLOTS 8
+#define RX_SLOTS 8
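+/* RING_SIZE has to stay in step with TX_SLOTS/RX_SLOTS: the value above */
+/* tells the chip to use 8 slots in each direction */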
+
+
+/* Less user serviceable parts below here */
+
+/* Test, Transmit and receive buffer sizes, adjust at your peril */
+/* remarks: nfs usually needs 1k blocks */
+/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
+/* remarks: test accepts large blocks. Standard is 0x80 */
+/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
+/* When 3 or more slots are needed for each test packet, */
+/* data received in the first slots is overwritten, even */
+/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
+#define TT_LEN 0x80
+#define TX_LEN 0xc00
+#define RX_LEN 0xc04
+/* Real transmitted length (SIR mode) is about 14+(2%*TX_LEN) bytes */
+/* longer than the user-defined length (see async_wrap_skb) and is less than 4K */
+/* Real received length (max RX_LEN) differs from the user-defined */
+/* length only by the CRC (2 or 4 bytes) */
+#define BUF_SAFETY 0x7a
+#define RX_BUF_SZ (RX_LEN)
+#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
+
+
+/* Logic of the netdev part of this driver */
+
+/* The RX ring is filled with buffers, when a packet arrives */
+/* it is DMA'd into the buffer which is marked used and RxDone called */
+/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
+/* the packet off upstairs */
+
+/* The transmitter on the oboe chip can work in one of two modes */
+/* for each ring->tx[] the transmitter can either */
+/* a) transmit the packet, leave the transmitter enabled and proceed to */
+/* the next ring */
+/* OR */
+/* b) transmit the packet, switch off the transmitter and issue TxDone */
+
+/* All packets are entered into the ring in mode b), if the ring was */
+/* empty the transmitter is started. */
+
+/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
+/* more than one packet, all but the last are set to mode a) [HOWEVER */
+/* the hardware may not notice this, this is why we start in mode b) ] */
+/* then restart the transmitter */
+
+/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
+/* if the ring isn't empty */
+
+/* Speed changes are delayed until the TxRing is empty */
+/* mtt is handled by generating packets with bad CRCs, before the data */
+
+/* TODO: */
+/* check the mtt works ok */
+/* finish the watchdog */
+
+/* No user serviceable parts below here */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include <asm/io.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+//#include <net/irda/irmod.h>
+//#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/crc.h>
+
+#include "donauboe.h"
+
+#define INB(port) inb_p(port)
+#define OUTB(val,port) outb_p(val,port)
+#define OUTBP(val,port) outb_p(val,port)
+
+#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
+
+#if PROBE_VERBOSE
+#define PROBE_DEBUG(args...) (printk (args))
+#else
+#define PROBE_DEBUG(args...) ;
+#endif
+
+/* Set the DMA to be byte at a time */
+#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
+#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
+#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
+
+static const struct pci_device_id toshoboe_pci_tbl[] = {
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
+
+#define DRIVER_NAME "toshoboe"
+static char *driver_name = DRIVER_NAME;
+
+static int max_baud = 4000000;
+#ifdef USE_PROBE
+static bool do_probe = false;
+#endif
+
+
+/**********************************************************************/
+static int
+toshoboe_checkfcs (unsigned char *buf, int len)
+{
+ int i;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ fcs.value = INIT_FCS;
+
+ for (i = 0; i < len; ++i)
+ fcs.value = irda_fcs (fcs.value, *(buf++));
+
+ return fcs.value == GOOD_FCS;
+}
+
+/***********************************************************************/
+/* Generic chip handling code */
+#ifdef DUMP_PACKETS
+static unsigned char dump[50];
+static void
+_dumpbufs (unsigned char *data, int len, char tete)
+{
+  int i, j;
+  char head = tete;
+
+  for (i = 0; i < len; i += 16) {
+    for (j = 0; j < 16 && i + j < len; j++)
+      sprintf (&dump[3 * j], "%02x.", data[i + j]);
+    dump[3 * j] = 0;
+    pr_debug("%c%s\n", head, dump);
+    head = '+';
+  }
+}
+#endif
+
+#ifdef USE_PROBE
+/* Dump the registers */
+static void
+toshoboe_dumpregs (struct toshoboe_cb *self)
+{
+ __u32 ringbase;
+
+ ringbase = INB (OBOE_RING_BASE0) << 10;
+ ringbase |= INB (OBOE_RING_BASE1) << 18;
+ ringbase |= INB (OBOE_RING_BASE2) << 26;
+
+ printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
+ printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
+ self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
+ self->int_sip);
+ printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
+ INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
+ printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
+ INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
+ printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
+ INB (OBOE_CONFIG1), INB (OBOE_STATUS));
+ printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
+ INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
+ INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
+ printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
+ INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
+ INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
+ printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
+ INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
+ INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
+
+ if (self->ring)
+ {
+ int i;
+ ringbase = virt_to_bus (self->ring);
+ printk (KERN_ERR "Ring at %08x:\n", ringbase);
+ printk (KERN_ERR "RX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ printk ("\n");
+ printk (KERN_ERR "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ printk ("\n");
+ }
+}
+#endif
+
+/*Don't let the chip look at memory */
+static void
+toshoboe_disablebm (struct toshoboe_cb *self)
+{
+ __u8 command;
+ pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_byte (self->pdev, PCI_COMMAND, command);
+
+}
+
+/* Shutdown the chip and point the taskfile reg somewhere else */
+static void
+toshoboe_stopchip (struct toshoboe_cb *self)
+{
+ /*Disable interrupts */
+ OUTB (0x0, OBOE_IER);
+ /*Disable DMA, Disable Rx, Disable Tx */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ /*Disable SIR MIR FIR, Tx and Rx */
+ OUTB (0x00, OBOE_ENABLEH);
+ /*Point the ring somewhere safe */
+ OUTB (0x3f, OBOE_RING_BASE2);
+ OUTB (0xff, OBOE_RING_BASE1);
+ OUTB (0xff, OBOE_RING_BASE0);
+
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Why */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*switch it off */
+ OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
+
+ toshoboe_disablebm (self);
+}
+
+/* Transmitter initialization */
+static void
+toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
+{
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+}
+
+/*Set the baud rate */
+static void
+toshoboe_setbaud (struct toshoboe_cb *self)
+{
+ __u16 pconfig = 0;
+ __u8 config0l = 0;
+
+ pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed);
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+#ifdef USE_MIR
+ case 1152000:
+#endif
+ case 4000000:
+ break;
+ default:
+
+ printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
+ self->speed);
+ return;
+ }
+
+ switch (self->speed)
+ {
+ /* For SIR the preamble is done by adding XBOFs */
+ /* to the packet */
+ /* set to filtered SIR mode, filter looks for BOF and EOF */
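+      /* The BAUDSHIFT values below follow 115200 / speed - 1 */
+      /* (e.g. 2400 baud -> 48 - 1 = 47), all with pulse width 25 */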
+ case 2400:
+ pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 4800:
+ pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 9600:
+ pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 19200:
+ pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 38400:
+ pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 57600:
+ pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 115200:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ default:
+ /*Set to packet based reception */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ break;
+ }
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ config0l = OBOE_CONFIG0L_ENSIR;
+ if (self->async)
+ {
+ /*Set to character based reception */
+ /*System will lock if MAXLEN=0 */
+ /*so have to be careful */
+ OUTB (0x01, OBOE_MAXLENH);
+ OUTB (0x01, OBOE_MAXLENL);
+ OUTB (0x00, OBOE_MAXLENH);
+ }
+ else
+ {
+ /*Set to packet based reception */
+ config0l |= OBOE_CONFIG0L_ENSIRF;
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ }
+ break;
+
+#ifdef USE_MIR
+ /* MIR mode */
+ /* Set for 16 bit CRC and enable MIR */
+ /* Preamble now handled by the chip */
+ case 1152000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
+ pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
+ break;
+#endif
+ /* FIR mode */
+ /* Set for 32 bit CRC and enable FIR */
+ /* Preamble handled by the chip */
+ case 4000000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ /* Documentation says 14, but toshiba use 15 in their drivers */
+ pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_ENFIR;
+ break;
+ }
+
+ /* Copy into new PHY config buffer */
+ OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
+ OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
+ OUTB (config0l, OBOE_CONFIG0L);
+
+ /* Now make OBOE copy from new PHY to current PHY */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+
+ /* speed change executed */
+ self->new_speed = 0;
+ self->io.speed = self->speed;
+}
+
+/*Let the chip look at memory */
+static void
+toshoboe_enablebm (struct toshoboe_cb *self)
+{
+ pci_set_master (self->pdev);
+}
+
+/*setup the ring */
+static void
+toshoboe_initring (struct toshoboe_cb *self)
+{
+ int i;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->ring->tx[i].len = 0;
+ self->ring->tx[i].control = 0x00;
+ self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->ring->rx[i].len = RX_LEN;
+ self->ring->rx[i].len = 0;
+ self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
+ self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
+ }
+}
+
+static void
+toshoboe_resetptrs (struct toshoboe_cb *self)
+{
+  /* Can reset pointers by twiddling DMA */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
+ self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+}
+
+/* Called in locked state */
+static void
+toshoboe_initptrs (struct toshoboe_cb *self)
+{
+
+ /* spin_lock_irqsave(self->spinlock, flags); */
+ /* save_flags (flags); */
+
+  /* Can reset pointers by twiddling DMA */
+ toshoboe_resetptrs (self);
+
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->txpending = 0;
+
+ /* spin_unlock_irqrestore(self->spinlock, flags); */
+ /* restore_flags (flags); */
+}
+
+/* Wake the chip up and get it looking at the rings */
+/* Called in locked state */
+static void
+toshoboe_startchip (struct toshoboe_cb *self)
+{
+ __u32 physaddr;
+
+ toshoboe_initring (self);
+ toshoboe_enablebm (self);
+ OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
+ OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
+
+ /* Stop the clocks */
+ OUTB (0, OBOE_ENABLEH);
+
+ /*Set size of rings */
+ OUTB (RING_SIZE, OBOE_RING_SIZE);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Enable ints */
+ OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
+ OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Set the maximum packet length to 0xfff (4095) */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+ /*Shutdown DMA */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+
+ /*Find out where the rings live */
+ physaddr = virt_to_bus (self->ring);
+
+ IRDA_ASSERT ((physaddr & 0x3ff) == 0,
+               printk (KERN_ERR DRIVER_NAME ": ring not correctly aligned\n");
+ return;);
+
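+  /* Only address bits 31:10 are programmed below (8 + 8 + 6 bits), */
+  /* which is why the ring has to be 1KB aligned */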
+ OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
+ OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
+ OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
+
+ /*Enable DMA controller in byte mode and RX */
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+
+ /* Start up the clocks */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*set to sensible speed */
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+}
+
+static void
+toshoboe_isntstuck (struct toshoboe_cb *self)
+{
+}
+
+static void
+toshoboe_checkstuck (struct toshoboe_cb *self)
+{
+ unsigned long flags;
+
+ if (0)
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ /* This will reset the chip completely */
+ printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
+
+ toshoboe_stopchip (self);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+}
+
+/*Generate packet of about mtt us long */
+static int
+toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
+{
+ int xbofs;
+
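+  /* Convert mtt (us) into the number of 8-bit XBOF bytes that take */
+  /* that long to send, e.g. a 10000 us mtt at 115200 baud gives */
+  /* (10000 / 100) * 115200 / 80000 + 1 = 145 bytes */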
+ xbofs = ((int) (mtt/100)) * (int) (self->speed);
+ xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
+ xbofs++;
+
+ pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n",
+ xbofs, mtt, self->speed);
+
+ if (xbofs > TX_LEN)
+ {
+ printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
+ xbofs, TX_LEN);
+ xbofs = TX_LEN;
+ }
+
+  /* xbofs will do for SIR, MIR and FIR; SIR mode doesn't generate a checksum anyway */
+ memset (buf, XBOF, xbofs);
+
+ return xbofs;
+}
+
+#ifdef USE_PROBE
+/***********************************************************************/
+/* Probe code */
+
+static void
+toshoboe_dumptx (struct toshoboe_cb *self)
+{
+ int i;
+ PROBE_DEBUG(KERN_WARNING "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ PROBE_DEBUG(" [%d]\n",self->speed);
+}
+
+static void
+toshoboe_dumprx (struct toshoboe_cb *self, int score)
+{
+ int i;
+ PROBE_DEBUG(" %d\nRX:",score);
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ PROBE_DEBUG("\n");
+}
+
+static inline int
+stuff_byte (__u8 byte, __u8 * buf)
+{
+ switch (byte)
+ {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
+ case CE:
+ /* Insert transparently coded */
+ buf[0] = CE; /* Send link escape */
+ buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
+ return 2;
+ /* break; */
+ default:
+ /* Non-special value, no transparency required */
+ buf[0] = byte;
+ return 1;
+ /* break; */
+ }
+}
+
+static irqreturn_t
+toshoboe_probeinterrupt (int irq, void *dev_id)
+{
+ struct toshoboe_cb *self = dev_id;
+ __u8 irqstat;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp;
+
+ self->int_tx++;
+ PROBE_DEBUG("T");
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ self->int_tx+=100;
+ PROBE_DEBUG("S");
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+ }
+ }
+
+ if (irqstat & OBOE_INT_RXDONE) {
+ self->int_rx++;
+ PROBE_DEBUG("R"); }
+ if (irqstat & OBOE_INT_TXUNDER) {
+ self->int_txunder++;
+ PROBE_DEBUG("U"); }
+ if (irqstat & OBOE_INT_RXOVER) {
+ self->int_rxover++;
+ PROBE_DEBUG("O"); }
+ if (irqstat & OBOE_INT_SIP) {
+ self->int_sip++;
+ PROBE_DEBUG("I"); }
+ return IRQ_HANDLED;
+}
+
+static int
+toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
+{
+ int i;
+ int len = 0;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ if (fir)
+ {
+ memset (buf, 0, TT_LEN);
+ return TT_LEN;
+ }
+
+ fcs.value = INIT_FCS;
+
+ memset (buf, XBOF, 10);
+ len += 10;
+ buf[len++] = BOF;
+
+ for (i = 0; i < TT_LEN; ++i)
+ {
+ len += stuff_byte (i, buf + len);
+ fcs.value = irda_fcs (fcs.value, i);
+ }
+
+ len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
+ len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
+ buf[len++] = EOF;
+ len++;
+ return len;
+}
+
+static int
+toshoboe_probefail (struct toshoboe_cb *self, char *msg)
+{
+  printk (KERN_ERR DRIVER_NAME ": probe(%d) failed %s\n", self->speed, msg);
+ toshoboe_dumpregs (self);
+ toshoboe_stopchip (self);
+ free_irq (self->io.irq, (void *) self);
+ return 0;
+}
+
+static int
+toshoboe_numvalidrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if ((self->ring->rx[i].control & 0xe0) == 0)
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_numrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_probe (struct toshoboe_cb *self)
+{
+ int i, j, n;
+#ifdef USE_MIR
+ static const int bauds[] = { 9600, 115200, 4000000, 1152000 };
+#else
+ static const int bauds[] = { 9600, 115200, 4000000 };
+#endif
+ unsigned long flags;
+
+ if (request_irq (self->io.irq, toshoboe_probeinterrupt,
+ self->io.irqflags, "toshoboe", (void *) self))
+ {
+ printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
+ self->io.irq);
+ return 0;
+ }
+
+ /* test 1: SIR filter and back to back */
+
+ for (j = 0; j < ARRAY_SIZE(bauds); ++j)
+ {
+ int fir = (j > 1);
+ toshoboe_stopchip (self);
+
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ /*Address is already setup */
+ toshoboe_startchip (self);
+ self->int_rx = self->int_tx = 0;
+ self->speed = bauds[j];
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->ring->tx[self->txs].control =
+/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
+/* MIR: all received data is stored in one slot */
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ | OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ toshoboe_dumptx (self);
+ /* Turn on TX and RX and loopback */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ n = fir ? 1 : 4;
+ while (toshoboe_numvalidrcvs (self) != n)
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "filter test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+
+ n = fir ? 203 : 102;
+ while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "interrupt test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ }
+
+ /* test 2: SIR in char at a time */
+
+ toshoboe_stopchip (self);
+ self->int_rx = self->int_tx = 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->async = 1;
+ self->speed = 115200;
+ toshoboe_setbaud (self);
+ self->ring->tx[self->txs].control =
+ OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
+ self->ring->tx[self->txs].len = 4;
+
+ ((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
+ ((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
+ ((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
+ ((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
+ toshoboe_dumptx (self);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ while (toshoboe_numvalidrcvs (self) != 4)
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async test");
+ udelay (100);
+ i++;
+ }
+
+ while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async interrupt test");
+ udelay (100);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ self->async = 0;
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_stopchip (self);
+
+ free_irq (self->io.irq, (void *) self);
+
+ printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
+
+ return 1;
+}
+#endif
+
+/******************************************************************/
+/* Netdev style code */
+
+/* Transmit something */
+static netdev_tx_t
+toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ __s32 speed;
+ int mtt, len, ctl;
+ unsigned long flags;
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; );
+
+ pr_debug("%s.tx:%x(%x)%x\n",
+ __func__, skb->len, self->txpending, INB(OBOE_ENABLEH));
+ if (!cb->magic) {
+ pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic);
+#ifdef DUMP_PACKETS
+ _dumpbufs(skb->data,skb->len,'>');
+#endif
+ }
+
+ /* change speed pending, wait for its execution */
+ if (self->new_speed)
+ return NETDEV_TX_BUSY;
+
+ /* device stopped (apm) wait for restart */
+ if (self->stopped)
+ return NETDEV_TX_BUSY;
+
+ toshoboe_checkstuck (self);
+
+ /* Check if we need to change the speed */
+ /* But not now. Wait after transmission if mtt not required */
+ speed=irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1))
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending || skb->len)
+ {
+ self->new_speed = speed;
+          pr_debug("%s: Queued TxDone scheduled speed change %d\n",
+ __func__, speed);
+ /* if no data, that's all! */
+ if (!skb->len)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+ }
+ /* True packet, go on, but */
+ /* do not accept anything before change speed execution */
+ netif_stop_queue(dev);
+ /* ready to process TxDone interrupt */
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+ else
+ {
+ /* idle and no data, change speed now */
+ self->speed = speed;
+ toshoboe_setbaud (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+ }
+
+ }
+
+ if ((mtt = irda_get_mtt(skb)))
+ {
+ /* This is fair since the queue should be empty anyway */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* If in SIR mode we need to generate a string of XBOFs */
+ /* In MIR and FIR we need to generate a string of data */
+ /* which we will add a wrong checksum to */
+
+ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
+ pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending);
+ if (mtt)
+ {
+ self->ring->tx[self->txs].len = mtt & 0xfff;
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
+ }
+#ifdef USE_MIR
+ else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC;
+ }
+#endif
+ self->ring->tx[self->txs].control = ctl;
+
+ OUTB (0x0, OBOE_ENABLEH);
+ /* It is only a timer. Do not send mtt packet outside! */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ }
+ else
+ {
+ printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+
+#ifdef DUMP_PACKETS
+_dumpbufs(skb->data,skb->len,'>');
+#endif
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ pr_debug("%s.ful:%x(%x)%x\n",
+ __func__, skb->len, self->ring->tx[self->txs].control,
+ self->txpending);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
+ {
+ len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
+ }
+ else
+ {
+ len = skb->len;
+ skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len);
+ }
+ self->ring->tx[self->txs].len = len & 0x0fff;
+
+  /* Sometimes the HW doesn't see us assert RTCENTX in the interrupt code */
+  /* later. To play it safe, we guarantee that the last packet to be */
+  /* transmitted has RTCENTX set. */
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_SIP ;
+ }
+ self->ring->tx[self->txs].control = ctl;
+
+ /* If transmitter is idle start in one-shot mode */
+
+ if (!self->txpending)
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*interrupt handler */
+static irqreturn_t
+toshoboe_interrupt (int irq, void *dev_id)
+{
+ struct toshoboe_cb *self = dev_id;
+ __u8 irqstat;
+ struct sk_buff *skb = NULL;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ toshoboe_isntstuck (self);
+
+/* Txdone */
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp, txpc;
+ int i;
+
+ txp = self->txpending;
+ self->txpending = 0;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
+ self->txpending++;
+ }
+ pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending);
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+
+ /* Got anything queued ? start it together */
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txpc = txp;
+#ifdef OPTIMIZE_TX
+ while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txp = txpc;
+ txpc++;
+ txpc %= TX_SLOTS;
+ self->netdev->stats.tx_packets++;
+ if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
+ }
+ self->netdev->stats.tx_packets--;
+#else
+ self->netdev->stats.tx_packets++;
+#endif
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ }
+
+ if ((!self->txpending) && (self->new_speed))
+ {
+ self->speed = self->new_speed;
+ pr_debug("%s: Executed TxDone scheduled speed change %d\n",
+ __func__, self->speed);
+ toshoboe_setbaud (self);
+ }
+
+ /* Tell network layer that we want more frames */
+ if (!self->new_speed)
+ netif_wake_queue(self->netdev);
+ }
+
+ if (irqstat & OBOE_INT_RXDONE)
+ {
+ while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
+ {
+ int len = self->ring->rx[self->rxs].len;
+ skb = NULL;
+ pr_debug("%s.rcv:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
+
+#ifdef DUMP_PACKETS
+_dumpbufs(self->rx_bufs[self->rxs],len,'<');
+#endif
+
+ if (self->ring->rx[self->rxs].control == 0)
+ {
+ __u8 enable = INB (OBOE_ENABLEH);
+
+ /* In SIR mode we need to check the CRC as this */
+ /* hasn't been done by the hardware */
+ if (enable & OBOE_ENABLEH_SIRON)
+ {
+ if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
+ len = 0;
+ /*Trim off the CRC */
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable);
+ }
+
+#ifdef USE_MIR
+ else if (enable & OBOE_ENABLEH_MIRON)
+ {
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable);
+ }
+#endif
+ else if (enable & OBOE_ENABLEH_FIRON)
+ {
+ if (len > 3)
+ len -= 4; /*FIXME: check this */
+ else
+ len = 0;
+ pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable);
+ }
+ else
+ pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable);
+
+ if (len)
+ {
+ skb = dev_alloc_skb (len + 1);
+ if (skb)
+ {
+ skb_reserve (skb, 1);
+
+ skb_put (skb, len);
+ skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
+ len);
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons (ETH_P_IRDA);
+ }
+ else
+ {
+ printk (KERN_INFO
+ "%s(), memory squeeze, dropping frame.\n",
+ __func__);
+ }
+ }
+ }
+ else
+ {
+ /* TODO: =========================================== */
+ /* if OBOE_CTL_RX_LENGTH, our buffers are too small */
+ /* (MIR or FIR) data is lost. */
+            /* (SIR) data is split across several slots. */
+            /* we have to join all the received buffers */
+            /* into one large buffer before checking the CRC. */
+ pr_debug("%s.err:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
+ }
+
+ self->ring->rx[self->rxs].len = 0x0;
+ self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
+
+ self->rxs++;
+ self->rxs %= RX_SLOTS;
+
+ if (skb)
+ netif_rx (skb);
+
+ }
+ }
+
+ if (irqstat & OBOE_INT_TXUNDER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
+ }
+ if (irqstat & OBOE_INT_RXOVER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
+ }
+/* This must be useful for something... */
+ if (irqstat & OBOE_INT_SIP)
+ {
+ self->int_sip++;
+ pr_debug("%s.sip:%x(%x)%x\n",
+ __func__, self->int_sip, irqstat, self->txpending);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static int
+toshoboe_net_open (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int rc;
+
+ self = netdev_priv(dev);
+
+ if (self->async)
+ return -EBUSY;
+
+ if (self->stopped)
+ return 0;
+
+ rc = request_irq (self->io.irq, toshoboe_interrupt,
+ IRQF_SHARED, dev->name, self);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open (dev, &self->qos, driver_name);
+
+ self->irdad = 1;
+
+ return 0;
+}
+
+static int
+toshoboe_net_close (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+ self = netdev_priv(dev);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close (self->irlap);
+ self->irlap = NULL;
+
+ self->irdad = 0;
+
+ free_irq (self->io.irq, (void *) self);
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ return 0;
+}
+
+/*
+ * Function toshoboe_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int
+toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT (self != NULL, return -1; );
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ switch (cmd)
+ {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ /* This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+      pr_debug("%s(BANDWIDTH), %s, (%X/%ld)\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate);
+ if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* self->speed=irq->ifr_baudrate; */
+ /* toshoboe_setbaud(self); */
+ /* Just change speed once - inserted by Paul Bristow */
+ self->new_speed = irq->ifr_baudrate;
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n",
+ __func__, dev->name,
+ INB(OBOE_STATUS), capable(CAP_NET_ADMIN));
+ if (!capable (CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy (self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
+ pr_debug("%s(RECEIVING), %s, (%X/%x)\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving);
+ break;
+ default:
+ pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ ret = -EOPNOTSUPP;
+ }
+out:
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return ret;
+
+}
+
+MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
+MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
+MODULE_LICENSE("GPL");
+
+module_param (max_baud, int, 0);
+MODULE_PARM_DESC(max_baud, "Maximum baud rate");
+
+#ifdef USE_PROBE
+module_param (do_probe, bool, 0);
+MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
+#endif
+
+static void
+toshoboe_close (struct pci_dev *pci_dev)
+{
+ int i;
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+
+ IRDA_ASSERT (self != NULL, return; );
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ kfree (self->tx_bufs[i]);
+ self->tx_bufs[i] = NULL;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ kfree (self->rx_bufs[i]);
+ self->rx_bufs[i] = NULL;
+ }
+
+ unregister_netdev(self->netdev);
+
+ kfree (self->ringbuf);
+ self->ringbuf = NULL;
+ self->ring = NULL;
+
+ free_netdev(self->netdev);
+}
+
+static const struct net_device_ops toshoboe_netdev_ops = {
+ .ndo_open = toshoboe_net_open,
+ .ndo_stop = toshoboe_net_close,
+ .ndo_start_xmit = toshoboe_hard_xmit,
+ .ndo_do_ioctl = toshoboe_net_ioctl,
+};
+
+static int
+toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
+{
+ struct toshoboe_cb *self;
+ struct net_device *dev;
+ int i = 0;
+ int ok = 0;
+ int err;
+
+ if ((err=pci_enable_device(pci_dev)))
+ return err;
+
+ dev = alloc_irdadev(sizeof (struct toshoboe_cb));
+ if (dev == NULL)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
+ "IrDA control block\n");
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ self->pdev = pci_dev;
+ self->base = pci_resource_start(pci_dev,0);
+
+ self->io.fir_base = self->base;
+ self->io.fir_ext = OBOE_IO_EXTENT;
+ self->io.irq = pci_dev->irq;
+ self->io.irqflags = IRQF_SHARED;
+
+ self->speed = self->io.speed = 9600;
+ self->async = 0;
+
+ /* Lock the port that we need */
+ if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
+ ,self->io.fir_base);
+ err = -EBUSY;
+ goto freeself;
+ }
+
+ spin_lock_init(&self->spinlock);
+
+ irda_init_max_qos_capabilies (&self->qos);
+ self->qos.baud_rate.bits = 0;
+
+ if (max_baud >= 2400)
+ self->qos.baud_rate.bits |= IR_2400;
+ /*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
+ if (max_baud >= 9600)
+ self->qos.baud_rate.bits |= IR_9600;
+ if (max_baud >= 19200)
+ self->qos.baud_rate.bits |= IR_19200;
+ if (max_baud >= 115200)
+ self->qos.baud_rate.bits |= IR_115200;
+#ifdef USE_MIR
+ if (max_baud >= 1152000)
+ {
+ self->qos.baud_rate.bits |= IR_1152000;
+ }
+#endif
+ if (max_baud >= 4000000)
+ {
+ self->qos.baud_rate.bits |= (IR_4000000 << 8);
+ }
+
+ /*FIXME: work this out... */
+ self->qos.min_turn_time.bits = 0xff;
+
+ irda_qos_bits_to_value (&self->qos);
+
+ /* Allocate twice the size to guarantee alignment */
+ self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
+ if (!self->ringbuf)
+ {
+ err = -ENOMEM;
+ goto freeregion;
+ }
+
+#if (BITS_PER_LONG == 64)
+#error broken on 64-bit: casts pointer to 32-bit, and then back to pointer.
+#endif
+
+ /*We need to align the taskfile on a taskfile size boundary */
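+  /* (the buffer was allocated at twice OBOE_RING_LEN above, so rounding */
+  /* its start address up to the next OBOE_RING_LEN multiple always */
+  /* leaves a full OBOE_RING_LEN bytes inside the allocation) */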
+ {
+ unsigned long addr;
+
+ addr = (__u32) self->ringbuf;
+ addr &= ~(OBOE_RING_LEN - 1);
+ addr += OBOE_RING_LEN;
+ self->ring = (struct OboeRing *) addr;
+ }
+
+ memset (self->ring, 0, OBOE_RING_LEN);
+ self->io.mem_base = (__u32) self->ring;
+
+ ok = 1;
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
+ if (!self->tx_bufs[i])
+ ok = 0;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
+ if (!self->rx_bufs[i])
+ ok = 0;
+ }
+
+ if (!ok)
+ {
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+
+#ifdef USE_PROBE
+ if (do_probe)
+ if (!toshoboe_probe (self))
+ {
+ err = -ENODEV;
+ goto freebufs;
+ }
+#endif
+
+ SET_NETDEV_DEV(dev, &pci_dev->dev);
+ dev->netdev_ops = &toshoboe_netdev_ops;
+
+ err = register_netdev(dev);
+ if (err)
+ {
+ printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+ printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
+
+ pci_set_drvdata(pci_dev,self);
+
+ printk (KERN_INFO DRIVER_NAME ": Using multiple tasks\n");
+
+ return 0;
+
+freebufs:
+ for (i = 0; i < TX_SLOTS; ++i)
+ kfree (self->tx_bufs[i]);
+ for (i = 0; i < RX_SLOTS; ++i)
+ kfree (self->rx_bufs[i]);
+ kfree(self->ringbuf);
+
+freeregion:
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+freeself:
+ free_netdev(dev);
+
+ return err;
+}
+
+static int
+toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
+{
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+ unsigned long flags;
+ int i = 10;
+
+ if (!self || self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+/* Flush all packets */
+ while ((i--) && (self->txpending))
+ msleep(10);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_stopchip (self);
+ self->stopped = 1;
+ self->txpending = 0;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static int
+toshoboe_wakeup (struct pci_dev *pci_dev)
+{
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+ unsigned long flags;
+
+ if (!self || !self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_startchip (self);
+ self->stopped = 0;
+
+ netif_wake_queue(self->netdev);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static struct pci_driver donauboe_pci_driver = {
+ .name = "donauboe",
+ .id_table = toshoboe_pci_tbl,
+ .probe = toshoboe_open,
+ .remove = toshoboe_close,
+ .suspend = toshoboe_gotosleep,
+ .resume = toshoboe_wakeup
+};
+
+module_pci_driver(donauboe_pci_driver);
diff --git a/drivers/staging/irda/drivers/donauboe.h b/drivers/staging/irda/drivers/donauboe.h
new file mode 100644
index 000000000000..d92d54e839b9
--- /dev/null
+++ b/drivers/staging/irda/drivers/donauboe.h
@@ -0,0 +1,362 @@
+/*********************************************************************
+ *
+ * Filename: toshoboe.h
+ * Version: 2.16
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ * IrDA chip set list from Toshiba Computer Engineering Corp.
+ * model method maker controller Version
+ * Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
+ * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 7020CT FIR,SIR ? ?
+ *
+ * Satell. 4090XCDT FIR,SIR ? ?
+ *
+ * Libretto 100CT FIR,SIR Toshiba Oboe
+ * Libretto 1000CT FIR,SIR Toshiba Oboe
+ *
+ * TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
+ * TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
+ * TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
+ * TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
+ *
+ ********************************************************************/
+
+/* The documentation for this chip is allegedly released */
+/* However I have not seen it, nor have I managed to contact */
+/* anyone who has. HOWEVER the chip bears a striking resemblance */
+/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
+/* the documentation for this is freely available at */
+/* http://www.madingley.org/james/resources/toshoboe/TMPR3922.pdf */
+/* The mapping between the registers in that document and the */
+/* Registers in the 701 oboe chip are as follows */
+
+
+/* 3922 reg 701 regs, by bit numbers */
+/* 7- 0 15- 8 24-16 31-25 */
+/* $28 0x0 0x1 */
+/* $2c SEE NOTE 1 */
+/* $30 0x6 0x7 */
+/* $34 0x8 0x9 SEE NOTE 2 */
+/* $38 0x10 0x11 */
+/* $3C 0xe SEE NOTE 3 */
+/* $40 0x12 0x13 */
+/* $44 0x14 0x15 */
+/* $48 0x16 0x17 */
+/* $4c 0x18 0x19 */
+/* $50 0x1a 0x1b */
+
+/* FIXME: could be 0x1b 0x1a here */
+
+/* $54 0x1d 0x1c */
+/* $5C 0xf SEE NOTE 4 */
+/* $130 SEE NOTE 5 */
+/* $134 SEE NOTE 6 */
+/* */
+/* NOTES: */
+/* 1. The pointer to the ring is packed in most unceremoniously */
+/* 701 Register Address bits (A9-A0 must be zero) */
+/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
+/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
+/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
+/* */
+/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
+/* documentation would suggest that a write of 0x1 to 0x8 */
+/* would be more appropriate. */
+/* */
+/* 3. This assignment is tenuous at best, register 0xe seems to */
+/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
+/* if either of the lower two bits are set the chip seems to */
+/* if either of the lower two bits is set the chip seems to */
+/* */
+/* 4. Bits 7-4 seem to be different; bit 4 seems just to be a generic */
+/* receiver busy flag */
+/* */
+/* 5. and 6. The IER and ISR have a different bit assignment */
+/* The lower three bits of both read back as ones */
+/* ISR is register 0xc, IER is register 0xd */
+/* 7 6 5 4 3 2 1 0 */
+/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* TxDone xmitt done (generated only if generate interrupt bit */
+/* is set in the ring) */
+/* RxDone recv completed (or other recv condition if you set it */
+/* up) */
+/* TxUnder underflow in Transmit FIFO */
+/* RxOver overflow in Recv FIFO */
+/* SipRcv received serial gap (or other condition you set) */
+/* Interrupts are enabled by writing a one to the IER register */
+/* Interrupts are cleared by writing a one to the ISR register */
+/* */
+/* 6. The remaining registers: 0x6 and 0x3 appear to be */
+/* reserved parts of 16 or 32 bit registers; the remainder */
+/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
+/* the Unicast Filter register at $58. */
+/* */
+/* 7. While the core obviously expects 32 bit accesses all the */
+/* M$ drivers do 8 bit accesses, in fact the Miniport ones */
+/* write and read back the byte several times (why?) */
+
+
+#ifndef TOSHOBOE_H
+#define TOSHOBOE_H
+
+/* Registers */
+
+#define OBOE_IO_EXTENT 0x1f
+
+/*Receive and transmit slot pointers */
+#define OBOE_REG(i) (i+(self->base))
+#define OBOE_RXSLOT OBOE_REG(0x0)
+#define OBOE_TXSLOT OBOE_REG(0x1)
+#define OBOE_SLOT_MASK 0x3f
+
+#define OBOE_TXRING_OFFSET 0x200
+#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
+
+/*pointer to the ring */
+#define OBOE_RING_BASE0 OBOE_REG(0x4)
+#define OBOE_RING_BASE1 OBOE_REG(0x5)
+#define OBOE_RING_BASE2 OBOE_REG(0x2)
+#define OBOE_RING_BASE3 OBOE_REG(0x3)
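+/* Illustrative sketch only (see note 1 above): with a ring physical */
+/* address "addr" whose bits A9-A0 are zero, the base registers would */
+/* be loaded roughly as */
+/*   (addr >> 10) & 0xff  ->  OBOE_RING_BASE0   (A17..A10) */
+/*   (addr >> 18) & 0xff  ->  OBOE_RING_BASE1   (A25..A18) */
+/*   (addr >> 26) & 0x3f  ->  OBOE_RING_BASE2   (A31..A26) */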
+
+/*Number of slots in the ring */
+#define OBOE_RING_SIZE OBOE_REG(0x7)
+#define OBOE_RING_SIZE_RX4 0x00
+#define OBOE_RING_SIZE_RX8 0x01
+#define OBOE_RING_SIZE_RX16 0x03
+#define OBOE_RING_SIZE_RX32 0x07
+#define OBOE_RING_SIZE_RX64 0x0f
+#define OBOE_RING_SIZE_TX4 0x00
+#define OBOE_RING_SIZE_TX8 0x10
+#define OBOE_RING_SIZE_TX16 0x30
+#define OBOE_RING_SIZE_TX32 0x70
+#define OBOE_RING_SIZE_TX64 0xf0
+
+#define OBOE_RING_MAX_SIZE 64
+
+/*Causes the gubbins to re-examine the ring */
+#define OBOE_PROMPT OBOE_REG(0x9)
+#define OBOE_PROMPT_BIT 0x1
+
+/* Interrupt Status Register */
+#define OBOE_ISR OBOE_REG(0xc)
+/* Interrupt Enable Register */
+#define OBOE_IER OBOE_REG(0xd)
+/* Interrupt bits for IER and ISR */
+#define OBOE_INT_TXDONE 0x80
+#define OBOE_INT_RXDONE 0x40
+#define OBOE_INT_TXUNDER 0x20
+#define OBOE_INT_RXOVER 0x10
+#define OBOE_INT_SIP 0x08
+#define OBOE_INT_MASK 0xf8
+
+/*Reset Register */
+#define OBOE_CONFIG1 OBOE_REG(0xe)
+#define OBOE_CONFIG1_RST 0x01
+#define OBOE_CONFIG1_DISABLE 0x02
+#define OBOE_CONFIG1_4 0x08
+#define OBOE_CONFIG1_8 0x08
+
+#define OBOE_CONFIG1_ON 0x8
+#define OBOE_CONFIG1_RESET 0xf
+#define OBOE_CONFIG1_OFF 0xe
+
+#define OBOE_STATUS OBOE_REG(0xf)
+#define OBOE_STATUS_RXBUSY 0x10
+#define OBOE_STATUS_FIRRX 0x04
+#define OBOE_STATUS_MIRRX 0x02
+#define OBOE_STATUS_SIRRX 0x01
+
+
+/*Speed control registers */
+#define OBOE_CONFIG0L OBOE_REG(0x10)
+#define OBOE_CONFIG0H OBOE_REG(0x11)
+
+#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
+#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
+#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
+#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
+#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
+#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
+
+#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
+#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
+#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
+#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
+#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
+#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
+#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
+#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
+
+#define OBOE_BOF OBOE_REG(0x12)
+#define OBOE_EOF OBOE_REG(0x13)
+
+#define OBOE_ENABLEL OBOE_REG(0x14)
+#define OBOE_ENABLEH OBOE_REG(0x15)
+
+#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
+#define OBOE_ENABLEH_CONFIGERR 0x40
+#define OBOE_ENABLEH_FIRON 0x20
+#define OBOE_ENABLEH_MIRON 0x10
+#define OBOE_ENABLEH_SIRON 0x08
+#define OBOE_ENABLEH_ENTX 0x04
+#define OBOE_ENABLEH_ENRX 0x02
+#define OBOE_ENABLEH_CRC16 0x01
+
+#define OBOE_ENABLEL_BROADCAST 0x01
+
+#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
+#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
+
+#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
+#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
+
+#define OBOE_PCONFIGH_BAUDMASK 0xfc
+#define OBOE_PCONFIGH_WIDTHMASK 0x04
+#define OBOE_PCONFIGL_WIDTHMASK 0xe0
+#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
+
+#define OBOE_PCONFIG_BAUDMASK 0xfc00
+#define OBOE_PCONFIG_BAUDSHIFT 10
+#define OBOE_PCONFIG_WIDTHMASK 0x04e0
+#define OBOE_PCONFIG_WIDTHSHIFT 5
+#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
+#define OBOE_PCONFIG_PREAMBLESHIFT 0
+
+#define OBOE_MAXLENL OBOE_REG(0x1a)
+#define OBOE_MAXLENH OBOE_REG(0x1b)
+
+#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
+#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
+
+/* The PCI ID of the OBOE chip */
+#ifndef PCI_DEVICE_ID_FIR701
+#define PCI_DEVICE_ID_FIR701 0x0701
+#endif
+
+#ifndef PCI_DEVICE_ID_FIRD01
+#define PCI_DEVICE_ID_FIRD01 0x0d01
+#endif
+
+struct OboeSlot
+{
+  __u16 len;      /*Twelve bits of packet length */
+ __u8 unused;
+ __u8 control; /*Slot control/status see below */
+ __u32 address; /*Slot buffer address */
+}
+__packed;
+
+#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
+
+struct OboeRing
+{
+ struct OboeSlot rx[OBOE_NTASKS];
+ struct OboeSlot tx[OBOE_NTASKS];
+};
+
+#define OBOE_RING_LEN (sizeof(struct OboeRing))
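+/* With OBOE_NTASKS == 0x40 and 8-byte packed slots this works out to */
+/* 0x400 bytes; note 1 above also requires the ring to start on a */
+/* 0x400-byte boundary, which is why the .c file over-allocates and */
+/* aligns the ring by hand. */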
+
+
+#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
+#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
+#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
+#define OBOE_CTL_TX_SIP 0x10 /*W Generate a SIP after transmission */
+#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
+#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
+ /* After this slot is processed */
+#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
+
+
+#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
+#define OBOE_CTL_RX_PHYERR 0x40 /*R Decoder error on reception */
+#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
+#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
+#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
+#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
+#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
+
+
+struct toshoboe_cb
+{
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct tty_driver ttydev;
+
+  struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ __u32 flags; /* Interface flags */
+
+ struct pci_dev *pdev; /*PCI device */
+ int base; /*IO base */
+
+
+ int txpending; /*how many tx's are pending */
+ int txs, rxs; /*Which slots are we at */
+
+ int irdad; /*Driver under control of netdev end */
+ int async; /*Driver under control of async end */
+
+
+ int stopped; /*Stopped by some or other APM stuff */
+
+ int filter; /*In SIR mode do we want to receive
+ frames or byte ranges */
+
+ void *ringbuf; /*The ring buffer */
+ struct OboeRing *ring; /*The ring */
+
+ void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
+ void *rx_bufs[OBOE_RING_MAX_SIZE];
+
+
+ int speed; /*Current setting of the speed */
+ int new_speed; /*Set to request a speed change */
+
+/* The spinlock protects critical parts of the driver.
+ * Locking is done like this :
+ * spin_lock_irqsave(&self->spinlock, flags);
+ * Releasing the lock :
+ * spin_unlock_irqrestore(&self->spinlock, flags);
+ */
+ spinlock_t spinlock;
+ /* Used for the probe and diagnostics code */
+ int int_rx;
+ int int_tx;
+ int int_txunder;
+ int int_rxover;
+ int int_sip;
+};
+
+
+#endif
diff --git a/drivers/staging/irda/drivers/esi-sir.c b/drivers/staging/irda/drivers/esi-sir.c
new file mode 100644
index 000000000000..019a3e848bcb
--- /dev/null
+++ b/drivers/staging/irda/drivers/esi-sir.c
@@ -0,0 +1,157 @@
+/*********************************************************************
+ *
+ * Filename: esi.c
+ * Version: 1.6
+ * Description: Driver for the Extended Systems JetEye PC dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 21 18:54:38 1998
+ * Modified at: Sun Oct 27 22:01:04 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int esi_open(struct sir_dev *);
+static int esi_close(struct sir_dev *);
+static int esi_change_speed(struct sir_dev *, unsigned);
+static int esi_reset(struct sir_dev *);
+
+static struct dongle_driver esi = {
+ .owner = THIS_MODULE,
+ .driver_name = "JetEye PC ESI-9680 PC",
+ .type = IRDA_ESI_DONGLE,
+ .open = esi_open,
+ .close = esi_close,
+ .reset = esi_reset,
+ .set_speed = esi_change_speed,
+};
+
+static int __init esi_sir_init(void)
+{
+ return irda_register_dongle(&esi);
+}
+
+static void __exit esi_sir_cleanup(void)
+{
+ irda_unregister_dongle(&esi);
+}
+
+static int esi_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power up and set dongle to 9600 baud */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int esi_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function esi_change_speed (task)
+ *
+ * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
+ * Apparently (see old esi-driver) no delays are needed here...
+ *
+ */
+static int esi_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int dtr, rts;
+
+ switch (speed) {
+ case 19200:
+ dtr = TRUE;
+ rts = FALSE;
+ break;
+ case 115200:
+ dtr = rts = TRUE;
+ break;
+ default:
+ ret = -EINVAL;
+ speed = 9600;
+ /* fall through */
+ case 9600:
+ dtr = FALSE;
+ rts = TRUE;
+ break;
+ }
+
+ /* Change speed of dongle */
+ sirdev_set_dtr_rts(dev, dtr, rts);
+ dev->speed = speed;
+
+ return ret;
+}
+
+/*
+ * Function esi_reset (task)
+ *
+ * Reset dongle;
+ *
+ */
+static int esi_reset(struct sir_dev *dev)
+{
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Hm, the old esi-driver left the dongle unpowered relying on
+ * the following speed change to repower. This might work for
+ * the esi because we only need the modem lines. However, now the
+ * general rule is reset must bring the dongle to some working
+ * well-known state because speed change might write to registers.
+ * The old esi-driver didn't add any delay here - let's hope it's fine.
+ */
+
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
+
+module_init(esi_sir_init);
+module_exit(esi_sir_cleanup);
+
diff --git a/drivers/staging/irda/drivers/girbil-sir.c b/drivers/staging/irda/drivers/girbil-sir.c
new file mode 100644
index 000000000000..7e0a5b8c6d53
--- /dev/null
+++ b/drivers/staging/irda/drivers/girbil-sir.c
@@ -0,0 +1,252 @@
+/*********************************************************************
+ *
+ * Filename: girbil.c
+ * Version: 1.2
+ * Description: Implementation for the Greenwich GIrBIL dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 6 21:02:33 1999
+ * Modified at: Fri Dec 17 09:13:20 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int girbil_reset(struct sir_dev *dev);
+static int girbil_open(struct sir_dev *dev);
+static int girbil_close(struct sir_dev *dev);
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Control register 1 */
+#define GIRBIL_TXEN 0x01 /* Enable transmitter */
+#define GIRBIL_RXEN 0x02 /* Enable receiver */
+#define GIRBIL_ECAN 0x04 /* Cancel self emitted data */
+#define GIRBIL_ECHO 0x08 /* Echo control characters */
+
+/* LED Current Register (0x2) */
+#define GIRBIL_HIGH 0x20
+#define GIRBIL_MEDIUM 0x21
+#define GIRBIL_LOW 0x22
+
+/* Baud register (0x3) */
+#define GIRBIL_2400 0x30
+#define GIRBIL_4800 0x31
+#define GIRBIL_9600 0x32
+#define GIRBIL_19200 0x33
+#define GIRBIL_38400 0x34
+#define GIRBIL_57600 0x35
+#define GIRBIL_115200 0x36
+
+/* Mode register (0x4) */
+#define GIRBIL_IRDA 0x40
+#define GIRBIL_ASK 0x41
+
+/* Control register 2 (0x5) */
+#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
+
+static struct dongle_driver girbil = {
+ .owner = THIS_MODULE,
+ .driver_name = "Greenwich GIrBIL",
+ .type = IRDA_GIRBIL_DONGLE,
+ .open = girbil_open,
+ .close = girbil_close,
+ .reset = girbil_reset,
+ .set_speed = girbil_change_speed,
+};
+
+static int __init girbil_sir_init(void)
+{
+ return irda_register_dongle(&girbil);
+}
+
+static void __exit girbil_sir_cleanup(void)
+{
+ irda_unregister_dongle(&girbil);
+}
+
+static int girbil_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int girbil_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function girbil_change_speed (dev, speed)
+ *
+ * Set the speed for the Girbil type dongle.
+ *
+ */
+
+#define GIRBIL_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+	/* dongle already reset - port and dongle at default speed */
+
+ switch(state) {
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+		/* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better wait a little while */
+
+ ret = 0;
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = GIRBIL_9600;
+ break;
+ case 19200:
+ control[0] = GIRBIL_19200;
+ break;
+		case 38400:
+ control[0] = GIRBIL_38400;
+ break;
+ case 57600:
+ control[0] = GIRBIL_57600;
+ break;
+ case 115200:
+ control[0] = GIRBIL_115200;
+ break;
+ }
+ control[1] = GIRBIL_LOAD;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+
+ dev->speed = speed;
+
+ state = GIRBIL_STATE_WAIT_SPEED;
+ delay = 100;
+ break;
+
+ case GIRBIL_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ udelay(25); /* better wait a little while */
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function girbil_reset (driver)
+ *
+ * This function resets the girbil dongle.
+ *
+ * Algorithm:
+ * 0. set RTS, and wait at least 5 ms
+ * 1. clear RTS
+ */
+
+
+#define GIRBIL_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET + 1)
+#define GIRBIL_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET + 2)
+#define GIRBIL_STATE_WAIT3_RESET (SIRDEV_STATE_DONGLE_RESET + 3)
+
+static int girbil_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ /* Sleep at least 5 ms */
+ delay = 20;
+ state = GIRBIL_STATE_WAIT1_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT1_RESET:
+		/* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT2_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT2_RESET:
+ /* Write control byte */
+ sirdev_raw_write(dev, &control, 1);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT3_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT3_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+
+ default:
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
+
+module_init(girbil_sir_init);
+module_exit(girbil_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/irda-usb.c b/drivers/staging/irda/drivers/irda-usb.c
new file mode 100644
index 000000000000..723e49bc4baa
--- /dev/null
+++ b/drivers/staging/irda/drivers/irda-usb.c
@@ -0,0 +1,1914 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.c
+ * Version: 0.10
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ * Copyright (C) 2004, SigmaTel, Inc. <irquality@sigmatel.com>
+ * Copyright (C) 2005, Milan Beno <beno@pobox.sk>
+ * Copyright (C) 2006, Nick Fedchik <nick@fedchik.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * IMPORTANT NOTE
+ * --------------
+ *
+ * As of kernel 2.5.20, this is the state of compliance and testing of
+ * this driver (irda-usb) with regards to the USB low level drivers...
+ *
+ * This driver has been tested SUCCESSFULLY with the following drivers :
+ * o usb-uhci-hcd (For Intel/Via USB controllers)
+ * o uhci-hcd (Alternate/JE driver for Intel/Via USB controllers)
+ * o ohci-hcd (For other USB controllers)
+ *
+ * This driver has NOT been tested with the following drivers :
+ * o ehci-hcd (USB 2.0 controllers)
+ *
+ * Note that all HCD drivers do URB_ZERO_PACKET and timeout properly,
+ * so we don't have to worry about that anymore.
+ * One common problem is the failure to set the address on the dongle,
+ * but this happens before the driver gets loaded...
+ *
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+
+#include "irda-usb.h"
+
+/*------------------------------------------------------------------*/
+
+static int qos_mtt_bits = 0;
+
+/* These are the currently known IrDA USB dongles. Add new dongles here */
+static const struct usb_device_id dongles[] = {
+ /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */
+ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */
+ { USB_DEVICE(0x4428, 0x012), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* KC Technology Inc., KC-180 USB IrDA Device */
+ { USB_DEVICE(0x50f, 0x180), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */
+ { USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* SigmaTel STIR4210/4220/4116 USB IrDA (VFIR) Bridge */
+ { USB_DEVICE(0x66f, 0x4210), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { USB_DEVICE(0x66f, 0x4220), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { USB_DEVICE(0x66f, 0x4116), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_APP_SPEC,
+ .bInterfaceSubClass = USB_CLASS_IRDA,
+ .driver_info = IUC_DEFAULT, },
+ { }, /* The end */
+};
+
+/*
+ * Important note :
+ * Devices based on the SigmaTel chipset (0x66f, 0x4200) are not designed
+ * using the "USB-IrDA specification" (yes, there exists such a thing), and
+ * are therefore not supported by this driver (don't add them above).
+ * There is a Linux driver, stir4200, that supports those USB devices.
+ * Jean II
+ */
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+/*------------------------------------------------------------------*/
+
+static void irda_usb_init_qos(struct irda_usb_cb *self);
+static struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf);
+static void irda_usb_disconnect(struct usb_interface *intf);
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self);
+static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int irda_usb_open(struct irda_usb_cb *self);
+static void irda_usb_close(struct irda_usb_cb *self);
+static void speed_bulk_callback(struct urb *urb);
+static void write_bulk_callback(struct urb *urb);
+static void irda_usb_receive(struct urb *urb);
+static void irda_usb_rx_defer_expired(unsigned long data);
+static int irda_usb_net_open(struct net_device *dev);
+static int irda_usb_net_close(struct net_device *dev);
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void irda_usb_net_timeout(struct net_device *dev);
+
+/************************ TRANSMIT ROUTINES ************************/
+/*
+ * Receive packets from the IrDA stack and send them on the USB pipe.
+ * Handle speed change, timeout and lots of ugliness...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_build_header(self, skb, header)
+ *
+ * Builds USB-IrDA outbound header
+ *
+ * When we send an IrDA frame over a USB pipe, we add to it a 1 byte
+ * header. This function creates this header with the proper values.
+ *
+ * Important note : the USB-IrDA spec 1.0 says very clearly in chapter 5.4.2.2
+ * that the setting of the link speed and xbof number in this outbound header
+ * should be applied *AFTER* the frame has been sent.
+ * Unfortunately, some devices are not compliant with that... It seems that
+ * reading the spec is far too difficult...
+ * Jean II
+ */
+static void irda_usb_build_header(struct irda_usb_cb *self,
+ __u8 *header,
+ int force)
+{
+ /* Here we check if we have an STIR421x chip,
+ * and if either speed or xbofs (or both) needs
+ * to be changed.
+ */
+ if (self->capability & IUC_STIR421X &&
+ ((self->new_speed != -1) || (self->new_xbofs != -1))) {
+
+ /* With STIR421x, speed and xBOFs must be set at the same
+ * time, even if only one of them changes.
+ */
+ if (self->new_speed == -1)
+ self->new_speed = self->speed ;
+
+ if (self->new_xbofs == -1)
+ self->new_xbofs = self->xbofs ;
+ }
+
+ /* Set the link speed */
+ if (self->new_speed != -1) {
+ /* Hum... Ugly hack :-(
+	 * Some devices are not compliant with the spec and change
+ * parameters *before* sending the frame. - Jean II
+ */
+ if ((self->capability & IUC_SPEED_BUG) &&
+ (!force) && (self->speed != -1)) {
+ /* No speed and xbofs change here
+ * (we'll do it later in the write callback) */
+ pr_debug("%s(), not changing speed yet\n", __func__);
+ *header = 0;
+ return;
+ }
+
+ pr_debug("%s(), changing speed to %d\n",
+ __func__, self->new_speed);
+ self->speed = self->new_speed;
+ /* We will do ` self->new_speed = -1; ' in the completion
+	 * handler just in case the current URB fails - Jean II */
+
+ switch (self->speed) {
+ case 2400:
+ *header = SPEED_2400;
+ break;
+ default:
+ case 9600:
+ *header = SPEED_9600;
+ break;
+ case 19200:
+ *header = SPEED_19200;
+ break;
+ case 38400:
+ *header = SPEED_38400;
+ break;
+ case 57600:
+ *header = SPEED_57600;
+ break;
+ case 115200:
+ *header = SPEED_115200;
+ break;
+ case 576000:
+ *header = SPEED_576000;
+ break;
+ case 1152000:
+ *header = SPEED_1152000;
+ break;
+ case 4000000:
+ *header = SPEED_4000000;
+ self->new_xbofs = 0;
+ break;
+ case 16000000:
+ *header = SPEED_16000000;
+ self->new_xbofs = 0;
+ break;
+ }
+ } else
+ /* No change */
+ *header = 0;
+
+ /* Set the negotiated additional XBOFS */
+ if (self->new_xbofs != -1) {
+ pr_debug("%s(), changing xbofs to %d\n",
+ __func__, self->new_xbofs);
+ self->xbofs = self->new_xbofs;
+ /* We will do ` self->new_xbofs = -1; ' in the completion
+	 * handler just in case the current URB fails - Jean II */
+
+ switch (self->xbofs) {
+ case 48:
+ *header |= 0x10;
+ break;
+ case 28:
+ case 24: /* USB spec 1.0 says 24 */
+ *header |= 0x20;
+ break;
+ default:
+ case 12:
+ *header |= 0x30;
+ break;
+ case 5: /* Bug in IrLAP spec? (should be 6) */
+ case 6:
+ *header |= 0x40;
+ break;
+ case 3:
+ *header |= 0x50;
+ break;
+ case 2:
+ *header |= 0x60;
+ break;
+ case 1:
+ *header |= 0x70;
+ break;
+ case 0:
+ *header |= 0x80;
+ break;
+ }
+ }
+}
+
+/*
+ * Calculate turnaround time for the SigmaTel header
+ */
+static __u8 get_turnaround_time(struct sk_buff *skb)
+{
+ int turnaround_time = irda_get_mtt(skb);
+
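+	/* The value returned here is the turnaround code (0-7) that is
+	 * written into frame[2] of the STIR421x outbound header, see
+	 * irda_usb_hard_xmit() below.
+	 */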
+ if ( turnaround_time == 0 )
+ return 0;
+ else if ( turnaround_time <= 10 )
+ return 1;
+ else if ( turnaround_time <= 50 )
+ return 2;
+ else if ( turnaround_time <= 100 )
+ return 3;
+ else if ( turnaround_time <= 500 )
+ return 4;
+ else if ( turnaround_time <= 1000 )
+ return 5;
+ else if ( turnaround_time <= 5000 )
+ return 6;
+ else
+ return 7;
+}
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Send a command to change the speed of the dongle
+ * Need to be called with spinlock on.
+ */
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
+{
+ __u8 *frame;
+ struct urb *urb;
+ int ret;
+
+ pr_debug("%s(), speed=%d, xbofs=%d\n", __func__,
+ self->new_speed, self->new_xbofs);
+
+ /* Grab the speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
+ return;
+ }
+
+ /* Allocate the fake frame */
+ frame = self->speed_buff;
+
+ /* Set the new speed and xbofs in this fake frame */
+ irda_usb_build_header(self, frame, 1);
+
+ if (self->capability & IUC_STIR421X) {
+		if (frame[0] == 0)
+			return; /* do nothing if no change */
+		frame[1] = 0; /* other parameters don't change here */
+		frame[2] = 0;
+ }
+
+ /* Submit the 0 length IrDA frame to trigger new speed settings */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ frame, IRDA_USB_SPEED_MTU,
+ speed_bulk_callback, self);
+ urb->transfer_buffer_length = self->header_length;
+ urb->transfer_flags = 0;
+
+ /* Irq disabled -> GFP_ATOMIC */
+ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
+ net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Speed URB callback
+ * Now, we can only get called for the speed URB.
+ */
+static void speed_bulk_callback(struct urb *urb)
+{
+ struct irda_usb_cb *self = urb->context;
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+ /* We should always be called for the speed URB */
+ IRDA_ASSERT(urb == self->speed_urb, return;);
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer.
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* New speed and xbof is now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* Allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Send an IrDA frame to the USB dongle (for transmission)
+ */
+static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct irda_usb_cb *self = netdev_priv(netdev);
+ struct urb *urb = self->tx_urb;
+ unsigned long flags;
+ s32 speed;
+ s16 xbofs;
+ int res, mtt;
+
+ pr_debug("%s() on %s\n", __func__, netdev->name);
+
+ netif_stop_queue(netdev);
+
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if the device is still there.
+	 * We need to check self->present under the spinlock because
+	 * irda_usb_disconnect() is synchronous - Jean II */
+ if (!self->present) {
+ pr_debug("%s(), Device is gone...\n", __func__);
+ goto drop;
+ }
+
+ /* Check if we need to change the number of xbofs */
+ xbofs = irda_get_next_xbofs(skb);
+ if ((xbofs != self->xbofs) && (xbofs != -1)) {
+ self->new_xbofs = xbofs;
+ }
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->speed) && (speed != -1)) {
+ /* Set the desired speed */
+ self->new_speed = speed;
+
+ /* Check for empty frame */
+ if (!skb->len) {
+			/* IrLAP sends us an empty frame to make us change the
+ * speed. Changing speed with the USB adapter is in
+ * fact sending an empty frame to the adapter, so we
+ * could just let the present function do its job.
+ * However, we would wait for min turn time,
+ * do an extra memcpy and increment packet counters...
+ * Jean II */
+ irda_usb_change_speed_xbofs(self);
+ netif_trans_update(netdev);
+ /* Will netif_wake_queue() in callback */
+ goto drop;
+ }
+ }
+
+ if (urb->status != 0) {
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
+ goto drop;
+ }
+
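+	/* Copy the IrDA payload just past the USB-IrDA header that will be
+	 * built below; self->tx_buff is the buffer handed to the Tx URB. */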
+ skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len);
+
+ /* Change setting for next frame */
+ if (self->capability & IUC_STIR421X) {
+ __u8 turnaround_time;
+ __u8* frame = self->tx_buff;
+ turnaround_time = get_turnaround_time( skb );
+ irda_usb_build_header(self, frame, 0);
+ frame[2] = turnaround_time;
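+		/* Frames whose length is an exact multiple of 128 bytes but
+		 * not of 512 get one extra pad byte (a SigmaTel quirk);
+		 * frame[1] tells the dongle whether a pad byte was appended. */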
+ if ((skb->len != 0) &&
+ ((skb->len % 128) == 0) &&
+ ((skb->len % 512) != 0)) {
+ /* add extra byte for special SigmaTel feature */
+ frame[1] = 1;
+ skb_put(skb, 1);
+ } else {
+ frame[1] = 0;
+ }
+ } else {
+ irda_usb_build_header(self, self->tx_buff, 0);
+ }
+
+ /* FIXME: Make macro out of this one */
+ ((struct irda_skb_cb *)skb->cb)->context = self;
+
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ self->tx_buff, skb->len + self->header_length,
+ write_bulk_callback, skb);
+
+ /* This flag (URB_ZERO_PACKET) indicates that what we send is not
+ * a continuous stream of data but separate packets.
+ * In this case, the USB layer will insert an empty USB frame (TD)
+	 * after each of our packets that is an exact multiple of the frame size.
+ * This is how the dongle will detect the end of packet - Jean II */
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ /* Generate min turn time. FIXME: can we do better than this? */
+	/* Trying to enforce a turnaround time at this level is like trying to
+	 * measure a processor clock cycle with a wrist-watch; approximate at best...
+	 *
+	 * What we know is the last time we received a frame over USB.
+	 * Due to latency over USB that depends on the USB load, we don't
+	 * know when this frame was received over IrDA (a few ms before ?)
+	 * Then, same story for our outgoing frame...
+	 *
+	 * In theory, the USB dongle is supposed to handle the turnaround
+	 * by itself (spec 1.0, chapter 4, page 6). Who knows ??? That's
+	 * why this code is enabled only for dongles that don't meet
+	 * the spec.
+ * Jean II */
+ if (self->capability & IUC_NO_TURN) {
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int diff;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+#ifdef IU_USB_MIN_RTT
+ /* Factor in USB delays -> Get rid of udelay() that
+ * would be lost in the noise - Jean II */
+ diff += IU_USB_MIN_RTT;
+#endif /* IU_USB_MIN_RTT */
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+ if (mtt > 1000)
+ mdelay(mtt/1000);
+ else
+ udelay(mtt);
+ }
+ }
+ }
+
+ /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
+ if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
+ net_warn_ratelimited("%s(), failed Tx URB\n", __func__);
+ netdev->stats.tx_errors++;
+ /* Let USB recover : We will catch that in the watchdog */
+ /*netif_start_queue(netdev);*/
+ } else {
+ /* Increment packet stats */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ netif_trans_update(netdev);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return NETDEV_TX_OK;
+
+drop:
+ /* Drop silently the skb and exit */
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Note : this function will be called only for tx_urb...
+ */
+static void write_bulk_callback(struct urb *urb)
+{
+ unsigned long flags;
+ struct sk_buff *skb = urb->context;
+ struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+	/* We should always be called for the Tx URB */
+ IRDA_ASSERT(urb == self->tx_urb, return;);
+
+ /* Free up the skb */
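+	/* The frame data was already copied into self->tx_buff in
+	 * irda_usb_hard_xmit(), so the skb is no longer needed, whatever
+	 * the URB status is. */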
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer,
+ * and we could go in recursion and blow the kernel stack...
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* Make sure we read self->present properly */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* If the network is closed, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ pr_debug("%s(), Network is gone...\n", __func__);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+	/* If changes to speed or xbofs are pending... */
+ if ((self->new_speed != -1) || (self->new_xbofs != -1)) {
+ if ((self->new_speed != self->speed) ||
+ (self->new_xbofs != self->xbofs)) {
+ /* We haven't changed speed yet (because of
+ * IUC_SPEED_BUG), so do it now - Jean II */
+ pr_debug("%s(), Changing speed now...\n", __func__);
+ irda_usb_change_speed_xbofs(self);
+ } else {
+			/* New speed and xbofs are now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+ /* Done, waiting for next packet */
+ netif_wake_queue(self->netdev);
+ }
+ } else {
+ /* Otherwise, allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog timer from the network layer.
+ * After a predetermined timeout, if we don't give confirmation that
+ * the packet has been sent (i.e. no call to netif_wake_queue()),
+ * the network layer will call this function.
+ * Note that the URBs we submit also have a timeout. When the URB timeout
+ * expires, the normal URB callback is called (write_bulk_callback()).
+ */
+static void irda_usb_net_timeout(struct net_device *netdev)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = netdev_priv(netdev);
+ struct urb *urb;
+ int done = 0; /* If we have made any progress */
+
+ pr_debug("%s(), Network layer thinks we timed out!\n", __func__);
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Protect us from USB callbacks, net Tx and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* self->present *MUST* be read under spinlock */
+ if (!self->present) {
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+ /* Check speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, we will come back here.
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNRESET:
+ case -ENOENT: /* urb unlinked by us */
+ default: /* ??? - Play safe */
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+
+ /* Check Tx URB */
+ urb = self->tx_urb;
+ if (urb->status != 0) {
+ struct sk_buff *skb = urb->context;
+
+ pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
+
+ /* Increase error count */
+ netdev->stats.tx_errors++;
+
+#ifdef IU_BUG_KICK_TIMEOUT
+ /* Can't be a bad idea to reset the speed ;-) - Jean II */
+ if(self->new_speed == -1)
+ self->new_speed = self->speed;
+ if(self->new_xbofs == -1)
+ self->new_xbofs = self->xbofs;
+ irda_usb_change_speed_xbofs(self);
+#endif /* IU_BUG_KICK_TIMEOUT */
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, because urb->status will
+ * be -ENOENT. We will fix that at the next watchdog,
+ * leaving more time to USB to recover...
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNRESET:
+ case -ENOENT: /* urb unlinked by us */
+ default: /* ??? - Play safe */
+ if(skb != NULL) {
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+ }
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Maybe we need a reset */
+ /* Note : Some drivers seem to use a usb_set_interface() when they
+ * need to reset the hardware. Hum...
+ */
+
+ /* if(done == 0) */
+}
+
+/************************* RECEIVE ROUTINES *************************/
+/*
+ * Receive packets from the USB layer stack and pass them to the IrDA stack.
+ * Try to work around USB failures...
+ */
+
+/*
+ * Note :
+ * Some of you may have noticed that most dongles have an interrupt-in pipe
+ * that we don't use. Here is the little secret...
+ * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic
+ * in every USB frame. This is unnecessary overhead.
+ * The interrupt in pipe will generate an event every time a packet is
+ * received. Reading an interrupt pipe adds minimal overhead, but has some
+ * latency (~1ms).
+ * If we are connected (speed != 9600), we want to minimise latency, so
+ * we just always hang the Rx URB and ignore the interrupt.
+ * If we are not connected (speed == 9600), there is usually no Rx traffic,
+ * and we want to minimise the USB overhead. In this case we should wait
+ * on the interrupt pipe and hang the Rx URB only when an interrupt is
+ * received.
+ * Jean II
+ *
+ * Note : don't read the above as what we are currently doing, but as
+ * something we could do with KC dongle. Also don't forget that the
+ * interrupt pipe is not part of the original standard, so this would
+ * need to be optional...
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Submit a Rx URB to the USB layer to handle reception of a frame
+ * Mostly called by the completion callback of the previous URB.
+ *
+ * Jean II
+ */
+static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struct urb *urb)
+{
+ struct irda_skb_cb *cb;
+ int ret;
+
+ /* This should never happen */
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(urb != NULL, return;);
+
+ /* Save ourselves in the skb */
+ cb = (struct irda_skb_cb *) skb->cb;
+ cb->context = self;
+
+ /* Reinitialize URB */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
+ skb->data, IRDA_SKB_MAX_MTU,
+ irda_usb_receive, skb);
+ urb->status = 0;
+
+ /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+		/* If this ever happens, we are in deep s***.
+ * Basically, the Rx path will stop... */
+ net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n",
+ __func__, ret);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_receive(urb)
+ *
+ * Called by the USB subsystem when a frame has been received
+ *
+ */
+static void irda_usb_receive(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct sk_buff *newskb;
+ struct sk_buff *dataskb;
+ struct urb *next_urb;
+ unsigned int len, docopy;
+
+ pr_debug("%s(), len=%d\n", __func__, urb->actual_length);
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* If the network is closed or the device gone, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ pr_debug("%s(), Network is gone!\n", __func__);
+ /* Don't re-submit the URB : will stall the Rx path */
+ return;
+ }
+
+ /* Check the status */
+ if (urb->status != 0) {
+ switch (urb->status) {
+ case -EILSEQ:
+ self->netdev->stats.rx_crc_errors++;
+ /* Also precursor to a hot-unplug on UHCI. */
+ /* Fallthrough... */
+ case -ECONNRESET:
+ /* Random error, if I remember correctly */
+ /* uhci_cleanup_unlink() is going to kill the Rx
+ * URB just after we return. No problem, at this
+ * point the URB will be idle ;-) - Jean II */
+ case -ESHUTDOWN:
+ /* That's usually a hot-unplug. Submit will fail... */
+ case -ETIME:
+ /* Usually precursor to a hot-unplug on OHCI. */
+ default:
+ self->netdev->stats.rx_errors++;
+ pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+ break;
+ }
+ /* If we received an error, we don't want to resubmit the
+ * Rx URB straight away but to give the USB layer a little
+ * bit of breathing room.
+ * We are in the USB thread context, therefore there is a
+ * danger of recursion (new URB we submit fails, we come
+ * back here).
+ * With recent USB stack (2.6.15+), I'm seeing that on
+ * hot unplug of the dongle...
+ * Lowest effective timer is 10ms...
+ * Jean II */
+ self->rx_defer_timer.function = irda_usb_rx_defer_expired;
+ self->rx_defer_timer.data = (unsigned long) urb;
+ mod_timer(&self->rx_defer_timer,
+ jiffies + msecs_to_jiffies(10));
+
+ return;
+ }
+
+ /* Check for empty frames */
+ if (urb->actual_length <= self->header_length) {
+ net_warn_ratelimited("%s(), empty frame!\n", __func__);
+ goto done;
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
+ /* Check if we need to copy the data to a new skb or not.
+ * For most frames, we use ZeroCopy and pass the already
+ * allocated skb up the stack.
+ * If the frame is small, it is more efficient to copy it
+ * to save memory (copy will be fast anyway - that's
+ * called Rx-copy-break). Jean II */
+ docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD);
+
+ /* Allocate a new skb */
+ if (self->capability & IUC_STIR421X)
+ newskb = dev_alloc_skb(docopy ? urb->actual_length :
+ IRDA_SKB_MAX_MTU +
+ USB_IRDA_STIR421X_HEADER);
+ else
+ newskb = dev_alloc_skb(docopy ? urb->actual_length :
+ IRDA_SKB_MAX_MTU);
+
+ if (!newskb) {
+ self->netdev->stats.rx_dropped++;
+ /* We could deliver the current skb, but this would stall
+ * the Rx path. Better drop the packet... Jean II */
+ goto done;
+ }
+
+	/* Make sure the IP header gets aligned (IrDA header is 5 bytes) */
+ /* But IrDA-USB header is 1 byte. Jean II */
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+
+ if(docopy) {
+ /* Copy packet, so we can recycle the original */
+ skb_copy_from_linear_data(skb, newskb->data, urb->actual_length);
+ /* Deliver this new skb */
+ dataskb = newskb;
+ /* And hook the old skb to the URB
+ * Note : we don't need to "clean up" the old skb,
+ * as we never touched it. Jean II */
+ } else {
+ /* We are using ZeroCopy. Deliver old skb */
+ dataskb = skb;
+ /* And hook the new skb to the URB */
+ skb = newskb;
+ }
+
+ /* Set proper length on skb & remove USB-IrDA header */
+ skb_put(dataskb, urb->actual_length);
+ skb_pull(dataskb, self->header_length);
+
+ /* Ask the networking layer to queue the packet for the IrDA stack */
+ dataskb->dev = self->netdev;
+ skb_reset_mac_header(dataskb);
+ dataskb->protocol = htons(ETH_P_IRDA);
+ len = dataskb->len;
+ netif_rx(dataskb);
+
+ /* Keep stats up to date */
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+done:
+ /* Note : at this point, the URB we've just received (urb)
+ * is still referenced by the USB layer. For example, if we
+ * have received a -ECONNRESET, uhci_cleanup_unlink() will
+ * continue to process it (in fact, cleaning it up).
+ * If we were to submit this URB, disaster would ensue.
+ * Therefore, we submit our idle URB, and put this URB in our
+ * idle slot....
+ * Jean II */
+ /* Note : with this scheme, we could submit the idle URB before
+ * processing the Rx URB. I don't think it would buy us anything as
+ * we are running in the USB thread context. Jean II */
+ next_urb = self->idle_rx_urb;
+
+ /* Recycle Rx URB : Now, the idle URB is the present one */
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+
+ /* Submit the idle URB to replace the URB we've just received.
+ * Do it last to avoid race conditions... Jean II */
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * In case of errors, we want the USB layer to have time to recover.
+ * Now, it is time to resubmit our Rx URB...
+ */
+static void irda_usb_rx_defer_expired(unsigned long data)
+{
+ struct urb *urb = (struct urb *) data;
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct urb *next_urb;
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Same stuff as when Rx is done, see above... */
+ next_urb = self->idle_rx_urb;
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Callback from the IrDA layer. IrDA wants to know if we have
+ * started receiving anything.
+ */
+static int irda_usb_is_receiving(struct irda_usb_cb *self)
+{
+ /* Note : because of the way UHCI works, it's almost impossible
+	 * to get this info. The controller DMAs directly to memory and
+	 * signals only when the whole frame is finished. Knowing whether the
+	 * first TD of the URB has been filled or not seems like hard work...
+ *
+ * The other solution would be to use the "receiving" command
+	 * on the default descriptor with a usb_control_msg(), but that
+ * would add USB traffic and would return result only in the
+ * next USB frame (~1ms).
+ *
+ * I've been told that current dongles send status info on their
+ * interrupt endpoint, and that's what the Windows driver uses
+ * to know this info. Unfortunately, this is not yet in the spec...
+ *
+ * Jean II
+ */
+
+ return 0; /* For now */
+}
+
+#define STIR421X_PATCH_PRODUCT_VER "Product Version: "
+#define STIR421X_PATCH_STMP_TAG "STMP"
+#define STIR421X_PATCH_CODE_OFFSET 512 /* patch image starts before here */
+/* marks end of patch file header (PC DOS text file EOF character) */
+#define STIR421X_PATCH_END_OF_HDR_TAG 0x1A
+#define STIR421X_PATCH_BLOCK_SIZE 1023
+
+/*
+ * Function stir421x_fw_upload (struct irda_usb_cb *self,
+ * unsigned char *patch,
+ * const unsigned int patch_len)
+ *
+ * Upload firmware code to SigmaTel 421X IRDA-USB dongle
+ */
+static int stir421x_fw_upload(struct irda_usb_cb *self,
+ const unsigned char *patch,
+ const unsigned int patch_len)
+{
+ int ret = -ENOMEM;
+ int actual_len = 0;
+ unsigned int i;
+ unsigned int block_size = 0;
+ unsigned char *patch_block;
+
+ patch_block = kzalloc(STIR421X_PATCH_BLOCK_SIZE, GFP_KERNEL);
+ if (patch_block == NULL)
+ return -ENOMEM;
+
+ /* break up patch into 1023-byte sections */
+ for (i = 0; i < patch_len; i += block_size) {
+ block_size = patch_len - i;
+
+ if (block_size > STIR421X_PATCH_BLOCK_SIZE)
+ block_size = STIR421X_PATCH_BLOCK_SIZE;
+
+ /* upload the patch section */
+ memcpy(patch_block, patch + i, block_size);
+
+ ret = usb_bulk_msg(self->usbdev,
+ usb_sndbulkpipe(self->usbdev,
+ self->bulk_out_ep),
+ patch_block, block_size,
+				   &actual_len, 500); /* usb_bulk_msg() timeout is in ms */
+ pr_debug("%s(): Bulk send %u bytes, ret=%d\n",
+ __func__, actual_len, ret);
+
+ if (ret < 0)
+ break;
+
+ mdelay(10);
+ }
+
+ kfree(patch_block);
+
+ return ret;
+}
+
+/*
+ * Function stir421x_patch_device(struct irda_usb_cb *self)
+ *
+ * Get firmware code from userspace using the hotplug request_firmware() call
+ */
+static int stir421x_patch_device(struct irda_usb_cb *self)
+{
+ unsigned int i;
+ int ret;
+ char stir421x_fw_name[12];
+ const struct firmware *fw;
+ const unsigned char *fw_version_ptr; /* pointer to version string */
+ unsigned long fw_version = 0;
+
+ /*
+ * Known firmware patch file names for STIR421x dongles
+ * are "42101001.sb" or "42101002.sb"
+ */
+ sprintf(stir421x_fw_name, "4210%4X.sb",
+ le16_to_cpu(self->usbdev->descriptor.bcdDevice));
+ ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* We get a patch from userspace */
+ net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n",
+ __func__, stir421x_fw_name, fw->size);
+
+ ret = -EINVAL;
+
+ /* Get the bcd product version */
+ if (!memcmp(fw->data, STIR421X_PATCH_PRODUCT_VER,
+ sizeof(STIR421X_PATCH_PRODUCT_VER) - 1)) {
+ fw_version_ptr = fw->data +
+ sizeof(STIR421X_PATCH_PRODUCT_VER) - 1;
+
+ /* Let's check if the product version is dotted */
+ if (fw_version_ptr[3] == '.' &&
+ fw_version_ptr[7] == '.') {
+ unsigned long major, minor, build;
+ major = simple_strtoul(fw_version_ptr, NULL, 10);
+ minor = simple_strtoul(fw_version_ptr + 4, NULL, 10);
+ build = simple_strtoul(fw_version_ptr + 8, NULL, 10);
+
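+			/* Repack "major.minor.build" as BCD so it can be
+			 * compared with bcdDevice below,
+			 * e.g. "1.00.02" -> 0x1002 */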
+ fw_version = (major << 12)
+ + (minor << 8)
+ + ((build / 10) << 4)
+ + (build % 10);
+
+ pr_debug("%s(): Firmware Product version %ld\n",
+ __func__, fw_version);
+ }
+ }
+
+ if (self->usbdev->descriptor.bcdDevice == cpu_to_le16(fw_version)) {
+ /*
+ * If we're here, we've found a correct patch
+ * The actual image starts after the "STMP" keyword
+ * so forward to the firmware header tag
+ */
+ for (i = 0; i < fw->size && fw->data[i] !=
+ STIR421X_PATCH_END_OF_HDR_TAG; i++) ;
+ /* here we check for the out of buffer case */
+ if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
+ STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
+ if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
+ sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
+
+ /* We can upload the patch to the target */
+ i += sizeof(STIR421X_PATCH_STMP_TAG);
+ ret = stir421x_fw_upload(self, &fw->data[i],
+ fw->size - i);
+ }
+ }
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+
+/********************** IRDA DEVICE CALLBACKS **********************/
+/*
+ * Main calls from the IrDA/Network subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ * We only deal with the IrDA side of the business, the USB side will
+ * be dealt with below...
+ */
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ *
+ * Note : don't mess with self->netopen - Jean II
+ */
+static int irda_usb_net_open(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ unsigned long flags;
+ char hwname[16];
+ int i;
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = netdev_priv(netdev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ spin_lock_irqsave(&self->lock, flags);
+ /* Can only open the device if it's there */
+ if(!self->present) {
+ spin_unlock_irqrestore(&self->lock, flags);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
+ return -1;
+ }
+
+ if(self->needspatch) {
+ spin_unlock_irqrestore(&self->lock, flags);
+ net_warn_ratelimited("%s(), device needs patch\n", __func__);
+ return -EIO ;
+ }
+
+ /* Initialise default speed and xbofs value
+ * (IrLAP will change that soon) */
+ self->speed = -1;
+ self->xbofs = -1;
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* To do *before* submitting Rx urbs and starting net Tx queue
+ * Jean II */
+ self->netopen = 1;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ * Note : will send immediately a speed change...
+ */
+ sprintf(hwname, "usb#%d", self->usbdev->devnum);
+ self->irlap = irlap_open(netdev, &self->qos, hwname);
+ IRDA_ASSERT(self->irlap != NULL, return -1;);
+
+ /* Allow IrLAP to send data to us */
+ netif_start_queue(netdev);
+
+	/* We submit all the Rx URBs except for one that we keep idle.
+	 * Needs to be initialised before submitting the other URBs, because
+ * in some cases as soon as we submit the URBs the USB layer
+ * will trigger a dummy receive - Jean II */
+ self->idle_rx_urb = self->rx_urb[IU_MAX_ACTIVE_RX_URBS];
+ self->idle_rx_urb->context = NULL;
+
+ /* Now that we can pass data to IrLAP, allow the USB layer
+ * to send us some data... */
+ for (i = 0; i < IU_MAX_ACTIVE_RX_URBS; i++) {
+ struct sk_buff *skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!skb) {
+			/* If this ever happens, we are in deep s***.
+ * Basically, we can't start the Rx path... */
+ return -1;
+ }
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+ irda_usb_submit(self, skb, self->rx_urb[i]);
+ }
+
+ /* Ready to play !!! */
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_close (self)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int irda_usb_net_close(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ int i;
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = netdev_priv(netdev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* Clear this flag *before* unlinking the urbs and *before*
+ * stopping the network Tx queue - Jean II */
+ self->netopen = 0;
+
+ /* Stop network Tx queue */
+ netif_stop_queue(netdev);
+
+	/* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
+ /* Deallocate all the Rx path buffers (URBs and skb) */
+ for (i = 0; i < self->max_rx_urb; i++) {
+ struct urb *urb = self->rx_urb[i];
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ /* Cancel the receive command */
+ usb_kill_urb(urb);
+ /* The skb is ours, free it */
+ if(skb) {
+ dev_kfree_skb(skb);
+ urb->context = NULL;
+ }
+ }
+ /* Cancel Tx and speed URB - need to be synchronous to avoid races */
+ usb_kill_urb(self->tx_urb);
+ usb_kill_urb(self->speed_urb);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ unsigned long flags;
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct irda_usb_cb *self;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+ /* Check if the device is still there */
+ if(self->present) {
+ /* Set the desired speed */
+ self->new_speed = irq->ifr_baudrate;
+ irda_usb_change_speed_xbofs(self);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Check if the IrDA stack is still there */
+ if(self->netopen)
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = irda_usb_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+
+/********************* IRDA CONFIG SUBROUTINES *********************/
+/*
+ * Various subroutines dealing with IrDA and network stuff we use to
+ * configure and initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set proper values in the IrDA QOS structure
+ */
+static inline void irda_usb_init_qos(struct irda_usb_cb *self)
+{
+ struct irda_class_desc *desc;
+
+
+ desc = self->irda_desc;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* See spec section 7.2 for meaning.
+	 * Values are little endian (as is most USB stuff); the IrDA stack
+	 * uses them in native order (see parameters.c). - Jean II */
+ self->qos.baud_rate.bits = le16_to_cpu(desc->wBaudRate);
+ self->qos.min_turn_time.bits = desc->bmMinTurnaroundTime;
+ self->qos.additional_bofs.bits = desc->bmAdditionalBOFs;
+ self->qos.window_size.bits = desc->bmWindowSize;
+ self->qos.data_size.bits = desc->bmDataSize;
+
+ pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
+ __func__, self->qos.baud_rate.bits, self->qos.data_size.bits,
+ self->qos.window_size.bits, self->qos.additional_bofs.bits,
+ self->qos.min_turn_time.bits);
+
+	/* Don't always trust what the dongle tells us */
+ if(self->capability & IUC_SIR_ONLY)
+ self->qos.baud_rate.bits &= 0x00ff;
+ if(self->capability & IUC_SMALL_PKT)
+ self->qos.data_size.bits = 0x07;
+ if(self->capability & IUC_NO_WINDOW)
+ self->qos.window_size.bits = 0x01;
+ if(self->capability & IUC_MAX_WINDOW)
+ self->qos.window_size.bits = 0x7f;
+ if(self->capability & IUC_MAX_XBOFS)
+ self->qos.additional_bofs.bits = 0x01;
+
+#if 1
+	/* Module parameter can override the min turn time */
+ if (qos_mtt_bits)
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+#endif
+ /*
+ * Note : most of those values apply only for the receive path,
+ * the transmit path will be set differently - Jean II
+ */
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*------------------------------------------------------------------*/
+static const struct net_device_ops irda_usb_netdev_ops = {
+ .ndo_open = irda_usb_net_open,
+ .ndo_stop = irda_usb_net_close,
+ .ndo_do_ioctl = irda_usb_net_ioctl,
+ .ndo_start_xmit = irda_usb_hard_xmit,
+ .ndo_tx_timeout = irda_usb_net_timeout,
+};
+
+/*
+ * Initialise the network side of the irda-usb instance
+ * Called when a new USB instance is registered in irda_usb_probe()
+ */
+static inline int irda_usb_open(struct irda_usb_cb *self)
+{
+ struct net_device *netdev = self->netdev;
+
+ netdev->netdev_ops = &irda_usb_netdev_ops;
+
+ irda_usb_init_qos(self);
+
+ return register_netdev(netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Cleanup the network side of the irda-usb instance
+ * Called when a USB instance is removed in irda_usb_disconnect()
+ */
+static inline void irda_usb_close(struct irda_usb_cb *self)
+{
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Remove the speed buffer */
+ kfree(self->speed_buff);
+ self->speed_buff = NULL;
+
+ kfree(self->tx_buff);
+ self->tx_buff = NULL;
+}
+
+/********************** USB CONFIG SUBROUTINES **********************/
+/*
+ * Various subroutines dealing with USB stuff we use to configure and
+ * initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_parse_endpoints(dev, ifnum)
+ *
+ * Parse the various endpoints and find the one we need.
+ *
+ * The endpoints are the pipes used to communicate with the USB device.
+ * The spec defines 2 endpoints of type bulk transfer, one in, and one out.
+ * These are used to pass frames back and forth with the dongle.
+ * Most dongles also have an interrupt endpoint, which will probably be
+ * documented in the next spec...
+ */
+static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_host_endpoint *endpoint, int ennum)
+{
+ int i; /* Endpoint index in table */
+
+ /* Init : no endpoints */
+ self->bulk_in_ep = 0;
+ self->bulk_out_ep = 0;
+ self->bulk_int_ep = 0;
+
+ /* Let's look at all those endpoints */
+ for(i = 0; i < ennum; i++) {
+ /* All those variables will get optimised by the compiler,
+ * so let's aim for clarity... - Jean II */
+ __u8 ep; /* Endpoint address */
+ __u8 dir; /* Endpoint direction */
+ __u8 attr; /* Endpoint attribute */
+ __u16 psize; /* Endpoint max packet size in bytes */
+
+ /* Get endpoint address, direction and attribute */
+ ep = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ dir = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ attr = endpoint[i].desc.bmAttributes;
+ psize = le16_to_cpu(endpoint[i].desc.wMaxPacketSize);
+
+ /* Is it a bulk endpoint ??? */
+ if(attr == USB_ENDPOINT_XFER_BULK) {
+ /* We need to find an IN and an OUT */
+ if(dir == USB_DIR_IN) {
+ /* This is our Rx endpoint */
+ self->bulk_in_ep = ep;
+ } else {
+ /* This is our Tx endpoint */
+ self->bulk_out_ep = ep;
+ self->bulk_out_mtu = psize;
+ }
+ } else {
+ if((attr == USB_ENDPOINT_XFER_INT) &&
+ (dir == USB_DIR_IN)) {
+ /* This is our interrupt endpoint */
+ self->bulk_int_ep = ep;
+ } else {
+ net_err_ratelimited("%s(), Unrecognised endpoint %02X\n",
+ __func__, ep);
+ }
+ }
+ }
+
+ pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
+ __func__, self->bulk_in_ep, self->bulk_out_ep,
+ self->bulk_out_mtu, self->bulk_int_ep);
+
+ return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
+}
+
+#ifdef IU_DUMP_CLASS_DESC
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_dump_class_desc(desc)
+ *
+ * Prints out the contents of the IrDA class descriptor
+ *
+ */
+static inline void irda_usb_dump_class_desc(struct irda_class_desc *desc)
+{
+ /* Values are little endian */
+ printk("bLength=%x\n", desc->bLength);
+ printk("bDescriptorType=%x\n", desc->bDescriptorType);
+ printk("bcdSpecRevision=%x\n", le16_to_cpu(desc->bcdSpecRevision));
+ printk("bmDataSize=%x\n", desc->bmDataSize);
+ printk("bmWindowSize=%x\n", desc->bmWindowSize);
+ printk("bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime);
+ printk("wBaudRate=%x\n", le16_to_cpu(desc->wBaudRate));
+ printk("bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs);
+ printk("bIrdaRateSniff=%x\n", desc->bIrdaRateSniff);
+ printk("bMaxUnicastList=%x\n", desc->bMaxUnicastList);
+}
+#endif /* IU_DUMP_CLASS_DESC */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_find_class_desc(intf)
+ *
+ * Returns instance of IrDA class descriptor, or NULL if not found
+ *
+ * The class descriptor is some extra info that IrDA USB devices will
+ * offer to us, describing their IrDA characteristics. We will use that in
+ * irda_usb_init_qos()
+ */
+static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf)
+{
+ struct usb_device *dev = interface_to_usbdev (intf);
+ struct irda_class_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ /* USB-IrDA class spec 1.0:
+ * 6.1.3: Standard "Get Descriptor" Device Request is not
+ * appropriate to retrieve class-specific descriptor
+ * 6.2.5: Class Specific "Get Class Descriptor" Interface Request
+ * is mandatory and returns the USB-IrDA class descriptor
+ */
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev,0),
+ IU_REQ_GET_CLASS_DESC,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, intf->altsetting->desc.bInterfaceNumber, desc,
+ sizeof(*desc), 500);
+
+ pr_debug("%s(), ret=%d\n", __func__, ret);
+	if (ret < (int)sizeof(*desc)) {
+ net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n",
+ ret < 0 ? "failed" : "too short", ret);
+ }
+ else if (desc->bDescriptorType != USB_DT_IRDA) {
+ net_warn_ratelimited("usb-irda: bad class_descriptor type\n");
+ }
+ else {
+#ifdef IU_DUMP_CLASS_DESC
+ irda_usb_dump_class_desc(desc);
+#endif /* IU_DUMP_CLASS_DESC */
+
+ return desc;
+ }
+ kfree(desc);
+ return NULL;
+}
+
+/*********************** USB DEVICE CALLBACKS ***********************/
+/*
+ * Main calls from the USB subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ * The USB layer protects us from reentrancy (via BKL), so we don't need
+ * to spinlock in here... Jean II
+ */
+static int irda_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *net;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct irda_usb_cb *self;
+ struct usb_host_interface *interface;
+ struct irda_class_desc *irda_desc;
+ int ret = -ENOMEM;
+ int i; /* Driver instance index / Rx URB index */
+
+	/* Note : the probe code makes sure to call us only for devices that
+	 * match the list of dongles (top of the file). So, we
+ * don't need to check if the dongle is really ours.
+ * Jean II */
+
+ net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ net = alloc_irdadev(sizeof(*self));
+ if (!net)
+ goto err_out;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ self = netdev_priv(net);
+ self->netdev = net;
+ spin_lock_init(&self->lock);
+ init_timer(&self->rx_defer_timer);
+
+ self->capability = id->driver_info;
+ self->needspatch = ((self->capability & IUC_STIR421X) != 0);
+
+ /* Create all of the needed urbs */
+ if (self->capability & IUC_STIR421X) {
+ self->max_rx_urb = IU_SIGMATEL_MAX_RX_URBS;
+ self->header_length = USB_IRDA_STIR421X_HEADER;
+ } else {
+ self->max_rx_urb = IU_MAX_RX_URBS;
+ self->header_length = USB_IRDA_HEADER;
+ }
+
+ self->rx_urb = kcalloc(self->max_rx_urb, sizeof(struct urb *),
+ GFP_KERNEL);
+ if (!self->rx_urb)
+ goto err_free_net;
+
+ for (i = 0; i < self->max_rx_urb; i++) {
+ self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->rx_urb[i]) {
+ goto err_out_1;
+ }
+ }
+ self->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->tx_urb) {
+ goto err_out_1;
+ }
+ self->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->speed_urb) {
+ goto err_out_2;
+ }
+
+ /* Is this really necessary? (no, except maybe for broken devices) */
+ if (usb_reset_configuration (dev) < 0) {
+ dev_err(&intf->dev, "reset_configuration failed\n");
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Is this really necessary? */
+	/* Note : some drivers hardcode the interface number, some others
+	 * specify an alternate setting, but very few drivers do it like this.
+ * Jean II */
+ ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
+ pr_debug("usb-irda: set interface %d result %d\n",
+ intf->altsetting->desc.bInterfaceNumber, ret);
+ switch (ret) {
+ case 0:
+ break;
+ case -EPIPE: /* -EPIPE = -32 */
+ /* Martin Diehl says if we get a -EPIPE we should
+ * be fine and we don't need to do a usb_clear_halt().
+ * - Jean II */
+ pr_debug("%s(), Received -EPIPE, ignoring...\n",
+ __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown error %d\n", __func__, ret);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Find our endpoints */
+ interface = intf->cur_altsetting;
+ if(!irda_usb_parse_endpoints(self, interface->endpoint,
+ interface->desc.bNumEndpoints)) {
+ net_err_ratelimited("%s(), Bogus endpoints...\n", __func__);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ self->usbdev = dev;
+
+ /* Find IrDA class descriptor */
+ irda_desc = irda_usb_find_class_desc(intf);
+ ret = -ENODEV;
+ if (!irda_desc)
+ goto err_out_3;
+
+ if (self->needspatch) {
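+		/* Vendor-specific request 0x02 (bmRequestType 0x40 :
+		 * host-to-device, vendor, device) sent to the un-patched
+		 * STIR421x before the firmware patch is downloaded */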
+ ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
+ 0x02, 0x40, 0, 0, NULL, 0, 500);
+ if (ret < 0) {
+ pr_debug("usb_control_msg failed %d\n", ret);
+ goto err_out_3;
+ } else {
+ mdelay(10);
+ }
+ }
+
+ self->irda_desc = irda_desc;
+ self->present = 1;
+ self->netopen = 0;
+ self->usbintf = intf;
+
+ /* Allocate the buffer for speed changes */
+ /* Don't change this buffer size and allocation without doing
+ * some heavy and complete testing. Don't ask why :-(
+ * Jean II */
+ ret = -ENOMEM;
+ self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
+ if (!self->speed_buff)
+ goto err_out_3;
+
+ self->tx_buff = kzalloc(IRDA_SKB_MAX_MTU + self->header_length,
+ GFP_KERNEL);
+ if (!self->tx_buff)
+ goto err_out_4;
+
+ ret = irda_usb_open(self);
+ if (ret)
+ goto err_out_5;
+
+ net_info_ratelimited("IrDA: Registered device %s\n", net->name);
+ usb_set_intfdata(intf, self);
+
+ if (self->needspatch) {
+ /* Now we fetch and upload the firmware patch */
+ ret = stir421x_patch_device(self);
+ self->needspatch = (ret < 0);
+ if (self->needspatch) {
+ net_err_ratelimited("STIR421X: Couldn't upload patch\n");
+ goto err_out_6;
+ }
+
+		/* replace the IrDA class descriptor with what the patched device is now reporting */
+ irda_desc = irda_usb_find_class_desc (self->usbintf);
+ if (!irda_desc) {
+ ret = -ENODEV;
+ goto err_out_6;
+ }
+ kfree(self->irda_desc);
+ self->irda_desc = irda_desc;
+ irda_usb_init_qos(self);
+ }
+
+ return 0;
+err_out_6:
+ unregister_netdev(self->netdev);
+err_out_5:
+ kfree(self->tx_buff);
+err_out_4:
+ kfree(self->speed_buff);
+err_out_3:
+ /* Free all urbs that we may have created */
+ usb_free_urb(self->speed_urb);
+err_out_2:
+ usb_free_urb(self->tx_urb);
+err_out_1:
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_free_urb(self->rx_urb[i]);
+ kfree(self->rx_urb);
+err_free_net:
+ free_netdev(net);
+err_out:
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * The current irda-usb device is removed, and the USB layer tells us
+ * to shut it down...
+ * One of the constraints is that when we exit this function,
+ * we cannot use the usb_device any more. Gone. Destroyed. kfree().
+ * Most other subsystems allow you to destroy the instance at a time
+ * when it's convenient to you, to postpone it to a later date, but
+ * not the USB subsystem.
+ * So, we must make bloody sure that everything gets deactivated.
+ * Jean II
+ */
+static void irda_usb_disconnect(struct usb_interface *intf)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ usb_set_intfdata(intf, NULL);
+ if (!self)
+ return;
+
+ /* Make sure that the Tx path is not executing. - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+
+	/* Oops! We are not there any more.
+	 * This will stop/deactivate the Tx path. - Jean II */
+ self->present = 0;
+
+	/* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
+ /* We need to have irq enabled to unlink the URBs. That's OK,
+ * at this point the Tx path is gone - Jean II */
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Hum... Check if networking is still active (avoid races) */
+ if((self->netopen) || (self->irlap)) {
+ /* Accept no more transmissions */
+ /*netif_device_detach(self->netdev);*/
+ netif_stop_queue(self->netdev);
+ /* Stop all the receive URBs. Must be synchronous. */
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_kill_urb(self->rx_urb[i]);
+ /* Cancel Tx and speed URB.
+ * Make sure it's synchronous to avoid races. */
+ usb_kill_urb(self->tx_urb);
+ usb_kill_urb(self->speed_urb);
+ }
+
+ /* Cleanup the device stuff */
+ irda_usb_close(self);
+ /* No longer attached to USB bus */
+ self->usbdev = NULL;
+ self->usbintf = NULL;
+
+ /* Clean up our urbs */
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_free_urb(self->rx_urb[i]);
+ kfree(self->rx_urb);
+ /* Clean up Tx and speed URB */
+ usb_free_urb(self->tx_urb);
+ usb_free_urb(self->speed_urb);
+
+ /* Free self and network device */
+ free_netdev(self->netdev);
+ pr_debug("%s(), USB IrDA Disconnected\n", __func__);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int irda_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ netif_device_detach(self->netdev);
+
+ if (self->tx_urb != NULL)
+ usb_kill_urb(self->tx_urb);
+ if (self->speed_urb != NULL)
+ usb_kill_urb(self->speed_urb);
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_kill_urb(self->rx_urb[i]);
+ }
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int irda_usb_resume(struct usb_interface *intf)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_submit_urb(self->rx_urb[i], GFP_KERNEL);
+ }
+
+ netif_device_attach(self->netdev);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "irda-usb",
+ .probe = irda_usb_probe,
+ .disconnect = irda_usb_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = irda_usb_suspend,
+ .resume = irda_usb_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+/*
+ * Module parameters
+ */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+MODULE_AUTHOR("Roman Weissgaerber <weissg@vienna.at>, Dag Brattli <dag@brattli.net>, Jean Tourrilhes <jt@hpl.hp.com> and Nick Fedchik <nick@fedchik.org.ua>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/irda-usb.h b/drivers/staging/irda/drivers/irda-usb.h
new file mode 100644
index 000000000000..8ac389fa9348
--- /dev/null
+++ b/drivers/staging/irda/drivers/irda-usb.h
@@ -0,0 +1,174 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.h
+ * Version: 0.10
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2001, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2000, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ * Copyright (C) 2004, SigmaTel, Inc. <irquality@sigmatel.com>
+ * Copyright (C) 2005, Milan Beno <beno@pobox.sk>
+ * Copyright (C) 2006, Nick FEdchik <nick@fedchik.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include <linux/ktime.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> /* struct irlap_cb */
+
+#define RX_COPY_THRESHOLD 200
+#define IRDA_USB_MAX_MTU 2051
+#define IRDA_USB_SPEED_MTU 64 /* Weird, but works like this */
+
+/* Maximum number of active URBs on the Rx path
+ * This is the number of buffers we keep between the USB hardware and the
+ * IrDA stack.
+ *
+ * Note : the network layer does also queue the packets between us and the
+ * IrDA stack, and is actually pretty fast and efficient in doing that.
+ * Therefore, we don't need to have a large number of URBs, and we can
+ * perfectly live happy with only one. We certainly don't need to keep the
+ * full IrTTP window around here...
+ * I repeat for those who have trouble understanding : 1 URB is plenty
+ * good enough to handle back-to-back (brickwalled) frames. I tried it,
+ * it works (it's the hardware that has trouble doing it).
+ *
+ * Having 2 URBs would allow the USB stack to process one URB while we take
+ * care of the other and then swap the URBs...
+ * On the other hand, increasing the number of URBs has penalties
+ * in terms of latency and will interact with the link management in IrLAP...
+ * Jean II */
+#define IU_MAX_ACTIVE_RX_URBS 1 /* Don't touch !!! */
+
+/* When a Rx URB is passed back to us, we can't reuse it immediately,
+ * because it may still be referenced by the USB layer. Therefore we
+ * need to keep one extra URB in the Rx path.
+ * Jean II */
+#define IU_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + 1)
+
+/* Various ugly stuff to try to workaround generic problems */
+/* Send speed command in case of timeout, just for trying to get things sane */
+#define IU_BUG_KICK_TIMEOUT
+/* Show the USB class descriptor */
+#undef IU_DUMP_CLASS_DESC
+/* Assume a minimum round trip latency for USB transfer (in us)...
+ * USB transfers are done in the next USB slot if there is no traffic
+ * (1/19 msec) and run at 12 Mb/s :
+ * Waiting for slot + tx = (53us + 16us) * 2 = 137us minimum.
+ * Rx notification will only be done at the end of the USB frame period :
+ * OHCI : frame period = 1ms
+ * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
+ * EHCI : frame period = 125us */
+#define IU_USB_MIN_RTT 500 /* This should be safe in most cases */
+
+/* Inbound header */
+#define MEDIA_BUSY 0x80
+
+#define SPEED_2400 0x01
+#define SPEED_9600 0x02
+#define SPEED_19200 0x03
+#define SPEED_38400 0x04
+#define SPEED_57600 0x05
+#define SPEED_115200 0x06
+#define SPEED_576000 0x07
+#define SPEED_1152000 0x08
+#define SPEED_4000000 0x09
+#define SPEED_16000000 0x0a
+
+/* Basic capabilities */
+#define IUC_DEFAULT 0x00 /* Basic device compliant with 1.0 spec */
+/* Main bugs */
+#define IUC_SPEED_BUG 0x01 /* Device doesn't set speed after the frame */
+#define IUC_NO_WINDOW 0x02 /* Device doesn't behave with big Rx window */
+#define IUC_NO_TURN 0x04 /* Device doesn't do turnaround by itself */
+/* Not currently used */
+#define IUC_SIR_ONLY 0x08 /* Device doesn't behave at FIR speeds */
+#define IUC_SMALL_PKT 0x10 /* Device doesn't behave with big Rx packets */
+#define IUC_MAX_WINDOW 0x20 /* Device underestimate the Rx window */
+#define IUC_MAX_XBOFS 0x40 /* Device need more xbofs than advertised */
+#define IUC_STIR421X 0x80 /* SigmaTel 4210/4220/4116 VFIR */
+
+/* USB class definitions */
+#define USB_IRDA_HEADER 0x01
+#define USB_CLASS_IRDA 0x02 /* USB_CLASS_APP_SPEC subclass */
+#define USB_DT_IRDA 0x21
+#define USB_IRDA_STIR421X_HEADER 0x03
+#define IU_SIGMATEL_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + \
+ USB_IRDA_STIR421X_HEADER)
+
+struct irda_class_desc {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 bcdSpecRevision;
+ __u8 bmDataSize;
+ __u8 bmWindowSize;
+ __u8 bmMinTurnaroundTime;
+ __le16 wBaudRate;
+ __u8 bmAdditionalBOFs;
+ __u8 bIrdaRateSniff;
+ __u8 bMaxUnicastList;
+} __packed;
+
+/* class specific interface request to get the IrDA-USB class descriptor
+ * (6.2.5, USB-IrDA class spec 1.0) */
+
+#define IU_REQ_GET_CLASS_DESC 0x06
+#define STIR421X_MAX_PATCH_DOWNLOAD_SIZE 1023
+
+struct irda_usb_cb {
+ struct irda_class_desc *irda_desc;
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct usb_interface *usbintf; /* init: probe_irda */
+ int netopen; /* Device is active for network */
+ int present; /* Device is present on the bus */
+ __u32 capability; /* Capability of the hardware */
+ __u8 bulk_in_ep; /* Rx Endpoint assignments */
+ __u8 bulk_out_ep; /* Tx Endpoint assignments */
+ __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
+ __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
+
+ __u8 max_rx_urb;
+ struct urb **rx_urb; /* URBs used to receive data frames */
+ struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
+ struct urb *tx_urb; /* URB used to send data frames */
+ struct urb *speed_urb; /* URB used to send speed commands */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdev. */
+	struct irlap_cb	  *irlap;	/* The link layer we are bound to */
+ struct qos_info qos;
+ char *speed_buff; /* Buffer for speed changes */
+ char *tx_buff;
+
+ ktime_t stamp;
+
+ spinlock_t lock; /* For serializing Tx operations */
+
+ __u16 xbofs; /* Current xbofs setting */
+ __s16 new_xbofs; /* xbofs we need to set */
+ __u32 speed; /* Current speed */
+ __s32 new_speed; /* speed we need to set */
+
+ __u8 header_length; /* USB-IrDA frame header size */
+ int needspatch; /* device needs firmware patch */
+
+ struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
+};
+
diff --git a/drivers/staging/irda/drivers/irtty-sir.c b/drivers/staging/irda/drivers/irtty-sir.c
new file mode 100644
index 000000000000..7a20a9a4663a
--- /dev/null
+++ b/drivers/staging/irda/drivers/irtty-sir.c
@@ -0,0 +1,570 @@
+/*********************************************************************
+ *
+ * Filename: irtty-sir.c
+ * Version: 2.0
+ * Description: IrDA line discipline implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:18:38 1997
+ * Modified at: Sun Oct 27 22:13:30 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ * Sources: slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+#include "irtty-sir.h"
+
+static int qos_mtt_bits = 0x03; /* 5 ms or more */
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+/* ------------------------------------------------------- */
+
+/* device configuration callbacks always invoked with irda-thread context */
+
+/* find out how many chars we have in buffers below us
+ * this is allowed to lie, i.e. return fewer chars than we
+ * actually have. The returned value is used to determine
+ * how long the irdathread should wait before doing the
+ * real blocking wait_until_sent()
+ */
+
+static int irtty_chars_in_buffer(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ return tty_chars_in_buffer(priv->tty);
+}
+
+/* Wait (sleep) until the underlying hardware has finished transmission,
+ * i.e. hardware buffers are drained
+ * this must block and not return before all characters are really sent
+ *
+ * If the tty sits on top of a 16550A-like uart, there are typically
+ * up to 16 bytes in the fifo - e.g. 9600 bps 8N1 needs 16.7 msec
+ *
+ * With usbserial the uart-fifo is basically replaced by the converter's
+ * outgoing endpoint buffer, which can usually hold 64 bytes (at least).
+ * With pl2303 it appears we are safe with 60msec here.
+ *
+ * I really wish all serial drivers would provide
+ * correct implementation of wait_until_sent()
+ */
+
+#define USBSERIAL_TX_DONE_DELAY 60
+
+static void irtty_wait_until_sent(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ tty = priv->tty;
+ if (tty->ops->wait_until_sent) {
+ tty->ops->wait_until_sent(tty, msecs_to_jiffies(100));
+ }
+ else {
+ msleep(USBSERIAL_TX_DONE_DELAY);
+ }
+}
+
+/*
+ * Function irtty_change_speed (dev, speed)
+ *
+ * Change the speed of the serial port.
+ *
+ * This may sleep in set_termios (usbserial driver f.e.) and must
+ * not be called from interrupt/timer/tasklet therefore.
+ * All such invocations are deferred to kIrDAd now so we can sleep there.
+ */
+
+static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ struct ktermios old_termios;
+ int cflag;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+
+ down_write(&tty->termios_rwsem);
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
+ tty_encode_baud_rate(tty, speed, speed);
+ if (tty->ops->set_termios)
+ tty->ops->set_termios(tty, &old_termios);
+ priv->io.speed = speed;
+ up_write(&tty->termios_rwsem);
+
+ return 0;
+}
+
+/*
+ * Function irtty_set_dtr_rts (dev, dtr, rts)
+ *
+ * This function can be used by dongles etc. to set or reset the status
+ * of the dtr and rts lines
+ */
+
+static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ struct sirtty_cb *priv = dev->priv;
+ int set = 0;
+ int clear = 0;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ if (rts)
+ set |= TIOCM_RTS;
+ else
+ clear |= TIOCM_RTS;
+ if (dtr)
+ set |= TIOCM_DTR;
+ else
+ clear |= TIOCM_DTR;
+
+ /*
+ * We can't use ioctl() because it expects a non-null file structure,
+ * and we don't have that here.
+	 * This function is not yet defined for all tty drivers, so
+ * let's be careful... Jean II
+ */
+ IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
+ priv->tty->ops->tiocmset(priv->tty, set, clear);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+/* called from sir_dev when there is more data to send
+ * context is either netdev->hard_xmit or some transmit-completion bh
+ * i.e. we are under spinlock here and must not sleep.
+ */
+
+static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ int writelen;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+ if (!tty->ops->write)
+ return 0;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ writelen = tty_write_room(tty);
+ if (writelen > len)
+ writelen = len;
+ return tty->ops->write(tty, ptr, writelen);
+}
+
+/* ------------------------------------------------------- */
+
+/* irda line discipline callbacks */
+
+/*
+ * Function irtty_receive_buf( tty, cp, count)
+ *
+ * Handle the 'receiver data ready' interrupt. This function is called
+ * by the 'tty_io' module in the kernel when a block of IrDA data has
+ * been received, which can now be decapsulated and delivered for
+ * further processing
+ *
+ * calling context depends on underlying driver and tty->port->low_latency!
+ * for example (low_latency: 1 / 0):
+ * serial.c: uart-interrupt / softint
+ * usbserial: urb-complete-interrupt / softint
+ */
+
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int i;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ if (unlikely(count==0)) /* yes, this happens */
+ return;
+
+ dev = priv->dev;
+ if (!dev) {
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ /*
+ * Characters received with a parity error, etc?
+ */
+ if (fp && *fp++) {
+ pr_debug("Framing or parity error!\n");
+ sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
+ return;
+ }
+ }
+
+ sirdev_receive(dev, cp, count);
+}
+
+/*
+ * Function irtty_write_wakeup (tty)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void irtty_write_wakeup(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (priv->dev)
+ sirdev_write_complete(priv->dev);
+}
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_stop_receiver (tty, stop)
+ *
+ */
+
+static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
+{
+ struct ktermios old_termios;
+ int cflag;
+
+ down_write(&tty->termios_rwsem);
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
+
+ if (stop)
+ cflag &= ~CREAD;
+ else
+ cflag |= CREAD;
+
+ tty->termios.c_cflag = cflag;
+ if (tty->ops->set_termios)
+ tty->ops->set_termios(tty, &old_termios);
+ up_write(&tty->termios_rwsem);
+}
+
+/*****************************************************************/
+
+/* serialize ldisc open/close with sir_dev */
+static DEFINE_MUTEX(irtty_mutex);
+
+/* notifier from sir_dev when irda% device gets opened (ifup) */
+
+static int irtty_start_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ mutex_lock(&irtty_mutex);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ mutex_unlock(&irtty_mutex);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ if (tty->ops->start)
+ tty->ops->start(tty);
+ /* Make sure we can receive more data */
+ irtty_stop_receiver(tty, FALSE);
+
+ mutex_unlock(&irtty_mutex);
+ return 0;
+}
+
+/* notifier from sir_dev when irda% device gets closed (ifdown) */
+
+static int irtty_stop_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ mutex_lock(&irtty_mutex);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ mutex_unlock(&irtty_mutex);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ /* Make sure we don't receive more data */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ mutex_unlock(&irtty_mutex);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+static struct sir_driver sir_tty_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = "sir_tty",
+ .start_dev = irtty_start_dev,
+ .stop_dev = irtty_stop_dev,
+ .do_write = irtty_do_write,
+ .chars_in_buffer = irtty_chars_in_buffer,
+ .wait_until_sent = irtty_wait_until_sent,
+ .set_speed = irtty_change_speed,
+ .set_dtr_rts = irtty_set_dtr_rts,
+};
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_ioctl (tty, file, cmd, arg)
+ *
+ * The Swiss army knife of system calls :-)
+ *
+ */
+static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct irtty_info { char name[6]; } info;
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int err = 0;
+
+ IRDA_ASSERT(priv != NULL, return -ENODEV;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
+
+ pr_debug("%s(cmd=0x%X)\n", __func__, cmd);
+
+ dev = priv->dev;
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ switch (cmd) {
+ case IRTTY_IOCTDONGLE:
+ /* this call blocks for completion */
+ err = sirdev_set_dongle(dev, (IRDA_DONGLE) arg);
+ break;
+
+ case IRTTY_IOCGET:
+ IRDA_ASSERT(dev->netdev != NULL, return -1;);
+
+ memset(&info, 0, sizeof(info));
+ strncpy(info.name, dev->netdev->name, sizeof(info.name)-1);
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ err = -EFAULT;
+ break;
+ default:
+ err = tty_mode_ioctl(tty, file, cmd, arg);
+ break;
+ }
+ return err;
+}
+
+
+/*
+ * Function irtty_open(tty)
+ *
+ * This function is called by the TTY module when the IrDA line
+ * discipline is called for. Because we are sure the tty line exists,
+ * we only have to link it to a free IrDA channel.
+ */
+static int irtty_open(struct tty_struct *tty)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv;
+ int ret = 0;
+
+ /* Module stuff handled via irda_ldisc.owner - Jean II */
+
+ /* stop the underlying driver */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ tty_driver_flush_buffer(tty);
+
+ /* apply mtt override */
+ sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
+
+ /* get a sir device instance for this driver */
+ dev = sirdev_get_instance(&sir_tty_drv, tty->name);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* allocate private device info block */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ priv->magic = IRTTY_MAGIC;
+ priv->tty = tty;
+ priv->dev = dev;
+
+ /* serialize with start_dev - in case we were racing with ifup */
+ mutex_lock(&irtty_mutex);
+
+ dev->priv = priv;
+ tty->disc_data = priv;
+ tty->receive_room = 65536;
+
+ mutex_unlock(&irtty_mutex);
+
+ pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name);
+
+ return 0;
+
+out_put:
+ sirdev_put_instance(dev);
+out:
+ return ret;
+}
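/*
 * How the ldisc open above is normally triggered from user space: a helper
 * such as irattach selects N_IRDA on a serial tty. The sketch below is an
 * illustration only, not part of this driver; the device path is just an
 * example, and N_IRDA's numeric value is assumed from <linux/tty.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_IRDA
#define N_IRDA 11		/* IrDA line discipline number (assumed, see <linux/tty.h>) */
#endif

int main(void)
{
	int ldisc = N_IRDA;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* example port */

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("attach N_IRDA");
		return 1;
	}
	/* While this fd stays open, irtty_open() above has run and an irda
	 * network device should exist; closing the fd ends in irtty_close(). */
	pause();
	close(fd);
	return 0;
}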
+
+/*
+ * Function irtty_close (tty)
+ *
+ * Close down an IrDA channel. This means flushing out any pending queues,
+ * and then restoring the TTY line discipline to what it was before it got
+ * hooked to IrDA (which usually is TTY again).
+ */
+static void irtty_close(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ /* Hm, with a dongle attached the dongle driver wants
+ * to close the dongle - which requires the use of
+ * some tty write and/or termios or ioctl operations.
+	 * Are we allowed to call those when we have already been
+	 * asked to shut down the ldisc?
+	 * If not, we should somehow mark the dev as stale.
+	 * The question remains how to close the dongle in this case...
+	 * For now let's assume we are allowed to issue tty driver calls
+ * until we return here from the ldisc close. I'm just wondering
+ * how this behaves with hotpluggable serial hardware like
+ * rs232-pcmcia card or usb-serial...
+ *
+ * priv->tty = NULL?;
+ */
+
+ /* we are dead now */
+ tty->disc_data = NULL;
+
+ sirdev_put_instance(priv->dev);
+
+ /* Stop tty */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ kfree(priv);
+
+ pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name);
+}
+
+/* ------------------------------------------------------- */
+
+static struct tty_ldisc_ops irda_ldisc = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "irda",
+ .flags = 0,
+ .open = irtty_open,
+ .close = irtty_close,
+ .read = NULL,
+ .write = NULL,
+ .ioctl = irtty_ioctl,
+ .poll = NULL,
+ .receive_buf = irtty_receive_buf,
+ .write_wakeup = irtty_write_wakeup,
+ .owner = THIS_MODULE,
+};
+
+/* ------------------------------------------------------- */
+
+static int __init irtty_sir_init(void)
+{
+ int err;
+
+ if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
+ net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n",
+ err);
+ return err;
+}
+
+static void __exit irtty_sir_cleanup(void)
+{
+ int err;
+
+ if ((err = tty_unregister_ldisc(N_IRDA))) {
+ net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n",
+ __func__, err);
+ }
+}
+
+module_init(irtty_sir_init);
+module_exit(irtty_sir_cleanup);
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("IrDA TTY device driver");
+MODULE_ALIAS_LDISC(N_IRDA);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/irda/drivers/irtty-sir.h b/drivers/staging/irda/drivers/irtty-sir.h
new file mode 100644
index 000000000000..b132d8f6eb13
--- /dev/null
+++ b/drivers/staging/irda/drivers/irtty-sir.h
@@ -0,0 +1,34 @@
+/*********************************************************************
+ *
+ * irtty-sir.h: definitions for the irtty_sir client driver (formerly irtty)
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRTTYSIR_H
+#define IRTTYSIR_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>	/* chipio_t */
+
+#define IRTTY_IOC_MAGIC 'e'
+#define IRTTY_IOCTDONGLE _IO(IRTTY_IOC_MAGIC, 1)
+#define IRTTY_IOCGET _IOR(IRTTY_IOC_MAGIC, 2, struct irtty_info)
+#define IRTTY_IOC_MAXNR 2
+
+struct sirtty_cb {
+ magic_t magic;
+
+ struct sir_dev *dev;
+ struct tty_struct *tty;
+
+ chipio_t io; /* IrDA controller information */
+};
+
+#endif
diff --git a/drivers/staging/irda/drivers/kingsun-sir.c b/drivers/staging/irda/drivers/kingsun-sir.c
new file mode 100644
index 000000000000..4fd4ac2fe09f
--- /dev/null
+++ b/drivers/staging/irda/drivers/kingsun-sir.c
@@ -0,0 +1,634 @@
+/*****************************************************************************
+*
+* Filename: kingsun-sir.c
+* Version: 0.1.1
+* Description: Irda KingSun/DonShine USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+*
+* Based on stir4200 and mcs7780 drivers, with (strange?) differences
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * This is my current (2007-04-25) understanding of how this dongle is supposed
+ * to work. This is based on reverse-engineering and examination of the packet
+ * data sent and received by the WinXP driver using USBSnoopy. Feel free to
+ * update here as more of this dongle is known:
+ *
+ * General: Unlike the other USB IrDA dongles, this particular dongle exposes,
+ * not two bulk (in and out) endpoints, but two *interrupt* ones. This dongle,
+ * like the bulk based ones (stir4200.c and mcs7780.c), requires polling in
+ * order to receive data.
+ * Transmission: Just like stir4200, this dongle uses a raw stream of data,
+ * which needs to be wrapped and escaped in a similar way as in stir4200.c.
+ * Reception: Poll-based, as in stir4200. Each read returns the contents of an
+ * 8-byte buffer, of which the first byte (LSB) indicates the number of bytes
+ * (1-7) of valid data contained within the remaining 7 bytes. For example, if
+ * the buffer had the following contents:
+ * 06 ff ff ff c0 01 04 aa
+ * This means that (06) there are 6 bytes of valid data. The byte 0xaa at the
+ * end is garbage (left over from a previous reception) and is discarded.
+ * If a read returns an "impossible" value as the length of valid data (such as
+ * 0x36) in the first byte, then the buffer is uninitialized (as is the case of
+ * first plug-in) and its contents should be discarded. There is currently no
+ * evidence that the top 5 bits of the 1st byte of the buffer can have values
+ * other than 0 once reception begins.
+ * Once valid bytes are collected, the assembled stream is a sequence of
+ * wrapped IrDA frames that is unwrapped and unescaped as in stir4200.c.
+ * BIG FAT WARNING: the dongle does *not* reset the RX buffer in any way after
+ * a successful read from the host, which means that in absence of further
+ * reception, repeated reads from the dongle will return the exact same
+ * contents repeatedly. Attempts to be smart and cache a previous read seem
+ * to result in corrupted packets, so this driver depends on the unwrap logic
+ * to sort out any repeated reads.
+ * Speed change: no commands observed so far to change speed, assumed fixed
+ * 9600 bps (SIR).
+ */
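/*
 * A minimal sketch of the RX buffer layout described above, fed with the
 * example read from the comment. Illustration only - the driver's real
 * receive path is kingsun_rcv_irq() below; the 1..7 range assumes the usual
 * max_rx of 8.
 */
#include <stdio.h>

/* Returns the number of payload bytes copied out of one 8-byte read, or 0
 * when the length byte is outside 1..7 (uninitialized dongle buffer). */
static int parse_kingsun_read(const unsigned char buf[8], unsigned char *out)
{
	int n = buf[0];
	int i;

	if (n < 1 || n > 7)
		return 0;	/* "impossible" length: discard the whole read */
	for (i = 0; i < n; i++)
		out[i] = buf[1 + i];
	return n;
}

int main(void)
{
	/* The example buffer from the comment above */
	const unsigned char buf[8] = {
		0x06, 0xff, 0xff, 0xff, 0xc0, 0x01, 0x04, 0xaa
	};
	unsigned char payload[7];
	int n = parse_kingsun_read(buf, payload);

	printf("%d valid bytes\n", n);	/* 6; the trailing 0xaa is discarded */
	return 0;
}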
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+/*
+ * According to lsusb, 0x07c0 is assigned to
+ * "Code Mercenaries Hard- und Software GmbH"
+ */
+#define KING_VENDOR_ID 0x07c0
+#define KING_PRODUCT_ID 0x4200
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ { USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+
+#define KINGSUN_FIFO_SIZE 4096
+#define KINGSUN_EP_IN 0
+#define KINGSUN_EP_OUT 1
+
+struct kingsun_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ __u8 *in_buf; /* receive buffer */
+ __u8 *out_buf; /* transmit buffer */
+ __u8 max_rx; /* max. atomic read from dongle
+ (usually 8), also size of in_buf */
+ __u8 max_tx; /* max. atomic write to dongle
+ (usually 8) */
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ spinlock_t lock;
+ int receiving;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+};
+
+/* Callback transmission routine */
+static void kingsun_send_irq(struct urb *urb)
+{
+ struct kingsun_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ }
+ netif_wake_queue(netdev);
+}
+
+/*
+ * Called from net/core when a new frame is available.
+ */
+static netdev_tx_t kingsun_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun;
+ int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen = async_wrap_skb(skb,
+ kingsun->out_buf,
+ KINGSUN_FIFO_SIZE);
+
+ /* Calculate how much data can be transmitted in this urb */
+ usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
+ kingsun->out_buf, wraplen, kingsun_send_irq,
+ kingsun, 1);
+
+ if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void kingsun_rcv_irq(struct urb *urb)
+{
+ struct kingsun_cb *kingsun = urb->context;
+ int ret;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length == kingsun->max_rx) {
+ __u8 *bytes = urb->transfer_buffer;
+ int i;
+
+ /* The very first byte in the buffer indicates the length of
+ valid data in the read. This byte must be in the range
+		   1..kingsun->max_rx - 1. Values outside this range indicate
+ an uninitialized Rx buffer when the dongle has just been
+ plugged in. */
+ if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
+ for (i = 1; i <= bytes[0]; i++) {
+ async_unwrap_char(kingsun->netdev,
+ &kingsun->netdev->stats,
+ &kingsun->rx_buff, bytes[i]);
+ }
+ kingsun->receiving =
+ (kingsun->rx_buff.state != OUTSIDE_FRAME)
+ ? 1 : 0;
+ }
+ } else if (urb->actual_length > 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "%s(): Unexpected response length, expected %d got %d\n",
+ __func__, kingsun->max_rx, urb->actual_length);
+ }
+ /* This urb has already been filled in kingsun_net_open */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function kingsun_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int kingsun_net_open(struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+ /* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_buff.skb, 1);
+ kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start first reception */
+ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
+ kingsun->in_buf, kingsun->max_rx,
+ kingsun_rcv_irq, kingsun, 1);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev,
+ "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+ - max rx packet known (in max_rx)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ if (kingsun->tx_urb) {
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb) {
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+ if (kingsun->rx_buff.skb) {
+ kfree_skb(kingsun->rx_buff.skb);
+ kingsun->rx_buff.skb = NULL;
+ kingsun->rx_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function kingsun_net_close (kingsun)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int kingsun_net_close(struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_kill_urb(kingsun->rx_urb);
+
+ usb_free_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->rx_urb);
+
+ kingsun->tx_urb = NULL;
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_buff.skb);
+ kingsun->rx_buff.skb = NULL;
+ kingsun->rx_buff.head = NULL;
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ if (kingsun->irlap)
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int kingsun_net_ioctl(struct net_device *netdev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ /* No observed commands for speed change */
+ ret = -EOPNOTSUPP;
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops kingsun_ops = {
+ .ndo_start_xmit = kingsun_hard_xmit,
+ .ndo_open = kingsun_net_open,
+ .ndo_stop = kingsun_net_close,
+ .ndo_do_ioctl = kingsun_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int kingsun_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *interface;
+ struct usb_endpoint_descriptor *endpoint;
+
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct kingsun_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+ int pipe, maxp_in, maxp_out;
+ __u8 ep_in;
+ __u8 ep_out;
+
+ /* Check that there really are two interrupt endpoints.
+ Check based on the one in drivers/usb/input/usbmouse.c
+ */
+ interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints != 2) {
+ dev_err(&intf->dev,
+ "kingsun-sir: expected 2 endpoints, found %d\n",
+ interface->desc.bNumEndpoints);
+ return -ENODEV;
+ }
+ endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&intf->dev,
+ "kingsun-sir: endpoint 0 is not interrupt IN\n");
+ return -ENODEV;
+ }
+
+ ep_in = endpoint->bEndpointAddress;
+ pipe = usb_rcvintpipe(dev, ep_in);
+ maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ if (maxp_in > 255 || maxp_in <= 1) {
+ dev_err(&intf->dev,
+ "endpoint 0 has max packet size %d not in range\n",
+ maxp_in);
+ return -ENODEV;
+ }
+
+ endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ dev_err(&intf->dev,
+ "kingsun-sir: endpoint 1 is not interrupt OUT\n");
+ return -ENODEV;
+ }
+
+ ep_out = endpoint->bEndpointAddress;
+ pipe = usb_sndintpipe(dev, ep_out);
+ maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+	if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->irlap = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->rx_urb = NULL;
+ kingsun->ep_in = ep_in;
+ kingsun->ep_out = ep_out;
+ kingsun->in_buf = NULL;
+ kingsun->out_buf = NULL;
+ kingsun->max_rx = (__u8)maxp_in;
+ kingsun->max_tx = (__u8)maxp_out;
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ /* Allocate input buffer */
+ kingsun->in_buf = kmalloc(kingsun->max_rx, GFP_KERNEL);
+ if (!kingsun->in_buf)
+ goto free_mem;
+
+ /* Allocate output buffer */
+ kingsun->out_buf = kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->out_buf)
+ goto free_mem;
+
+ printk(KERN_INFO "KingSun/DonShine IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+ /* That's the Rx capability. */
+ kingsun->qos.baud_rate.bits &= IR_9600;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &kingsun_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun/DonShine device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs not allocated, set to NULL
+ - max rx packet known (in max_rx)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+free_mem:
+ kfree(kingsun->out_buf);
+ kfree(kingsun->in_buf);
+ free_netdev(net);
+err_out1:
+ return ret;
+}
+
+/*
+ * The current device has been removed; the USB layer tells us to shut it down...
+ */
+static void kingsun_disconnect(struct usb_interface *intf)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ if (kingsun->tx_urb != NULL) {
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb != NULL) {
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+
+ kfree(kingsun->out_buf);
+ kfree(kingsun->in_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int kingsun_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+ if (kingsun->tx_urb != NULL) usb_kill_urb(kingsun->tx_urb);
+ if (kingsun->rx_urb != NULL) usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int kingsun_resume(struct usb_interface *intf)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL)
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "kingsun-sir",
+ .probe = kingsun_probe,
+ .disconnect = kingsun_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = kingsun_suspend,
+ .resume = kingsun_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/ks959-sir.c b/drivers/staging/irda/drivers/ks959-sir.c
new file mode 100644
index 000000000000..8025741e7586
--- /dev/null
+++ b/drivers/staging/irda/drivers/ks959-sir.c
@@ -0,0 +1,912 @@
+/*****************************************************************************
+*
+* Filename: ks959-sir.c
+* Version: 0.1.2
+* Description: Irda KingSun KS-959 USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+* with help from Domen Puncer <domen@coderock.org>
+*
+* Based on stir4200, mcs7780, kingsun-sir drivers.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * Following is my most current (2007-07-17) understanding of how the Kingsun
+ * KS-959 dongle is supposed to work. This information was deduced by
+ * reverse-engineering and examining the USB traffic captured with USBSnoopy
+ * from the WinXP driver. Feel free to update here as more of the dongle is
+ * known.
+ *
+ * My most sincere thanks must go to Domen Puncer <domen@coderock.org> for
+ * invaluable help in cracking the obfuscation and padding required for this
+ * dongle.
+ *
+ * General: This dongle exposes one interface with one interrupt IN endpoint.
+ * However, the interrupt endpoint is NOT used at all for this dongle. Instead,
+ * this dongle uses control transfers for everything, including sending and
+ * receiving the IrDA frame data. Apparently the interrupt endpoint is just a
+ * dummy to ensure the dongle has a valid interface to present to the PC. And I
+ * thought the DonShine dongle was weird... In addition, this dongle uses
+ * obfuscation (?!?!), applied at the USB level, to hide the traffic, both sent
+ * and received, from the dongle. I call it obfuscation because the XOR keying
+ * and padding required to produce USB traffic acceptable to the dongle
+ * cannot be explained by any other technical requirement.
+ *
+ * Transmission: To transmit an IrDA frame, the driver must prepare a control
+ * URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue <length of valid data before padding, little endian>
+ * wIndex 0x0000
+ * wLength <length of padded data>
+ * The payload packet must be manually wrapped and escaped (as in stir4200.c),
+ * then padded and obfuscated before being sent. Both padding and obfuscation
+ * are implemented in the procedure obfuscate_tx_buffer(). Suffice to say, the
+ * designer/programmer of the dongle used his name as a source for the
+ * obfuscation. WTF?!
+ * Apparently the dongle cannot handle payloads larger than 256 bytes. The
+ * driver has to perform fragmentation in order to send anything larger than
+ * this limit.
+ *
+ * Reception: To receive data, the driver must poll the dongle regularly (like
+ * kingsun-sir.c) with control URBs and the following as a setup packet:
+ * bRequestType USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x01
+ * wValue 0x0200
+ * wIndex 0x0000
+ * wLength 0x0800 (size of available buffer)
+ * If there is data to be read, it will be returned as the response payload.
+ * This data is (apparently) not padded, but it is obfuscated. To de-obfuscate
+ * it, the driver must XOR every byte, in sequence, with a value that starts at
+ * 1 and is incremented with each byte processed, and then with 0x55. The value
+ * incremented with each byte processed overflows as an unsigned char. The
+ * resulting bytes form a wrapped SIR frame that is unwrapped and unescaped
+ * as in stir4200.c. The incremented value is NOT reset with each frame, but is
+ * kept across the entire session with the dongle. Also, the dongle inserts an
+ * extra garbage byte with value 0x95 (after decoding) every 0xff bytes, which
+ * must be skipped.
+ *
+ * Speed change: To change the speed of the dongle, the driver prepares a
+ * control URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue 0x0200
+ * wIndex 0x0001
+ * wLength 0x0008 (length of the payload)
+ * The payload is a 8-byte record, apparently identical to the one used in
+ * drivers/usb/serial/cypress_m8.c to change speed:
+ * __u32 baudSpeed;
+ * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits
+ * unsigned int : 1;
+ * unsigned int stopBits : 1;
+ * unsigned int parityEnable : 1;
+ * unsigned int parityType : 1;
+ * unsigned int : 1;
+ * unsigned int reset : 1;
+ * unsigned char reserved[3]; // set to 0
+ *
+ * For now only SIR speeds have been observed with this dongle. Therefore,
+ * nothing is known on what changes (if any) must be done to frame wrapping /
+ * unwrapping for higher than SIR speeds. This driver assumes no change is
+ * necessary and announces support for all the way to 57600 bps. Although the
+ * package announces support for up to 4 Mbps, tests with a Sony Ericsson K300
+ * phone show corruption when receiving large frames at 115200 bps, the highest
+ * speed announced by the phone. However, transmission at 115200 bps is OK. Go
+ * figure. Since I don't know whether the phone or the dongle is at fault, max
+ * announced speed is 57600 bps until someone produces a device that can run
+ * at higher speeds with this dongle.
+ */
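/*
 * A minimal sketch of the receive-side de-obfuscation described above.
 * Illustration only - the driver's real implementation is in ks959_rcv_irq()
 * below, where the persistent counter is rx_variable_xormask.
 */
#include <stddef.h>

/* Decodes one chunk of obfuscated RX data; returns how many usable bytes were
 * written to 'out'. The counter must persist across calls and frames. */
static size_t ks959_deobfuscate_sketch(unsigned char *counter,
				       const unsigned char *in, size_t len,
				       unsigned char *out)
{
	size_t i, n = 0;

	for (i = 0; i < len; i++) {
		unsigned char plain;

		(*counter)++;			/* wraps around as unsigned char */
		plain = in[i] ^ *counter ^ 0x55;
		if (*counter != 0)		/* counter wrapped: garbage byte, skip it */
			out[n++] = plain;
	}
	return n;	/* 'out' now holds wrapped SIR data ready for unwrapping */
}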
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#define KS959_VENDOR_ID 0x07d0
+#define KS959_PRODUCT_ID 0x4959
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+#define KINGSUN_REQ_RECV 0x01
+#define KINGSUN_REQ_SEND 0x09
+
+#define KINGSUN_RCV_FIFO_SIZE 2048 /* Max length we can receive */
+#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
+#define KINGSUN_SND_PACKET_SIZE 256 /* Max packet dongle can handle */
+
+struct ks959_speedparams {
+ __le32 baudrate; /* baud rate, little endian */
+ __u8 flags;
+ __u8 reserved[3];
+} __packed;
+
+#define KS_DATA_5_BITS 0x00
+#define KS_DATA_6_BITS 0x01
+#define KS_DATA_7_BITS 0x02
+#define KS_DATA_8_BITS 0x03
+
+#define KS_STOP_BITS_1 0x00
+#define KS_STOP_BITS_2 0x08
+
+#define KS_PAR_DISABLE 0x00
+#define KS_PAR_EVEN 0x10
+#define KS_PAR_ODD 0x30
+#define KS_RESET 0x80
+
+struct ks959_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ struct usb_ctrlrequest *tx_setuprequest;
+ struct urb *tx_urb;
+ __u8 *tx_buf_clear;
+ unsigned int tx_buf_clear_used;
+ unsigned int tx_buf_clear_sent;
+ __u8 *tx_buf_xored;
+
+ struct usb_ctrlrequest *rx_setuprequest;
+ struct urb *rx_urb;
+ __u8 *rx_buf;
+ __u8 rx_variable_xormask;
+ iobuff_t rx_unwrap_buff;
+
+ struct usb_ctrlrequest *speed_setuprequest;
+ struct urb *speed_urb;
+ struct ks959_speedparams speedparams;
+ unsigned int new_speed;
+
+ spinlock_t lock;
+ int receiving;
+};
+
+/* Procedure to perform the obfuscation/padding expected by the dongle
+ *
+ * buf_cleartext (IN) Cleartext version of the IrDA frame to transmit
+ * len_cleartext (IN) Length of the cleartext version of IrDA frame
+ * buf_xoredtext (OUT) Obfuscated version of frame built by proc
+ * len_maxbuf (OUT) Maximum space available at buf_xoredtext
+ *
+ * (return) length of obfuscated frame with padding
+ *
+ * If not enough space (as indicated by len_maxbuf vs. required padding),
+ * zero is returned
+ *
+ * The value of lookup_string is actually a required portion of the algorithm.
+ * Seems the designer of the dongle wanted to state who exactly is responsible
+ * for implementing obfuscation. Send your best (or other) wishes to him ]:-)
+ */
+static unsigned int obfuscate_tx_buffer(const __u8 * buf_cleartext,
+ unsigned int len_cleartext,
+ __u8 * buf_xoredtext,
+ unsigned int len_maxbuf)
+{
+ unsigned int len_xoredtext;
+
+ /* Calculate required length with padding, check for necessary space */
+ len_xoredtext = ((len_cleartext + 7) & ~0x7) + 0x10;
+ if (len_xoredtext <= len_maxbuf) {
+ static const __u8 lookup_string[] = "wangshuofei19710";
+ __u8 xor_mask;
+
+ /* Unlike the WinXP driver, we *do* clear out the padding */
+ memset(buf_xoredtext, 0, len_xoredtext);
+
+ xor_mask = lookup_string[(len_cleartext & 0x0f) ^ 0x06] ^ 0x55;
+
+ while (len_cleartext-- > 0) {
+ *buf_xoredtext++ = *buf_cleartext++ ^ xor_mask;
+ }
+ } else {
+ len_xoredtext = 0;
+ }
+ return len_xoredtext;
+}
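/* Worked example of the padding rule above: a 21-byte wrapped frame becomes
 * ((21 + 7) & ~0x7) + 0x10 = 24 + 16 = 40 obfuscated bytes. The largest
 * cleartext fragment the driver submits is (KINGSUN_SND_PACKET_SIZE & ~0x7)
 * - 0x10 = 240 bytes (see ks959_submit_tx_fragment() below), which pads to
 * exactly 256 bytes, the dongle's per-transfer limit.
 */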
+
+/* Callback transmission routine */
+static void ks959_speed_irq(struct urb *urb)
+{
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&urb->dev->dev,
+ "ks959_speed_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ }
+}
+
+/* Send a control request to change speed of the dongle */
+static int ks959_change_speed(struct ks959_cb *kingsun, unsigned speed)
+{
+ static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
+ 57600, 115200, 576000, 1152000, 4000000, 0
+ };
+ int err;
+ unsigned int i;
+
+ if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
+ return -ENOMEM;
+
+ /* Check that requested speed is among the supported ones */
+ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
+ if (supported_speeds[i] == 0)
+ return -EOPNOTSUPP;
+
+ memset(&(kingsun->speedparams), 0, sizeof(struct ks959_speedparams));
+ kingsun->speedparams.baudrate = cpu_to_le32(speed);
+ kingsun->speedparams.flags = KS_DATA_8_BITS;
+
+ /* speed_setuprequest pre-filled in ks959_probe */
+ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->speed_setuprequest,
+ &(kingsun->speedparams),
+ sizeof(struct ks959_speedparams), ks959_speed_irq,
+ kingsun);
+ kingsun->speed_urb->status = 0;
+ err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
+
+ return err;
+}
+
+/* Submit one fragment of an IrDA frame to the dongle */
+static void ks959_send_irq(struct urb *urb);
+static int ks959_submit_tx_fragment(struct ks959_cb *kingsun)
+{
+ unsigned int padlen;
+ unsigned int wraplen;
+ int ret;
+
+ /* Check whether current plaintext can produce a padded buffer that fits
+ within the range handled by the dongle */
+ wraplen = (KINGSUN_SND_PACKET_SIZE & ~0x7) - 0x10;
+ if (wraplen > kingsun->tx_buf_clear_used)
+ wraplen = kingsun->tx_buf_clear_used;
+
+ /* Perform dongle obfuscation. Also remove the portion of the frame that
+ was just obfuscated and will now be sent to the dongle. */
+ padlen = obfuscate_tx_buffer(kingsun->tx_buf_clear, wraplen,
+ kingsun->tx_buf_xored,
+ KINGSUN_SND_PACKET_SIZE);
+
+ /* Calculate how much data can be transmitted in this urb */
+ kingsun->tx_setuprequest->wValue = cpu_to_le16(wraplen);
+ kingsun->tx_setuprequest->wLength = cpu_to_le16(padlen);
+ /* Rest of the fields were filled in ks959_probe */
+ usb_fill_control_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->tx_setuprequest,
+ kingsun->tx_buf_xored, padlen,
+ ks959_send_irq, kingsun);
+ kingsun->tx_urb->status = 0;
+ ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
+
+ /* Remember how much data was sent, in order to update at callback */
+ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
+ return ret;
+}
+
+/* Callback transmission routine */
+static void ks959_send_irq(struct urb *urb)
+{
+ struct ks959_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+ int ret = 0;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ return;
+ }
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* Update data remaining to be sent */
+ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
+ memmove(kingsun->tx_buf_clear,
+ kingsun->tx_buf_clear +
+ kingsun->tx_buf_clear_sent,
+ kingsun->tx_buf_clear_used -
+ kingsun->tx_buf_clear_sent);
+ }
+ kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
+ kingsun->tx_buf_clear_sent = 0;
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* There is more data to be sent */
+ if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: failed tx_urb submit: %d\n",
+ ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ }
+ } else {
+ /* All data sent, send next speed && wake network queue */
+ if (kingsun->new_speed != -1 &&
+ cpu_to_le32(kingsun->new_speed) !=
+ kingsun->speedparams.baudrate)
+ ks959_change_speed(kingsun, kingsun->new_speed);
+
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+/*
+ * Called from net/core when a new frame is available.
+ */
+static netdev_tx_t ks959_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ks959_cb *kingsun;
+ unsigned int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+ kingsun->new_speed = irda_get_next_speed(skb);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen =
+ async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
+ kingsun->tx_buf_clear_used = wraplen;
+
+ if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void ks959_rcv_irq(struct urb *urb)
+{
+ struct ks959_cb *kingsun = urb->context;
+ int ret;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+			"ks959_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length > 0) {
+ __u8 *bytes = urb->transfer_buffer;
+ unsigned int i;
+
+ for (i = 0; i < urb->actual_length; i++) {
+ /* De-obfuscation implemented here: variable portion of
+ xormask is incremented, and then used with the encoded
+ byte for the XOR. The result of the operation is used
+ to unwrap the SIR frame. */
+ kingsun->rx_variable_xormask++;
+ bytes[i] =
+ bytes[i] ^ kingsun->rx_variable_xormask ^ 0x55u;
+
+ /* rx_variable_xormask doubles as an index counter so we
+ can skip the byte at 0xff (wrapped around to 0).
+ */
+ if (kingsun->rx_variable_xormask != 0) {
+ async_unwrap_char(kingsun->netdev,
+ &kingsun->netdev->stats,
+ &kingsun->rx_unwrap_buff,
+ bytes[i]);
+ }
+ }
+ kingsun->receiving =
+ (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
+ }
+
+	/* This urb has already been filled in ks959_net_open. Setup
+ packet must be re-filled, but it is assumed that urb keeps the
+ pointer to the initial setup packet, as well as the payload buffer.
+ Setup packet is already pre-filled at ks959_probe.
+ */
+ urb->status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function ks959_net_open (netdev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int ks959_net_open(struct net_device *netdev)
+{
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+	/* At this point, urbs are NULL, and skb is NULL (see ks959_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_unwrap_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
+ kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->speed_urb)
+ goto free_mem;
+
+ /* Initialize speed for dongle */
+ kingsun->new_speed = 9600;
+ err = ks959_change_speed(kingsun, 9600);
+ if (err < 0)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ err = -ENOMEM;
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start reception. Setup request already pre-filled in ks959_probe */
+ usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->rx_setuprequest,
+ kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE,
+ ks959_rcv_irq, kingsun);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev,
+ "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+ - max rx packet known (in max_rx)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ if (kingsun->rx_unwrap_buff.skb) {
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function ks959_net_close (netdev)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int ks959_net_close(struct net_device *netdev)
+{
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ if (kingsun->irlap)
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ return ks959_change_speed(kingsun, irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops ks959_ops = {
+ .ndo_start_xmit = ks959_hard_xmit,
+ .ndo_open = ks959_net_open,
+ .ndo_stop = ks959_net_close,
+ .ndo_do_ioctl = ks959_net_ioctl,
+};
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int ks959_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ks959_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+ if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->irlap = NULL;
+ kingsun->tx_setuprequest = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->tx_buf_clear = NULL;
+ kingsun->tx_buf_xored = NULL;
+ kingsun->tx_buf_clear_used = 0;
+ kingsun->tx_buf_clear_sent = 0;
+
+ kingsun->rx_setuprequest = NULL;
+ kingsun->rx_urb = NULL;
+ kingsun->rx_buf = NULL;
+ kingsun->rx_variable_xormask = 0;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ kingsun->speed_setuprequest = NULL;
+ kingsun->speed_urb = NULL;
+ kingsun->speedparams.baudrate = 0;
+
+ /* Allocate input buffer */
+ kingsun->rx_buf = kmalloc(KINGSUN_RCV_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->rx_buf)
+ goto free_mem;
+
+ /* Allocate input setup packet */
+ kingsun->rx_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->rx_setuprequest)
+ goto free_mem;
+ kingsun->rx_setuprequest->bRequestType =
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->rx_setuprequest->bRequest = KINGSUN_REQ_RECV;
+ kingsun->rx_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->rx_setuprequest->wIndex = 0;
+ kingsun->rx_setuprequest->wLength = cpu_to_le16(KINGSUN_RCV_FIFO_SIZE);
+
+ /* Allocate output buffer */
+ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_clear)
+ goto free_mem;
+ kingsun->tx_buf_xored = kmalloc(KINGSUN_SND_PACKET_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_xored)
+ goto free_mem;
+
+ /* Allocate and initialize output setup packet */
+ kingsun->tx_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->tx_setuprequest)
+ goto free_mem;
+ kingsun->tx_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->tx_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->tx_setuprequest->wValue = 0;
+ kingsun->tx_setuprequest->wIndex = 0;
+ kingsun->tx_setuprequest->wLength = 0;
+
+ /* Allocate and initialize speed setup packet */
+ kingsun->speed_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->speed_setuprequest)
+ goto free_mem;
+ kingsun->speed_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
+ kingsun->speed_setuprequest->wLength =
+ cpu_to_le16(sizeof(struct ks959_speedparams));
+
+ printk(KERN_INFO "KingSun KS-959 IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+ /* Baud rates known to be supported. Please uncomment if devices (other
+	   than a Sony Ericsson K300 phone) can be shown to support higher speeds
+ with this dongle.
+ */
+ kingsun->qos.baud_rate.bits =
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &ks959_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun KS-959 device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - setup requests pre-filled
+ - urbs not allocated, set to NULL
+	   - max rx packet known (is KINGSUN_RCV_FIFO_SIZE)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+ free_mem:
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_setuprequest);
+ kfree(kingsun->tx_buf_xored);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_setuprequest);
+ kfree(kingsun->rx_buf);
+ free_netdev(net);
+ err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void ks959_disconnect(struct usb_interface *intf)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ if (kingsun->speed_urb != NULL) {
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ }
+ if (kingsun->tx_urb != NULL) {
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb != NULL) {
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_setuprequest);
+ kfree(kingsun->tx_buf_xored);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_setuprequest);
+ kfree(kingsun->rx_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int ks959_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+ if (kingsun->speed_urb != NULL)
+ usb_kill_urb(kingsun->speed_urb);
+ if (kingsun->tx_urb != NULL)
+ usb_kill_urb(kingsun->tx_urb);
+ if (kingsun->rx_urb != NULL)
+ usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int ks959_resume(struct usb_interface *intf)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL) {
+ /* Setup request already filled in ks959_probe */
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ }
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "ks959-sir",
+ .probe = ks959_probe,
+ .disconnect = ks959_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = ks959_suspend,
+ .resume = ks959_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/ksdazzle-sir.c b/drivers/staging/irda/drivers/ksdazzle-sir.c
new file mode 100644
index 000000000000..d2a0755df596
--- /dev/null
+++ b/drivers/staging/irda/drivers/ksdazzle-sir.c
@@ -0,0 +1,813 @@
+/*****************************************************************************
+*
+* Filename: ksdazzle.c
+* Version: 0.1.2
+* Description: Irda KingSun Dazzle USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+*
+* Based on stir4200, mcs7780, kingsun-sir drivers.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * Following is my most current (2007-07-26) understanding of how the Kingsun
+ * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This
+ * information was deduced by examining the USB traffic captured with USBSnoopy
+ * from the WinXP driver. Feel free to update here as more of the dongle is
+ * known.
+ *
+ * General: This dongle exposes one interface with two interrupt endpoints, one
+ * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine
+ * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and
+ * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir.
+ *
+ * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then
+ * split it into multiple segments of up to 7 bytes each, and transmit each in
+ * sequence. It seems that sending a single big block (like kingsun-sir does)
+ * won't work with this dongle. Each segment needs to be prefixed with a value
+ * equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload
+ * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9,
+ * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the
+ * payload, not considered by the prefix, are ignored (set to 0 by this
+ * implementation).
+ *
+ * Reception: To receive data, the driver must poll the dongle regularly (like
+ * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned
+ * in payloads from 0 to 8 bytes long. When concatenated, these payloads form
+ * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir.
+ *
+ * Speed change: To change the speed of the dongle, the driver prepares a
+ * control URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue 0x0200
+ * wIndex 0x0001
+ * wLength 0x0008 (length of the payload)
+ * The payload is an 8-byte record, apparently identical to the one used in
+ * drivers/usb/serial/cypress_m8.c to change speed:
+ * __u32 baudSpeed;
+ * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits
+ * unsigned int : 1;
+ * unsigned int stopBits : 1;
+ * unsigned int parityEnable : 1;
+ * unsigned int parityType : 1;
+ * unsigned int : 1;
+ * unsigned int reset : 1;
+ * unsigned char reserved[3]; // set to 0
+ *
+ * For now only SIR speeds have been observed with this dongle. Therefore,
+ * nothing is known about what changes (if any) must be made to frame wrapping /
+ * unwrapping for speeds higher than SIR. This driver assumes no change is
+ * necessary and announces support for speeds all the way up to 115200 bps.
+ */
+
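To make the transmit framing described above concrete, here is a minimal sketch of how one 8-byte interrupt payload is built from up to 7 bytes of the wrapped IrDA stream. It is an editorial illustration only, not part of this patch; the function and variable names are invented, and the driver's real implementation is ksdazzle_submit_tx_fragment() further down.

/* Sketch: build one 8-byte TX payload with the 0xF8 + length prefix. */
#include <string.h>

static void build_tx_payload(unsigned char payload[8],
			     const unsigned char *wrapped, unsigned int avail)
{
	unsigned int n = avail > 7 ? 7 : avail;	/* at most 7 data bytes per payload */

	memset(payload, 0, 8);		/* bytes past the declared length are ignored */
	payload[0] = 0xf8 + n;		/* 1 byte -> 0xf9, ..., 7 bytes -> 0xff */
	memcpy(payload + 1, wrapped, n);
}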
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#define KSDAZZLE_VENDOR_ID 0x07d0
+#define KSDAZZLE_PRODUCT_ID 0x4100
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+#define KINGSUN_REQ_RECV 0x01
+#define KINGSUN_REQ_SEND 0x09
+
+#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
+#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */
+
+struct ksdazzle_speedparams {
+ __le32 baudrate; /* baud rate, little endian */
+ __u8 flags;
+ __u8 reserved[3];
+} __packed;
+
+#define KS_DATA_5_BITS 0x00
+#define KS_DATA_6_BITS 0x01
+#define KS_DATA_7_BITS 0x02
+#define KS_DATA_8_BITS 0x03
+
+#define KS_STOP_BITS_1 0x00
+#define KS_STOP_BITS_2 0x08
+
+#define KS_PAR_DISABLE 0x00
+#define KS_PAR_EVEN 0x10
+#define KS_PAR_ODD 0x30
+#define KS_RESET 0x80
+
+#define KINGSUN_EP_IN 0
+#define KINGSUN_EP_OUT 1
+
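Tying the speed-change description in the header comment to the structures just defined, the sketch below shows how the setup packet and the 8-byte record would be filled for 9600 bps with 8 data bits. It is an editorial illustration only, not part of this patch: the helper name is invented, memset is assumed to be available through the kernel's string helpers, and the driver itself performs the equivalent work in ksdazzle_probe() and ksdazzle_change_speed() below.

/* Sketch: the speed-change control request for 9600 bps, 8 data bits. */
static void ksdazzle_example_fill_speed_request(struct usb_ctrlrequest *req,
						struct ksdazzle_speedparams *params)
{
	req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	req->bRequest = KINGSUN_REQ_SEND;		/* 0x09 */
	req->wValue = cpu_to_le16(0x0200);
	req->wIndex = cpu_to_le16(0x0001);
	req->wLength = cpu_to_le16(sizeof(*params));	/* 0x0008 */

	memset(params, 0, sizeof(*params));
	params->baudrate = cpu_to_le32(9600);		/* little endian on the wire */
	params->flags = KS_DATA_8_BITS;			/* 1 stop bit, no parity */
}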
+struct ksdazzle_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ struct urb *tx_urb;
+ __u8 *tx_buf_clear;
+ unsigned int tx_buf_clear_used;
+ unsigned int tx_buf_clear_sent;
+ __u8 tx_payload[8];
+
+ struct urb *rx_urb;
+ __u8 *rx_buf;
+ iobuff_t rx_unwrap_buff;
+
+ struct usb_ctrlrequest *speed_setuprequest;
+ struct urb *speed_urb;
+ struct ksdazzle_speedparams speedparams;
+ unsigned int new_speed;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ spinlock_t lock;
+ int receiving;
+};
+
+/* Callback transmission routine */
+static void ksdazzle_speed_irq(struct urb *urb)
+{
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0)
+ dev_err(&urb->dev->dev,
+ "ksdazzle_speed_irq: urb asynchronously failed - %d\n",
+ urb->status);
+}
+
+/* Send a control request to change speed of the dongle */
+static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed)
+{
+ static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
+ 57600, 115200, 576000, 1152000, 4000000, 0
+ };
+ int err;
+ unsigned int i;
+
+ if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
+ return -ENOMEM;
+
+ /* Check that requested speed is among the supported ones */
+ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
+ if (supported_speeds[i] == 0)
+ return -EOPNOTSUPP;
+
+ memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams));
+ kingsun->speedparams.baudrate = cpu_to_le32(speed);
+ kingsun->speedparams.flags = KS_DATA_8_BITS;
+
+ /* speed_setuprequest pre-filled in ksdazzle_probe */
+ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->speed_setuprequest,
+ &(kingsun->speedparams),
+ sizeof(struct ksdazzle_speedparams),
+ ksdazzle_speed_irq, kingsun);
+ kingsun->speed_urb->status = 0;
+ err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
+
+ return err;
+}
+
+/* Submit one fragment of an IrDA frame to the dongle */
+static void ksdazzle_send_irq(struct urb *urb);
+static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun)
+{
+ unsigned int wraplen;
+ int ret;
+
+ /* We can send at most 7 bytes of payload at a time */
+ wraplen = 7;
+ if (wraplen > kingsun->tx_buf_clear_used)
+ wraplen = kingsun->tx_buf_clear_used;
+
+ /* Prepare payload prefix with used length */
+ memset(kingsun->tx_payload, 0, 8);
+ kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen;
+ memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen);
+
+ usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
+ kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1);
+ kingsun->tx_urb->status = 0;
+ ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
+
+ /* Remember how much data was sent, in order to update at callback */
+ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
+ return ret;
+}
+
+/* Callback transmission routine */
+static void ksdazzle_send_irq(struct urb *urb)
+{
+ struct ksdazzle_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+ int ret = 0;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ return;
+ }
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* Update data remaining to be sent */
+ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
+ memmove(kingsun->tx_buf_clear,
+ kingsun->tx_buf_clear +
+ kingsun->tx_buf_clear_sent,
+ kingsun->tx_buf_clear_used -
+ kingsun->tx_buf_clear_sent);
+ }
+ kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
+ kingsun->tx_buf_clear_sent = 0;
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* There is more data to be sent */
+ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: failed tx_urb submit: %d\n",
+ ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ }
+ } else {
+ /* All data sent, send next speed && wake network queue */
+ if (kingsun->new_speed != -1 &&
+ cpu_to_le32(kingsun->new_speed) !=
+ kingsun->speedparams.baudrate)
+ ksdazzle_change_speed(kingsun,
+ kingsun->new_speed);
+
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t ksdazzle_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun;
+ unsigned int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+	/* the IrDA wrapping routines don't deal with non-linear skbs */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+ kingsun->new_speed = irda_get_next_speed(skb);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen =
+ async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
+ kingsun->tx_buf_clear_used = wraplen;
+
+ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void ksdazzle_rcv_irq(struct urb *urb)
+{
+ struct ksdazzle_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length > 0) {
+ __u8 *bytes = urb->transfer_buffer;
+ unsigned int i;
+
+ for (i = 0; i < urb->actual_length; i++) {
+ async_unwrap_char(netdev, &netdev->stats,
+ &kingsun->rx_unwrap_buff, bytes[i]);
+ }
+ kingsun->receiving =
+ (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
+ }
+
+	/* This urb has already been filled in ksdazzle_net_open. It is assumed
+	   that the urb still holds the pointer to the payload buffer.
+	 */
+ urb->status = 0;
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function ksdazzle_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int ksdazzle_net_open(struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+ /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_unwrap_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
+ kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->speed_urb)
+ goto free_mem;
+
+ /* Initialize speed for dongle */
+ kingsun->new_speed = 9600;
+ err = ksdazzle_change_speed(kingsun, 9600);
+ if (err < 0)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+	 * open a new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ err = -ENOMEM;
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start reception. */
+ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
+ kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq,
+ kingsun, 1);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev, "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+	   - max rx packet known (KINGSUN_RCV_MAX)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ if (kingsun->rx_unwrap_buff.skb) {
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function ksdazzle_net_close (dev)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int ksdazzle_net_close(struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ return ksdazzle_change_speed(kingsun,
+ irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops ksdazzle_ops = {
+ .ndo_start_xmit = ksdazzle_hard_xmit,
+ .ndo_open = ksdazzle_net_open,
+ .ndo_stop = ksdazzle_net_close,
+ .ndo_do_ioctl = ksdazzle_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int ksdazzle_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *interface;
+ struct usb_endpoint_descriptor *endpoint;
+
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ksdazzle_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+ int pipe, maxp_in, maxp_out;
+ __u8 ep_in;
+ __u8 ep_out;
+
+	/* Check that there really are two interrupt endpoints. The check is
+	   based on the one in drivers/usb/input/usbmouse.c.
+	 */
+ interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints != 2) {
+ dev_err(&intf->dev, "ksdazzle: expected 2 endpoints, found %d\n",
+ interface->desc.bNumEndpoints);
+ return -ENODEV;
+ }
+ endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 0 is not interrupt IN\n");
+ return -ENODEV;
+ }
+
+ ep_in = endpoint->bEndpointAddress;
+ pipe = usb_rcvintpipe(dev, ep_in);
+ maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ if (maxp_in > 255 || maxp_in <= 1) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 0 has max packet size %d not in range [2..255]\n",
+ maxp_in);
+ return -ENODEV;
+ }
+
+ endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 1 is not interrupt OUT\n");
+ return -ENODEV;
+ }
+
+ ep_out = endpoint->bEndpointAddress;
+ pipe = usb_sndintpipe(dev, ep_out);
+ maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+ if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->ep_in = ep_in;
+ kingsun->ep_out = ep_out;
+ kingsun->irlap = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->tx_buf_clear = NULL;
+ kingsun->tx_buf_clear_used = 0;
+ kingsun->tx_buf_clear_sent = 0;
+
+ kingsun->rx_urb = NULL;
+ kingsun->rx_buf = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ kingsun->speed_setuprequest = NULL;
+ kingsun->speed_urb = NULL;
+ kingsun->speedparams.baudrate = 0;
+
+ /* Allocate input buffer */
+ kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL);
+ if (!kingsun->rx_buf)
+ goto free_mem;
+
+ /* Allocate output buffer */
+ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_clear)
+ goto free_mem;
+
+ /* Allocate and initialize speed setup packet */
+ kingsun->speed_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->speed_setuprequest)
+ goto free_mem;
+ kingsun->speed_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
+ kingsun->speed_setuprequest->wLength =
+ cpu_to_le16(sizeof(struct ksdazzle_speedparams));
+
+ printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+	/* Baud rates known to be supported. Please extend this list if devices
+	   (other than a Sony Ericsson K300 phone) can be shown to support
+	   higher speeds with this dongle.
+	 */
+ kingsun->qos.baud_rate.bits =
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &ksdazzle_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - setup requests pre-filled
+ - urbs not allocated, set to NULL
+	   - max rx packet known (is KINGSUN_RCV_MAX)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+ free_mem:
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_buf);
+ free_netdev(net);
+ err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void ksdazzle_disconnect(struct usb_interface *intf)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+ if (kingsun->speed_urb != NULL)
+ usb_kill_urb(kingsun->speed_urb);
+ if (kingsun->tx_urb != NULL)
+ usb_kill_urb(kingsun->tx_urb);
+ if (kingsun->rx_urb != NULL)
+ usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int ksdazzle_resume(struct usb_interface *intf)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL) {
+ /* Setup request already filled in ksdazzle_probe */
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ }
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "ksdazzle-sir",
+ .probe = ksdazzle_probe,
+ .disconnect = ksdazzle_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = ksdazzle_suspend,
+ .resume = ksdazzle_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/litelink-sir.c b/drivers/staging/irda/drivers/litelink-sir.c
new file mode 100644
index 000000000000..8eefcb44bac3
--- /dev/null
+++ b/drivers/staging/irda/drivers/litelink-sir.c
@@ -0,0 +1,199 @@
+/*********************************************************************
+ *
+ * Filename: litelink.c
+ * Version: 1.1
+ * Description: Driver for the Parallax LiteLink dongle
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri May 7 12:50:33 1999
+ * Modified at: Fri Dec 17 09:14:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+/*
+ * Modified at: Thu Jan 15 2003
+ * Modified by: Eugene Crosser <crosser@average.org>
+ *
+ * Convert to "new" IRDA infrastructure for kernel 2.6
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 1 ms */
+
+static int litelink_open(struct sir_dev *dev);
+static int litelink_close(struct sir_dev *dev);
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed);
+static int litelink_reset(struct sir_dev *dev);
+
+/* These are the baudrates supported - 9600 must be last one! */
+static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
+
+static struct dongle_driver litelink = {
+ .owner = THIS_MODULE,
+ .driver_name = "Parallax LiteLink",
+ .type = IRDA_LITELINK_DONGLE,
+ .open = litelink_open,
+ .close = litelink_close,
+ .reset = litelink_reset,
+ .set_speed = litelink_change_speed,
+};
+
+static int __init litelink_sir_init(void)
+{
+ return irda_register_dongle(&litelink);
+}
+
+static void __exit litelink_sir_cleanup(void)
+{
+ irda_unregister_dongle(&litelink);
+}
+
+static int litelink_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power up dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int litelink_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function litelink_change_speed (task)
+ *
+ * Change speed of the Litelink dongle. To cycle through the available
+ * baud rates, pulse RTS low for a few ms.
+ */
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int i;
+
+ /* dongle already reset by irda-thread - current speed (dongle and
+ * port) is the default speed (115200 for litelink!)
+ */
+
+	/* Cycle through available baudrates until we reach the correct one */
+ for (i = 0; baud_rates[i] != speed; i++) {
+
+ /* end-of-list reached due to invalid speed request */
+ if (baud_rates[i] == 9600)
+ break;
+
+ /* Set DTR, clear RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+ }
+
+ dev->speed = baud_rates[i];
+
+	/* an invalid baudrate should not happen - but if it does, we return
+	 * -EINVAL and leave the dongle configured for 9600 so the stack has
+	 * a chance to recover
+	 */
+
+ return (dev->speed == speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function litelink_reset (task)
+ *
+ * Reset the Litelink type dongle.
+ *
+ */
+static int litelink_reset(struct sir_dev *dev)
+{
+ /* probably the power-up can be dropped here, but with only
+ * 15 usec delay it's not worth the risk unless somebody with
+ * the hardware confirms it doesn't break anything...
+ */
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Clear RTS to reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+	/* This dongle's speed defaults to 115200 bps */
+ dev->speed = 115200;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Parallax Litelink dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Litelink module
+ *
+ */
+module_init(litelink_sir_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Litelink module
+ *
+ */
+module_exit(litelink_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/ma600-sir.c b/drivers/staging/irda/drivers/ma600-sir.c
new file mode 100644
index 000000000000..a764817b47f1
--- /dev/null
+++ b/drivers/staging/irda/drivers/ma600-sir.c
@@ -0,0 +1,253 @@
+/*********************************************************************
+ *
+ * Filename: ma600.c
+ * Version: 0.1
+ * Description: Implementation of the MA600 dongle
+ * Status: Experimental.
+ * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
+ * Created at: Sat Jun 10 20:02:35 2000
+ * Modified at: Sat Aug 16 09:34:13 2003
+ * Modified by: Martin Diehl <mad@mdiehl.de> (modified for new sir_dev)
+ *
+ * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
+ * information on the MA600 dongle
+ *
+ * Copyright (c) 2000 Leung, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int ma600_open(struct sir_dev *);
+static int ma600_close(struct sir_dev *);
+static int ma600_change_speed(struct sir_dev *, unsigned);
+static int ma600_reset(struct sir_dev *);
+
+/* control byte for MA600 */
+#define MA600_9600 0x00
+#define MA600_19200 0x01
+#define MA600_38400 0x02
+#define MA600_57600 0x03
+#define MA600_115200 0x04
+#define MA600_DEV_ID1 0x05
+#define MA600_DEV_ID2 0x06
+#define MA600_2400 0x08
+
+static struct dongle_driver ma600 = {
+ .owner = THIS_MODULE,
+ .driver_name = "MA600",
+ .type = IRDA_MA600_DONGLE,
+ .open = ma600_open,
+ .close = ma600_close,
+ .reset = ma600_reset,
+ .set_speed = ma600_change_speed,
+};
+
+
+static int __init ma600_sir_init(void)
+{
+ return irda_register_dongle(&ma600);
+}
+
+static void __exit ma600_sir_cleanup(void)
+{
+ irda_unregister_dongle(&ma600);
+}
+
+/*
+ Power on:
+ (0) Clear RTS and DTR for 1 second
+ (1) Set RTS and DTR for 1 second
+ (2) 9600 bps now
+ Note: assume RTS, DTR are clear before
+*/
+static int ma600_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Explicitly set the speeds we can accept */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
+ |IR_57600|IR_115200;
+ /* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int ma600_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+static __u8 get_control_byte(__u32 speed)
+{
+ __u8 byte;
+
+ switch (speed) {
+ default:
+ case 115200:
+ byte = MA600_115200;
+ break;
+ case 57600:
+ byte = MA600_57600;
+ break;
+ case 38400:
+ byte = MA600_38400;
+ break;
+ case 19200:
+ byte = MA600_19200;
+ break;
+ case 9600:
+ byte = MA600_9600;
+ break;
+ case 2400:
+ byte = MA600_2400;
+ break;
+ }
+
+ return byte;
+}
+
+/*
+ * Function ma600_change_speed (dev, speed)
+ *
+ *    Set the speed for the MA600 type dongle. The dongle has already been
+ *    reset to its default speed by the irda thread state machine.
+ *
+ *    Algorithm:
+ *    1. reset (already done by the irda thread state machine)
+ *    2. clear RTS, set DTR and wait for 1 ms
+ *    3. send the control byte to the MA600 through TXD to set the new baud
+ *       rate, and wait until the stop bit of the control byte has been sent
+ *       (for 9600 baud this takes about 10 msec)
+ *    4. set RTS, set DTR (return to normal operation)
+ *    5. wait at least 10 ms; the new setting (baud rate, etc.) takes effect
+ *       from here on
+ */
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 byte;
+
+ pr_debug("%s(), speed=%d (was %d)\n", __func__,
+ speed, dev->speed);
+
+ /* dongle already reset, dongle and port at default speed (9600) */
+
+ /* Set RTS low for 1 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ mdelay(1);
+
+ /* Write control byte */
+ byte = get_control_byte(speed);
+ sirdev_raw_write(dev, &byte, sizeof(byte));
+
+ /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud*/
+ msleep(15); /* old ma600 uses 15ms */
+
+#if 1
+ /* read-back of the control byte. ma600 is the first dongle driver
+ * which uses this so there might be some unidentified issues.
+ * Disable this in case of problems with readback.
+ */
+
+ sirdev_raw_read(dev, &byte, sizeof(byte));
+ if (byte != get_control_byte(speed)) {
+ net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n",
+ __func__, (unsigned)byte,
+ (unsigned)get_control_byte(speed));
+ return -1;
+	} else
+		pr_debug("%s() control byte read-back OK\n", __func__);
+#endif
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10ms */
+ msleep(10);
+
+ /* dongle is now switched to the new speed */
+ dev->speed = speed;
+
+ return 0;
+}
+
+/*
+ * Function ma600_reset (dev)
+ *
+ * This function resets the ma600 dongle.
+ *
+ * Algorithm:
+ * 0. DTR=0, RTS=1 and wait 10 ms
+ * 1. DTR=1, RTS=1 and wait 10 ms
+ * 2. 9600 bps now
+ */
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+static int ma600_reset(struct sir_dev *dev)
+{
+ /* Reset the dongle : set DTR low for 10 ms */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ msleep(10);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ msleep(10);
+
+ dev->speed = 9600; /* That's the dongle-default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
+MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
+
+module_init(ma600_sir_init);
+module_exit(ma600_sir_cleanup);
+
diff --git a/drivers/staging/irda/drivers/mcp2120-sir.c b/drivers/staging/irda/drivers/mcp2120-sir.c
new file mode 100644
index 000000000000..2e33f91bfe8f
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcp2120-sir.c
@@ -0,0 +1,224 @@
+/*********************************************************************
+ *
+ *
+ * Filename: mcp2120.c
+ * Version: 1.0
+ * Description: Implementation for the MCP2120 (Microchip)
+ * Status: Experimental.
+ * Author: Felix Tang (tangf@eyetap.org)
+ * Created at: Sun Mar 31 19:32:12 EST 2002
+ * Based on code by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 2002 Felix Tang, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int mcp2120_reset(struct sir_dev *dev);
+static int mcp2120_open(struct sir_dev *dev);
+static int mcp2120_close(struct sir_dev *dev);
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
+
+#define MCP2120_9600 0x87
+#define MCP2120_19200 0x8B
+#define MCP2120_38400 0x85
+#define MCP2120_57600 0x83
+#define MCP2120_115200 0x81
+
+#define MCP2120_COMMIT 0x11
+
+static struct dongle_driver mcp2120 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Microchip MCP2120",
+ .type = IRDA_MCP2120_DONGLE,
+ .open = mcp2120_open,
+ .close = mcp2120_close,
+ .reset = mcp2120_reset,
+ .set_speed = mcp2120_change_speed,
+};
+
+static int __init mcp2120_sir_init(void)
+{
+ return irda_register_dongle(&mcp2120);
+}
+
+static void __exit mcp2120_sir_cleanup(void)
+{
+ irda_unregister_dongle(&mcp2120);
+}
+
+static int mcp2120_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+	/* it seems no explicit power-on is required here, and the reset switches it on anyway */
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ return 0;
+}
+
+static int mcp2120_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ /* reset and inhibit mcp2120 */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ // sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function mcp2120_change_speed (dev, speed)
+ *
+ * Set the speed for the MCP2120.
+ *
+ */
+
+#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
+
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+ /* Set DTR to enter command mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(500);
+
+ ret = 0;
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = MCP2120_9600;
+ //printk("mcp2120 9600\n");
+ break;
+ case 19200:
+ control[0] = MCP2120_19200;
+ //printk("mcp2120 19200\n");
+ break;
+		case 38400:
+ control[0] = MCP2120_38400;
+ //printk("mcp2120 38400\n");
+ break;
+ case 57600:
+ control[0] = MCP2120_57600;
+ //printk("mcp2120 57600\n");
+ break;
+ case 115200:
+ control[0] = MCP2120_115200;
+ //printk("mcp2120 115200\n");
+ break;
+ }
+ control[1] = MCP2120_COMMIT;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+ dev->speed = speed;
+
+ state = MCP2120_STATE_WAIT_SPEED;
+ delay = 100;
+ //printk("mcp2120_change_speed: dongle_speed\n");
+ break;
+
+ case MCP2120_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ //printk("mcp2120_change_speed: mcp_wait\n");
+ break;
+
+ default:
+		net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function mcp2120_reset (driver)
+ *
+ * This function resets the mcp2120 dongle.
+ *
+ * Info: -set RTS to reset mcp2120
+ * -set DTR to set mcp2120 software command mode
+ * -mcp2120 defaults to 9600 baud after reset
+ *
+ * Algorithm:
+ * 0. Set RTS to reset mcp2120.
+ * 1. Clear RTS and wait for device reset timer of 30 ms (max).
+ *
+ */
+
+#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int mcp2120_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ //printk("mcp2120_reset: dongle_reset\n");
+ /* Reset dongle by setting RTS*/
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ state = MCP2120_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT1_RESET:
+ //printk("mcp2120_reset: mcp2120_wait1\n");
+ /* clear RTS and wait for at least 30 ms. */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ state = MCP2120_STATE_WAIT2_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT2_RESET:
+ //printk("mcp2120_reset mcp2120_wait2\n");
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ break;
+
+ default:
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
+MODULE_DESCRIPTION("Microchip MCP2120");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
+
+module_init(mcp2120_sir_init);
+module_exit(mcp2120_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/mcs7780.c b/drivers/staging/irda/drivers/mcs7780.c
new file mode 100644
index 000000000000..c3f0b254b344
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcs7780.c
@@ -0,0 +1,987 @@
+/*****************************************************************************
+*
+* Filename: mcs7780.c
+* Version: 0.4-alpha
+* Description: Irda MosChip USB Dongle Driver
+* Authors: Lukasz Stelmach <stlman@poczta.fm>
+* Brian Pugh <bpugh@cs.pdx.edu>
+* Judy Fischbach <jfisch@cs.pdx.edu>
+*
+* Based on stir4200 driver, but some things done differently.
+* Based on earlier driver by Paul Stewart <stewart@parc.com>
+*
+* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
+* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
+* Copyright (C) 2005, Judy Fischbach <jfisch@cs.pdx.edu>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * MCS7780 is a simple USB to IrDA bridge by MosChip. It is neither
+ * compatible with irda-usb nor with stir4200, although it is quite
+ * similar to the latter as far as the general idea of operation is
+ * concerned: it requires the software to do all the framing work at SIR
+ * speeds, while the hardware takes care of the framing at MIR and FIR
+ * speeds. It supports all speeds from 2400 bps through 4 Mbps.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#include "mcs7780.h"
+
+#define MCS_VENDOR_ID 0x9710
+#define MCS_PRODUCT_ID 0x7780
+
+static const struct usb_device_id mcs_table[] = {
+ /* MosChip Corp., MCS7780 FIR-USB Adapter */
+ {USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)},
+ {},
+};
+
+MODULE_AUTHOR("Brian Pugh <bpugh@cs.pdx.edu>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for MosChip MCS7780");
+MODULE_VERSION("0.3alpha");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(usb, mcs_table);
+
+static int qos_mtt_bits = 0x07 /* > 1ms */ ;
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+static int receive_mode = 0x1;
+module_param(receive_mode, int, 0);
+MODULE_PARM_DESC(receive_mode,
+ "Receive mode of the device (1:fast, 0:slow, default:1)");
+
+static int sir_tweak = 1;
+module_param(sir_tweak, int, 0444);
+MODULE_PARM_DESC(sir_tweak,
+ "Default pulse width (1:1.6us, 0:3/16 bit, default:1).");
+
+static int transceiver_type = MCS_TSC_VISHAY;
+module_param(transceiver_type, int, 0444);
+MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h.");
+
+static struct usb_driver mcs_driver = {
+ .name = "mcs7780",
+ .probe = mcs_probe,
+ .disconnect = mcs_disconnect,
+ .id_table = mcs_table,
+};
+
+/* speed flag selection by direct addressing.
+addr = (speed >> 8) & 0x0f
+
+0x1 57600 0x2 115200 0x4 1152000 0x5 9600
+0x6 38400 0x9 2400 0xa 576000 0xb 19200
+
+4 Mbps (which collides with the 2400 entry) must be checked separately. Since
+it also has to be programmed in a different manner, that is not a big problem.
+*/
+static __u16 mcs_speed_set[16] = { 0,
+ MCS_SPEED_57600,
+ MCS_SPEED_115200,
+ 0,
+ MCS_SPEED_1152000,
+ MCS_SPEED_9600,
+ MCS_SPEED_38400,
+ 0, 0,
+ MCS_SPEED_2400,
+ MCS_SPEED_576000,
+ MCS_SPEED_19200,
+ 0, 0, 0,
+};
+
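As a quick illustration of the direct-addressing scheme described above, the sketch below shows how a speed value selects its flag: (speed >> 8) & 0x0f maps 115200 to slot 0x2 and 9600 to slot 0x5, while 4 Mbps lands on slot 0x9 together with 2400 and so has to be special-cased. This is an editorial sketch, not part of this patch, and speed_to_flag is an invented name.

/* Sketch: look up the speed flag via the table above. */
static __u16 speed_to_flag(unsigned int speed)
{
	if (speed == 4000000)
		return 0;	/* shares slot 0x9 with 2400; handled separately */
	return mcs_speed_set[(speed >> 8) & 0x0f];	/* e.g. 115200 -> index 0x2 */
}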
+/* Set given 16 bit register with a 16 bit value. Send control message
+ * to set dongle register. */
+static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+{
+ struct usb_device *dev = mcs->usbdev;
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
+ MCS_WR_RTYPE, val, reg, NULL, 0,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+}
+
+/* Get 16 bit register value. Send control message to read dongle register. */
+static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+{
+ struct usb_device *dev = mcs->usbdev;
+ void *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+ memcpy(val, dmabuf, sizeof(__u16));
+ kfree(dmabuf);
+
+ return ret;
+}
+
+/* Set up communication between the mcs7780 and TFDU chips. It is described
+ * in more detail in the data sheet. The setup sequence puts the Vishay
+ * transceiver into high speed mode. It will also receive SIR speed
+ * packets, but at reduced sensitivity.
+ */
+
+/* 0: OK 1:ERROR */
+static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ __u16 rval;
+
+ /* mcs_get_reg should read exactly two bytes from the dongle */
+ ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
+ if (unlikely(ret != 2)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* The MCS_XCVR_CONF bit puts the transceiver into configuration
+ * mode. The MCS_MODE0 bit must start out high (1) and then
+ * transition to low and the MCS_STFIR and MCS_MODE1 bits must
+ * be low.
+ */
+ rval |= (MCS_MODE0 | MCS_XCVR_CONF);
+ rval &= ~MCS_STFIR;
+ rval &= ~MCS_MODE1;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ rval &= ~MCS_MODE0;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ rval &= ~MCS_XCVR_CONF;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ ret = 0;
+error:
+ return ret;
+}
+
+/* Setup a communication between mcs7780 and agilent chip. */
+static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
+{
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
+ return 1;
+}
+
+/* Setup a communication between mcs7780 and sharp chip. */
+static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
+{
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
+ return 1;
+}
+
+/* Common setup for all transceivers */
+static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ __u16 rval;
+ const char *msg;
+
+ msg = "Basic transceiver setup error";
+
+ /* read value of MODE Register, set the DRIVER and RESET bits
+ * and write value back out to MODE Register
+ */
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if(unlikely(ret != 2))
+ goto error;
+ rval |= MCS_DRIVER; /* put the mcs7780 into configuration mode. */
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ rval = 0; /* set min pulse width to 0 initially. */
+ ret = mcs_set_reg(mcs, MCS_MINRXPW_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if(unlikely(ret != 2))
+ goto error;
+
+ rval &= ~MCS_FIR; /* turn off fir mode. */
+ if(mcs->sir_tweak)
+ rval |= MCS_SIR16US; /* 1.6us pulse width */
+ else
+ rval &= ~MCS_SIR16US; /* 3/16 bit time pulse width */
+
+ /* make sure ask mode and back to back packets are off. */
+ rval &= ~(MCS_BBTG | MCS_ASK);
+
+ rval &= ~MCS_SPEED_MASK;
+ rval |= MCS_SPEED_9600; /* make sure initial speed is 9600. */
+ mcs->speed = 9600;
+ mcs->new_speed = 0; /* new_speed is set to 0 */
+ rval &= ~MCS_PLLPWDN; /* disable power down. */
+
+	/* make sure the device determines direction and that automatic SIP
+	 * pulse sending is on.
+	 */
+ rval |= MCS_DTD | MCS_SIPEN;
+
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ msg = "transceiver model specific setup error";
+ switch (mcs->transceiver_type) {
+ case MCS_TSC_VISHAY:
+ ret = mcs_setup_transceiver_vishay(mcs);
+ break;
+
+ case MCS_TSC_SHARP:
+ ret = mcs_setup_transceiver_sharp(mcs);
+ break;
+
+ case MCS_TSC_AGILENT:
+ ret = mcs_setup_transceiver_agilent(mcs);
+ break;
+
+ default:
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
+ ret = 1;
+ }
+ if (unlikely(ret))
+ goto error;
+
+	/* If the transceiver is not SHARP: if receive mode is set, set the
+	 * RXFAST bit in the XCVR register, otherwise clear it.
+	 */
+ if (mcs->transceiver_type != MCS_TSC_SHARP) {
+
+ ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
+ if (unlikely(ret != 2))
+ goto error;
+ if (mcs->receive_mode)
+ rval |= MCS_RXFAST;
+ else
+ rval &= ~MCS_RXFAST;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+ }
+
+ msg = "transceiver reset";
+
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if (unlikely(ret != 2))
+ goto error;
+
+ /* reset the mcs7780 so all changes take effect. */
+ rval &= ~MCS_RESET;
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if (unlikely(ret))
+ goto error;
+ else
+ return ret;
+
+error:
+ net_err_ratelimited("%s\n", msg);
+ return ret;
+}
+
+/* Wraps the data in format for SIR */
+static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf)
+{
+ int wraplen;
+
+ /* 2: full frame length, including "the length" */
+ wraplen = async_wrap_skb(skb, buf + 2, 4094);
+
+ wraplen += 2;
+ buf[0] = wraplen & 0xff;
+ buf[1] = (wraplen >> 8) & 0xff;
+
+ return wraplen;
+}
+
+/* Wraps the data in format for FIR */
+static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ unsigned int len = 0;
+ __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
+
+ /* add 2 bytes for length value and 4 bytes for fcs. */
+ len = skb->len + 6;
+
+ /* The mcs7780 requires that the first two bytes are the packet
+ * length in little endian order. Note: the length value includes
+ * the two bytes for the length value itself.
+ */
+ buf[0] = len & 0xff;
+ buf[1] = (len >> 8) & 0xff;
+ /* copy the data into the tx buffer. */
+ skb_copy_from_linear_data(skb, buf + 2, skb->len);
+ /* put the fcs in the last four bytes in little endian order. */
+ buf[len - 4] = fcs & 0xff;
+ buf[len - 3] = (fcs >> 8) & 0xff;
+ buf[len - 2] = (fcs >> 16) & 0xff;
+ buf[len - 1] = (fcs >> 24) & 0xff;
+
+ return len;
+}
+
+/* Wraps the data in format for MIR */
+static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ __u16 fcs = 0;
+ int len = skb->len + 4;
+
+ fcs = ~(irda_calc_crc16(~fcs, skb->data, skb->len));
+ /* put the total packet length in first. Note: packet length
+ * value includes the two bytes that hold the packet length
+ * itself.
+ */
+ buf[0] = len & 0xff;
+ buf[1] = (len >> 8) & 0xff;
+ /* copy the data */
+ skb_copy_from_linear_data(skb, buf + 2, skb->len);
+ /* put the fcs in last two bytes in little endian order. */
+ buf[len - 2] = fcs & 0xff;
+ buf[len - 1] = (fcs >> 8) & 0xff;
+
+ return len;
+}
+
+/* Unwrap received packets at MIR speed. A 16 bit crc_ccitt checksum is
+ * used for the fcs. When performed over the entire packet the result
+ * should be GOOD_FCS = 0xf0b8. Hands the unwrapped data off to the IrDA
+ * layer via a sk_buff.
+ */
+static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
+{
+ __u16 fcs;
+ int new_len;
+ struct sk_buff *skb;
+
+ /* Assume that the frames are going to fill a single packet
+ * rather than span multiple packets.
+ */
+
+ new_len = len - 2;
+ if(unlikely(new_len <= 0)) {
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
+ ++mcs->netdev->stats.rx_errors;
+ ++mcs->netdev->stats.rx_length_errors;
+ return;
+ }
+ fcs = 0;
+ fcs = irda_calc_crc16(~fcs, buf, len);
+
+ if(fcs != GOOD_FCS) {
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
+ mcs->netdev->stats.rx_errors++;
+ mcs->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = dev_alloc_skb(new_len + 1);
+ if(unlikely(!skb)) {
+ ++mcs->netdev->stats.rx_dropped;
+ return;
+ }
+
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, buf, new_len);
+ skb_put(skb, new_len);
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = mcs->netdev;
+
+ netif_rx(skb);
+
+ mcs->netdev->stats.rx_packets++;
+ mcs->netdev->stats.rx_bytes += new_len;
+}
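+
+/* Editorial note: illustration only, not part of this driver. It shows, in
+ * plain userspace C, the HDLC-style FCS property that mcs_wrap_mir_skb() and
+ * the check above rely on: the sender appends the one's complement of
+ * CRC-CCITT(0xffff, data) in little endian, and running the same CRC over
+ * data plus the two FCS bytes always yields GOOD_FCS = 0xf0b8. Assumes the
+ * reflected polynomial 0x8408 used by the kernel's crc_ccitt(), which
+ * irda_calc_crc16() wraps; crc_ccitt_bitwise() is a made-up helper name.
+ * Kept under #if 0 so it is never compiled.
+ */
+#if 0
+#include <stdint.h>
+#include <stddef.h>
+#include <assert.h>
+
+static uint16_t crc_ccitt_bitwise(uint16_t crc, const uint8_t *p, size_t len)
+{
+ size_t i;
+ int k;
+
+ for (i = 0; i < len; i++) {
+ crc ^= p[i];
+ for (k = 0; k < 8; k++)
+ crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
+ }
+ return crc;
+}
+
+int main(void)
+{
+ uint8_t frame[6] = { 0x01, 0x02, 0x03, 0x04 }; /* 4 data + 2 FCS bytes */
+ uint16_t fcs = ~crc_ccitt_bitwise(0xffff, frame, 4);
+
+ frame[4] = fcs & 0xff; /* FCS, little endian, as in mcs_wrap_mir_skb() */
+ frame[5] = (fcs >> 8) & 0xff;
+
+ /* Receive-side check, as in mcs_unwrap_mir(): must equal 0xf0b8. */
+ assert(crc_ccitt_bitwise(0xffff, frame, 6) == 0xf0b8);
+ return 0;
+}
+#endif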
+
+/* Unwrap received packets at FIR speed. A 32-bit CRC checksum is
+ * used for the fcs. Hands the unwrapped data off to the IrDA
+ * layer via a sk_buff.
+ */
+static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
+{
+ __u32 fcs;
+ int new_len;
+ struct sk_buff *skb;
+
+ /* Assume that the frames are going to fill a single packet
+ * rather than span multiple packets. This is most likely a false
+ * assumption.
+ */
+
+ new_len = len - 4;
+ if(unlikely(new_len <= 0)) {
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
+ ++mcs->netdev->stats.rx_errors;
+ ++mcs->netdev->stats.rx_length_errors;
+ return;
+ }
+
+ fcs = ~(crc32_le(~0, buf, new_len));
+ if(fcs != get_unaligned_le32(buf + new_len)) {
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
+ mcs->netdev->stats.rx_errors++;
+ mcs->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = dev_alloc_skb(new_len + 1);
+ if(unlikely(!skb)) {
+ ++mcs->netdev->stats.rx_dropped;
+ return;
+ }
+
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, buf, new_len);
+ skb_put(skb, new_len);
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = mcs->netdev;
+
+ netif_rx(skb);
+
+ mcs->netdev->stats.rx_packets++;
+ mcs->netdev->stats.rx_bytes += new_len;
+}
+
+
+/* Allocates URBs for both receive and transmit.
+ * Returns 0 on allocation failure and 1 on success.
+ */
+static inline int mcs_setup_urbs(struct mcs_cb *mcs)
+{
+ mcs->rx_urb = NULL;
+
+ mcs->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mcs->tx_urb)
+ return 0;
+
+ mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mcs->rx_urb) {
+ usb_free_urb(mcs->tx_urb);
+ mcs->tx_urb = NULL;
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Sets up state to be initially outside frame, gets receive urb,
+ * sets status to successful and then submits the urb to start
+ * receiving the data.
+ */
+static inline int mcs_receive_start(struct mcs_cb *mcs)
+{
+ mcs->rx_buff.in_frame = FALSE;
+ mcs->rx_buff.state = OUTSIDE_FRAME;
+
+ usb_fill_bulk_urb(mcs->rx_urb, mcs->usbdev,
+ usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in),
+ mcs->in_buf, 4096, mcs_receive_irq, mcs);
+
+ mcs->rx_urb->status = 0;
+ return usb_submit_urb(mcs->rx_urb, GFP_KERNEL);
+}
+
+/* Finds the in and out endpoints for the mcs control block */
+static inline int mcs_find_endpoints(struct mcs_cb *mcs,
+ struct usb_host_endpoint *ep, int epnum)
+{
+ int i;
+ int ret = 0;
+
+ /* If no place to store the endpoints just return */
+ if (!ep)
+ return ret;
+
+ /* cycle through the endpoints, recording the first IN and first OUT one found */
+ for (i = 0; i < epnum; i++) {
+ if (ep[i].desc.bEndpointAddress & USB_DIR_IN)
+ mcs->ep_in = ep[i].desc.bEndpointAddress;
+ else
+ mcs->ep_out = ep[i].desc.bEndpointAddress;
+
+ /* MosChip says that the chip has only two bulk
+ * endpoints. Find one for each direction and move on.
+ */
+ if ((mcs->ep_in != 0) && (mcs->ep_out != 0)) {
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
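+
+/* Editorial note: the loop above classifies endpoints purely by bit 7 of
+ * bEndpointAddress (USB_DIR_IN == 0x80). For a typical two-endpoint layout,
+ * using the hypothetical addresses 0x81 and 0x02:
+ *
+ * 0x81 & 0x80 != 0 -> bulk IN endpoint (ep_in = 0x81)
+ * 0x02 & 0x80 == 0 -> bulk OUT endpoint (ep_out = 0x02)
+ */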
+
+static void mcs_speed_work(struct work_struct *work)
+{
+ struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
+ struct net_device *netdev = mcs->netdev;
+
+ mcs_speed_change(mcs);
+ netif_wake_queue(netdev);
+}
+
+/* Function to change the speed of the mcs7780. Fully supports SIR,
+ * MIR, and FIR speeds.
+ */
+static int mcs_speed_change(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ int rst = 0;
+ int cnt = 0;
+ __u16 nspeed;
+ __u16 rval;
+
+ nspeed = mcs_speed_set[(mcs->new_speed >> 8) & 0x0f];
+
+ do {
+ mcs_get_reg(mcs, MCS_RESV_REG, &rval);
+ } while(cnt++ < 100 && (rval & MCS_IRINTX));
+
+ if (cnt > 100) {
+ net_err_ratelimited("unable to change speed\n");
+ ret = -EIO;
+ goto error;
+ }
+
+ mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+
+ /* MINRXPW values recommended by MosChip */
+ if (mcs->new_speed <= 115200) {
+ rval &= ~MCS_FIR;
+
+ if ((rst = (mcs->speed > 115200)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);
+
+ } else if (mcs->new_speed <= 1152000) {
+ rval &= ~MCS_FIR;
+
+ if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
+
+ } else {
+ rval |= MCS_FIR;
+
+ if ((rst = (mcs->speed != 4000000)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
+
+ }
+
+ rval &= ~MCS_SPEED_MASK;
+ rval |= nspeed;
+
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ if (rst)
+ switch (mcs->transceiver_type) {
+ case MCS_TSC_VISHAY:
+ ret = mcs_setup_transceiver_vishay(mcs);
+ break;
+
+ case MCS_TSC_SHARP:
+ ret = mcs_setup_transceiver_sharp(mcs);
+ break;
+
+ case MCS_TSC_AGILENT:
+ ret = mcs_setup_transceiver_agilent(mcs);
+ break;
+
+ default:
+ ret = 1;
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
+ }
+ if (unlikely(ret))
+ goto error;
+
+ mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ rval &= ~MCS_RESET;
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+
+ mcs->speed = mcs->new_speed;
+error:
+ mcs->new_speed = 0;
+ return ret;
+}
+
+/* Ioctl calls are not supported at this time; this could be an area of future work. */
+static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ /* struct if_irda_req *irq = (struct if_irda_req *)rq; */
+ /* struct mcs_cb *mcs = netdev_priv(netdev); */
+ int ret = 0;
+
+ switch (cmd) {
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* Network device is taken down, done by "ifconfig irda0 down" */
+static int mcs_net_close(struct net_device *netdev)
+{
+ int ret = 0;
+ struct mcs_cb *mcs = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ kfree_skb(mcs->rx_buff.skb);
+
+ /* kill and free the receive and transmit URBs */
+ usb_kill_urb(mcs->rx_urb);
+ usb_free_urb(mcs->rx_urb);
+ usb_kill_urb(mcs->tx_urb);
+ usb_free_urb(mcs->tx_urb);
+
+ /* Stop and remove instance of IrLAP */
+ if (mcs->irlap)
+ irlap_close(mcs->irlap);
+
+ mcs->irlap = NULL;
+ return ret;
+}
+
+/* Network device is taken up, done by "ifconfig irda0 up" */
+static int mcs_net_open(struct net_device *netdev)
+{
+ struct mcs_cb *mcs = netdev_priv(netdev);
+ char hwname[16];
+ int ret = 0;
+
+ ret = usb_clear_halt(mcs->usbdev,
+ usb_sndbulkpipe(mcs->usbdev, mcs->ep_in));
+ if (ret)
+ goto error1;
+ ret = usb_clear_halt(mcs->usbdev,
+ usb_rcvbulkpipe(mcs->usbdev, mcs->ep_out));
+ if (ret)
+ goto error1;
+
+ ret = mcs_setup_transceiver(mcs);
+ if (ret)
+ goto error1;
+
+ ret = -ENOMEM;
+
+ /* Initialize for SIR/FIR to copy data directly into skb. */
+ mcs->receiving = 0;
+ mcs->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ mcs->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!mcs->rx_buff.skb)
+ goto error1;
+
+ skb_reserve(mcs->rx_buff.skb, 1);
+ mcs->rx_buff.head = mcs->rx_buff.skb->data;
+
+ /*
+ * Now that everything should be initialized properly,
+ * open a new IrLAP layer instance to take care of us...
+ * Note: this will immediately trigger a speed change...
+ */
+ sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
+ mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
+ if (!mcs->irlap) {
+ net_err_ratelimited("mcs7780: irlap_open failed\n");
+ goto error2;
+ }
+
+ if (!mcs_setup_urbs(mcs))
+ goto error3;
+
+ ret = mcs_receive_start(mcs);
+ if (ret)
+ goto error4;
+
+ netif_start_queue(netdev);
+ return 0;
+
+error4:
+ usb_free_urb(mcs->rx_urb);
+ usb_free_urb(mcs->tx_urb);
+error3:
+ irlap_close(mcs->irlap);
+error2:
+ kfree_skb(mcs->rx_buff.skb);
+error1:
+ return ret;
+}
+
+/* Receive callback function. */
+static void mcs_receive_irq(struct urb *urb)
+{
+ __u8 *bytes;
+ struct mcs_cb *mcs = urb->context;
+ int i;
+ int ret;
+
+ if (!netif_running(mcs->netdev))
+ return;
+
+ if (urb->status)
+ return;
+
+ if (urb->actual_length > 0) {
+ bytes = urb->transfer_buffer;
+
+ /* MCS returns frames without BOF and EOF
+ * I assume it returns whole frames.
+ */
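+ /* Editorial note: 0xc0 and 0xc1 below are the IrDA SIR BOF and EOF
+ * markers, which async_unwrap_char() expects around each frame.
+ */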
+ /* SIR speed */
+ if(mcs->speed < 576000) {
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, 0xc0);
+
+ for (i = 0; i < urb->actual_length; i++)
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, bytes[i]);
+
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, 0xc1);
+ }
+ /* MIR speed */
+ else if(mcs->speed == 576000 || mcs->speed == 1152000) {
+ mcs_unwrap_mir(mcs, urb->transfer_buffer,
+ urb->actual_length);
+ }
+ /* FIR speed */
+ else {
+ mcs_unwrap_fir(mcs, urb->transfer_buffer,
+ urb->actual_length);
+ }
+ }
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/* Transmit callback function. */
+static void mcs_send_irq(struct urb *urb)
+{
+ struct mcs_cb *mcs = urb->context;
+ struct net_device *ndev = mcs->netdev;
+
+ if (unlikely(mcs->new_speed))
+ schedule_work(&mcs->work);
+ else
+ netif_wake_queue(ndev);
+}
+
+/* Hard transmit function, called by the network layer to send a frame. */
+static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ unsigned long flags;
+ struct mcs_cb *mcs;
+ int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(ndev);
+ mcs = netdev_priv(ndev);
+
+ spin_lock_irqsave(&mcs->lock, flags);
+
+ mcs->new_speed = irda_get_next_speed(skb);
+ if (likely(mcs->new_speed == mcs->speed))
+ mcs->new_speed = 0;
+
+ /* SIR speed */
+ if(mcs->speed < 576000) {
+ wraplen = mcs_wrap_sir_skb(skb, mcs->out_buf);
+ }
+ /* MIR speed */
+ else if(mcs->speed == 576000 || mcs->speed == 1152000) {
+ wraplen = mcs_wrap_mir_skb(skb, mcs->out_buf);
+ }
+ /* FIR speed */
+ else {
+ wraplen = mcs_wrap_fir_skb(skb, mcs->out_buf);
+ }
+ usb_fill_bulk_urb(mcs->tx_urb, mcs->usbdev,
+ usb_sndbulkpipe(mcs->usbdev, mcs->ep_out),
+ mcs->out_buf, wraplen, mcs_send_irq, mcs);
+
+ if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
+ net_err_ratelimited("failed tx_urb: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ mcs->netdev->stats.tx_errors++;
+ netif_start_queue(ndev);
+ }
+ } else {
+ mcs->netdev->stats.tx_packets++;
+ mcs->netdev->stats.tx_bytes += skb->len;
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&mcs->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops mcs_netdev_ops = {
+ .ndo_open = mcs_net_open,
+ .ndo_stop = mcs_net_close,
+ .ndo_start_xmit = mcs_hard_xmit,
+ .ndo_do_ioctl = mcs_net_ioctl,
+};
+
+/*
+ * This function is called by the USB subsystem for each new device in the
+ * system. We need to verify that the device is ours and, if so, start handling it.
+ */
+static int mcs_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct net_device *ndev = NULL;
+ struct mcs_cb *mcs;
+ int ret = -ENOMEM;
+
+ ndev = alloc_irdadev(sizeof(*mcs));
+ if (!ndev)
+ goto error1;
+
+ pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
+
+ SET_NETDEV_DEV(ndev, &intf->dev);
+
+ ret = usb_reset_configuration(udev);
+ if (ret != 0) {
+ net_err_ratelimited("mcs7780: usb reset configuration failed\n");
+ goto error2;
+ }
+
+ mcs = netdev_priv(ndev);
+ mcs->usbdev = udev;
+ mcs->netdev = ndev;
+ spin_lock_init(&mcs->lock);
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&mcs->qos);
+
+ /* That's the Rx capability. */
+ mcs->qos.baud_rate.bits &=
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200
+ | IR_576000 | IR_1152000 | (IR_4000000 << 8);
+
+
+ mcs->qos.min_turn_time.bits &= qos_mtt_bits;
+ irda_qos_bits_to_value(&mcs->qos);
+
+ /* Speed change work initialisation */
+ INIT_WORK(&mcs->work, mcs_speed_work);
+
+ ndev->netdev_ops = &mcs_netdev_ops;
+
+ if (!intf->cur_altsetting) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+
+ ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
+ intf->cur_altsetting->desc.bNumEndpoints);
+ if (!ret) {
+ ret = -ENODEV;
+ goto error2;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret != 0)
+ goto error2;
+
+ pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n",
+ ndev->name);
+
+ mcs->transceiver_type = transceiver_type;
+ mcs->sir_tweak = sir_tweak;
+ mcs->receive_mode = receive_mode;
+
+ usb_set_intfdata(intf, mcs);
+ return 0;
+
+error2:
+ free_netdev(ndev);
+
+error1:
+ return ret;
+}
+
+/* The current device is removed, the USB layer tells us to shut down. */
+static void mcs_disconnect(struct usb_interface *intf)
+{
+ struct mcs_cb *mcs = usb_get_intfdata(intf);
+
+ if (!mcs)
+ return;
+
+ cancel_work_sync(&mcs->work);
+
+ unregister_netdev(mcs->netdev);
+ free_netdev(mcs->netdev);
+
+ usb_set_intfdata(intf, NULL);
+ pr_debug("MCS7780 now disconnected.\n");
+}
+
+module_usb_driver(mcs_driver);
diff --git a/drivers/staging/irda/drivers/mcs7780.h b/drivers/staging/irda/drivers/mcs7780.h
new file mode 100644
index 000000000000..a6e8f7dbafc9
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcs7780.h
@@ -0,0 +1,165 @@
+/*****************************************************************************
+*
+* Filename: mcs7780.h
+* Version: 0.2-alpha
+* Description: IrDA MosChip USB Dongle
+* Status: Experimental
+* Authors: Lukasz Stelmach <stlman@poczta.fm>
+* Brian Pugh <bpugh@cs.pdx.edu>
+*
+* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
+* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+#ifndef _MCS7780_H
+#define _MCS7780_H
+
+#define MCS_MODE_SIR 0
+#define MCS_MODE_MIR 1
+#define MCS_MODE_FIR 2
+
+#define MCS_CTRL_TIMEOUT 500
+#define MCS_XMIT_TIMEOUT 500
+/* Possible transceiver types */
+#define MCS_TSC_VISHAY 0 /* Vishay TFD, default choice */
+#define MCS_TSC_AGILENT 1 /* Agilent 3602/3600 */
+#define MCS_TSC_SHARP 2 /* Sharp GP2W1000YP */
+
+/* Requests */
+#define MCS_RD_RTYPE 0xC0
+#define MCS_WR_RTYPE 0x40
+#define MCS_RDREQ 0x0F
+#define MCS_WRREQ 0x0E
+
+/* Register 0x00 */
+#define MCS_MODE_REG 0
+#define MCS_FIR ((__u16)0x0001)
+#define MCS_SIR16US ((__u16)0x0002)
+#define MCS_BBTG ((__u16)0x0004)
+#define MCS_ASK ((__u16)0x0008)
+#define MCS_PARITY ((__u16)0x0010)
+
+/* SIR/MIR speed constants */
+#define MCS_SPEED_SHIFT 5
+#define MCS_SPEED_MASK ((__u16)0x00E0)
+#define MCS_SPEED(x) ((x & MCS_SPEED_MASK) >> MCS_SPEED_SHIFT)
+#define MCS_SPEED_2400 ((0 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_9600 ((1 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_19200 ((2 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_38400 ((3 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_57600 ((4 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_115200 ((5 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_576000 ((6 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_1152000 ((7 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+
+#define MCS_PLLPWDN ((__u16)0x0100)
+#define MCS_DRIVER ((__u16)0x0200)
+#define MCS_DTD ((__u16)0x0400)
+#define MCS_DIR ((__u16)0x0800)
+#define MCS_SIPEN ((__u16)0x1000)
+#define MCS_SENDSIP ((__u16)0x2000)
+#define MCS_CHGDIR ((__u16)0x4000)
+#define MCS_RESET ((__u16)0x8000)
+
+/* Register 0x02 */
+#define MCS_XCVR_REG 2
+#define MCS_MODE0 ((__u16)0x0001)
+#define MCS_STFIR ((__u16)0x0002)
+#define MCS_XCVR_CONF ((__u16)0x0004)
+#define MCS_RXFAST ((__u16)0x0008)
+/* TXCUR [6:4] */
+#define MCS_TXCUR_SHIFT 4
+#define MCS_TXCUR_MASK ((__u16)0x0070)
+#define MCS_TXCUR(x) ((x & MCS_TXCUR_MASK) >> MCS_TXCUR_SHIFT)
+#define MCS_SETTXCUR(x,y) \
+ ((x & ~MCS_TXCUR_MASK) | (y << MCS_TXCUR_SHIFT) & MCS_TXCUR_MASK)
+
+#define MCS_MODE1 ((__u16)0x0080)
+#define MCS_SMODE0 ((__u16)0x0100)
+#define MCS_SMODE1 ((__u16)0x0200)
+#define MCS_INVTX ((__u16)0x0400)
+#define MCS_INVRX ((__u16)0x0800)
+
+#define MCS_MINRXPW_REG 4
+
+#define MCS_RESV_REG 7
+#define MCS_IRINTX ((__u16)0x0001)
+#define MCS_IRINRX ((__u16)0x0002)
+
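+/* Editorial note: illustration only, not part of this driver. It shows how
+ * the MCS_MODE_REG bits above are combined, following the pattern used in
+ * mcs_speed_change(): clear the speed field, OR in one MCS_SPEED_* value,
+ * and set or clear MCS_FIR for the 4 Mbit/s band. The constants are copied
+ * from the defines above so the snippet stands alone; kept under #if 0.
+ */
+#if 0
+#include <stdint.h>
+
+#define SPEED_MASK 0x00E0u /* MCS_SPEED_MASK */
+#define SPEED_115200 ((5u << 5) & SPEED_MASK) /* MCS_SPEED_115200 */
+#define FIR_BIT 0x0001u /* MCS_FIR */
+
+static uint16_t mode_with_speed(uint16_t mode, uint16_t speed_bits, int fir)
+{
+ mode &= ~SPEED_MASK; /* drop the old speed field */
+ mode |= speed_bits; /* insert the new one */
+ if (fir)
+ mode |= FIR_BIT;
+ else
+ mode &= ~FIR_BIT;
+ return mode;
+}
+#endif
+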
+struct mcs_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos;
+ unsigned int speed; /* Current speed */
+ unsigned int new_speed; /* new speed */
+
+ struct work_struct work; /* Change speed work */
+
+ struct sk_buff *tx_pending;
+ char in_buf[4096]; /* receive buffer */
+ char out_buf[4096]; /* transmit buffer */
+ __u8 *fifo_status;
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ spinlock_t lock;
+ int receiving;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+
+ int transceiver_type;
+ int sir_tweak;
+ int receive_mode;
+};
+
+static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val);
+static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val);
+
+static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver(struct mcs_cb *mcs);
+static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf);
+static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf);
+static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf);
+static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len);
+static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len);
+static inline int mcs_setup_urbs(struct mcs_cb *mcs);
+static inline int mcs_receive_start(struct mcs_cb *mcs);
+static inline int mcs_find_endpoints(struct mcs_cb *mcs,
+ struct usb_host_endpoint *ep, int epnum);
+
+static int mcs_speed_change(struct mcs_cb *mcs);
+
+static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
+static int mcs_net_close(struct net_device *netdev);
+static int mcs_net_open(struct net_device *netdev);
+
+static void mcs_receive_irq(struct urb *urb);
+static void mcs_send_irq(struct urb *urb);
+static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev);
+
+static int mcs_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+static void mcs_disconnect(struct usb_interface *intf);
+
+#endif /* _MCS7780_H */
diff --git a/drivers/staging/irda/drivers/nsc-ircc.c b/drivers/staging/irda/drivers/nsc-ircc.c
new file mode 100644
index 000000000000..7beae147be11
--- /dev/null
+++ b/drivers/staging/irda/drivers/nsc-ircc.c
@@ -0,0 +1,2410 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.c
+ * Version: 1.0
+ * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets
+ * Status: Stable.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Nov 7 21:43:15 1998
+ * Modified at: Wed Mar 1 11:29:34 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the BSR register on entry and restore it on exit.
+ * It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb(iobase+BSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb(bank, iobase+BSR);
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in w83977af_ir.c since the implementations are quite
+ * similar.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "nsc-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+static char *driver_name = "nsc-ircc";
+
+/* Power Management */
+#define NSC_IRCC_DRIVER_NAME "nsc-ircc"
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int nsc_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver nsc_ircc_driver = {
+ .suspend = nsc_ircc_suspend,
+ .resume = nsc_ircc_resume,
+ .driver = {
+ .name = NSC_IRCC_DRIVER_NAME,
+ },
+};
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id;
+
+/* Use BIOS settings by default, but the user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0, 0 };
+
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+#ifdef CONFIG_PNP
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
+#endif
+
+/* These are the known NSC chips */
+static nsc_chip_t chips[] = {
+/* Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
+ { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
+ nsc_ircc_probe_108, nsc_ircc_init_108 },
+ { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
+ nsc_ircc_probe_338, nsc_ircc_init_338 },
+ /* Contributed by Steffen Pingel - IBM X40 */
+ { "PC8738x", { 0x164e, 0x4e, 0x2e }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* Contributed by Jan Frey - IBM A30/A31 */
+ { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8738x (T60/X60/Z60) */
+ { "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8394T (T43/R52/?) */
+ { "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ { NULL }
+};
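+
+/* Editorial note: this is how a row of the table above is matched in
+ * nsc_ircc_init(): the cid_index byte is written to cfg_base, the byte read
+ * back from cfg_base+1 is masked with cid_mask and compared against
+ * cid_value, and the remaining bits give the revision. For the PC87108 row
+ * (value 0x10, mask 0xf0), a hypothetical readback of 0x13 matches because
+ * (0x13 & 0xf0) == 0x10, with revision 0x13 & ~0xf0 == 3.
+ */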
+
+static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL };
+
+static char *dongle_types[] = {
+ "Differential serial interface",
+ "Differential serial interface",
+ "Reserved",
+ "Reserved",
+ "Sharp RY5HD01",
+ "Reserved",
+ "Single-ended serial interface",
+ "Consumer-IR only",
+ "HP HSDL-2300, HP HSDL-3600/HSDL-3610",
+ "IBM31T1100 or Temic TFDS6000/TFDS6500",
+ "Reserved",
+ "Reserved",
+ "HP HSDL-1100/HSDL-2100",
+ "HP HSDL-1100/HSDL-2100",
+ "Supports SIR Mode only",
+ "No dongle connected",
+};
+
+/* PNP probing */
+static chipio_t pnp_info;
+static const struct pnp_device_id nsc_ircc_pnp_table[] = {
+ { .id = "NSC6001", .driver_data = 0 },
+ { .id = "HWPC224", .driver_data = 0 },
+ { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
+
+static struct pnp_driver nsc_ircc_pnp_driver = {
+#ifdef CONFIG_PNP
+ .name = "nsc-ircc",
+ .id_table = nsc_ircc_pnp_table,
+ .probe = nsc_ircc_pnp_probe,
+#endif
+};
+
+/* Some prototypes */
+static int nsc_ircc_open(chipio_t *info);
+static int nsc_ircc_close(struct nsc_ircc_cb *self);
+static int nsc_ircc_setup(chipio_t *info);
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
+static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
+static int nsc_ircc_read_dongle_id (int iobase);
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
+
+static int nsc_ircc_net_open(struct net_device *dev);
+static int nsc_ircc_net_close(struct net_device *dev);
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/* Globals */
+static int pnp_registered;
+static int pnp_succeeded;
+
+/*
+ * Function nsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init nsc_ircc_init(void)
+{
+ chipio_t info;
+ nsc_chip_t *chip;
+ int ret;
+ int cfg_base;
+ int cfg, id;
+ int reg;
+ int i = 0;
+
+ ret = platform_driver_register(&nsc_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
+ return ret;
+ }
+
+ /* Register with the PnP subsystem to detect disabled ports */
+ ret = pnp_register_driver(&nsc_ircc_pnp_driver);
+
+ if (!ret)
+ pnp_registered = 1;
+
+ ret = -ENODEV;
+
+ /* Probe for all the NSC chipsets we know about */
+ for (chip = chips; chip->name ; chip++) {
+ pr_debug("%s(), Probing for %s ...\n", __func__,
+ chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ /* Read index register */
+ reg = inb(cfg_base);
+ if (reg == 0xff) {
+ pr_debug("%s() no chip at 0x%03x\n",
+ __func__, cfg_base);
+ continue;
+ }
+
+ /* Read chip identification register */
+ outb(chip->cid_index, cfg_base);
+ id = inb(cfg_base+1);
+ if ((id & chip->cid_mask) == chip->cid_value) {
+ pr_debug("%s() Found %s chip, revision=%d\n",
+ __func__, chip->name,
+ id & ~chip->cid_mask);
+
+ /*
+ * If we found a correct PnP setting,
+ * we first try it.
+ */
+ if (pnp_succeeded) {
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = pnp_info.fir_base;
+ info.dma = pnp_info.dma;
+ info.irq = pnp_info.irq;
+
+ if (info.fir_base < 0x2000) {
+ net_info_ratelimited("%s, chip->init\n",
+ driver_name);
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
+
+ /*
+ * Opening based on PnP values failed.
+ * Let's fallback to user values, or probe
+ * the chip.
+ */
+ if (ret) {
+ pr_debug("%s, PnP init failed\n",
+ driver_name);
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 0x2000) {
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
+ i++;
+ } else {
+ pr_debug("%s(), Wrong chip id=0x%02x\n",
+ __func__, id);
+ }
+ }
+ }
+
+ if (ret) {
+ platform_driver_unregister(&nsc_ircc_driver);
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+ pnp_registered = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit nsc_ircc_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ nsc_ircc_close(dev_self[i]);
+ }
+
+ platform_driver_unregister(&nsc_ircc_driver);
+
+ if (pnp_registered)
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+
+ pnp_registered = 0;
+}
+
+static const struct net_device_ops nsc_ircc_sir_ops = {
+ .ndo_open = nsc_ircc_net_open,
+ .ndo_stop = nsc_ircc_net_close,
+ .ndo_start_xmit = nsc_ircc_hard_xmit_sir,
+ .ndo_do_ioctl = nsc_ircc_net_ioctl,
+};
+
+static const struct net_device_ops nsc_ircc_fir_ops = {
+ .ndo_open = nsc_ircc_net_open,
+ .ndo_stop = nsc_ircc_net_close,
+ .ndo_start_xmit = nsc_ircc_hard_xmit_fir,
+ .ndo_do_ioctl = nsc_ircc_net_ioctl,
+};
+
+/*
+ * Function nsc_ircc_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int __init nsc_ircc_open(chipio_t *info)
+{
+ struct net_device *dev;
+ struct nsc_ircc_cb *self;
+ void *ret;
+ int err, chip_index;
+
+ for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
+ if (!dev_self[chip_index])
+ break;
+ }
+
+ if (chip_index == ARRAY_SIZE(dev_self)) {
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ net_info_ratelimited("%s, Found chip at base=0x%03x\n",
+ driver_name, info->cfg_base);
+
+ if ((nsc_ircc_setup(info)) == -1)
+ return -1;
+
+ net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name);
+
+ dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
+ if (dev == NULL) {
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[chip_index] = self;
+ self->index = chip_index;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 32;
+
+ /* Reserve the ioports that we need */
+ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
+ if (!ret) {
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* The only value we must override is the baud rate */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
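+ /* Editorial note: 14384 matches the formula above assuming a 2048-byte
+ * max data size and a 7-frame window: (2048 + 6) * 7 + 6 = 14384.
+ */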
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out2;
+
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &nsc_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto out4;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Check if user has supplied a valid dongle id or not */
+ if ((dongle_id <= 0) ||
+ (dongle_id >= ARRAY_SIZE(dongle_types))) {
+ dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
+
+ net_info_ratelimited("%s, Found dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
+ } else {
+ net_info_ratelimited("%s, Using dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
+ }
+
+ self->io.dongle_id = dongle_id;
+ nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
+
+ self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
+ self->index, NULL, 0);
+ if (IS_ERR(self->pldev)) {
+ err = PTR_ERR(self->pldev);
+ goto out5;
+ }
+ platform_set_drvdata(self->pldev, self);
+
+ return chip_index;
+
+ out5:
+ unregister_netdev(dev);
+ out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ out1:
+ free_netdev(dev);
+ dev_self[chip_index] = NULL;
+ return err;
+}
+
+/*
+ * Function nsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ platform_device_unregister(self->pldev);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_108 (iobase, cfg_base, irq, dma)
+ *
+ * Initialize the NSC '108 chip
+ *
+ */
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ __u8 temp=0;
+
+ outb(2, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x00, cfg_base+1); /* Disable device */
+
+ /* Base Address and Interrupt Control Register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ switch (info->fir_base) {
+ case 0x3e8: outb(0x14, cfg_base+1); break;
+ case 0x2e8: outb(0x15, cfg_base+1); break;
+ case 0x3f8: outb(0x16, cfg_base+1); break;
+ case 0x2f8: outb(0x17, cfg_base+1); break;
+ default: net_err_ratelimited("%s(), invalid base_address\n", __func__);
+ }
+
+ /* Control Signal Routing Register (CSRT) */
+ switch (info->irq) {
+ case 3: temp = 0x01; break;
+ case 4: temp = 0x02; break;
+ case 5: temp = 0x03; break;
+ case 7: temp = 0x04; break;
+ case 9: temp = 0x05; break;
+ case 11: temp = 0x06; break;
+ case 15: temp = 0x07; break;
+ default: net_err_ratelimited("%s(), invalid irq\n", __func__);
+ }
+ outb(CFG_108_CSRT, cfg_base);
+
+ switch (info->dma) {
+ case 0: outb(0x08+temp, cfg_base+1); break;
+ case 1: outb(0x10+temp, cfg_base+1); break;
+ case 3: outb(0x18+temp, cfg_base+1); break;
+ default: net_err_ratelimited("%s(), invalid dma\n", __func__);
+ }
+
+ outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x03, cfg_base+1); /* Enable device */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_108 (chip, info)
+ *
+ *
+ *
+ */
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg;
+
+ /* Read address and interrupt control register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x03) {
+ case 0:
+ info->fir_base = 0x3e8;
+ break;
+ case 1:
+ info->fir_base = 0x2e8;
+ break;
+ case 2:
+ info->fir_base = 0x3f8;
+ break;
+ case 3:
+ info->fir_base = 0x2f8;
+ break;
+ }
+ info->sir_base = info->fir_base;
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__,
+ info->fir_base);
+
+ /* Read control signals routing register (CSRT) */
+ outb(CFG_108_CSRT, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x07) {
+ case 0:
+ info->irq = -1;
+ break;
+ case 1:
+ info->irq = 3;
+ break;
+ case 2:
+ info->irq = 4;
+ break;
+ case 3:
+ info->irq = 5;
+ break;
+ case 4:
+ info->irq = 7;
+ break;
+ case 5:
+ info->irq = 9;
+ break;
+ case 6:
+ info->irq = 11;
+ break;
+ case 7:
+ info->irq = 15;
+ break;
+ }
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
+
+ /* Currently we only read Rx DMA but it will also be used for Tx */
+ switch ((reg >> 3) & 0x03) {
+ case 0:
+ info->dma = -1;
+ break;
+ case 1:
+ info->dma = 0;
+ break;
+ case 2:
+ info->dma = 1;
+ break;
+ case 3:
+ info->dma = 3;
+ break;
+ }
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
+
+ /* Read mode control register (MCTL) */
+ outb(CFG_108_MCTL, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = reg & 0x01;
+ info->suspended = !((reg >> 1) & 0x01);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_338 (chip, info)
+ *
+ * Initialize the NSC '338 chip. Remember that the 87338 needs two
+ * consecutive writes to the data registers while CPU interrupts are
+ * disabled. The 97338 does not require this, but there is no
+ * harm in doing it anyway.
+ */
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info)
+{
+ /* No init yet */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_338 (chip, info)
+ *
+ *
+ *
+ */
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg, com = 0;
+ int pnp;
+
+ /* Read function enable register (FER) */
+ outb(CFG_338_FER, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = (reg >> 2) & 0x01;
+
+ /* Check if we are in Legacy or PnP mode */
+ outb(CFG_338_PNP0, cfg_base);
+ reg = inb(cfg_base+1);
+
+ pnp = (reg >> 3) & 0x01;
+ if (pnp) {
+ pr_debug("(), Chip is in PnP mode\n");
+ outb(0x46, cfg_base);
+ reg = (inb(cfg_base+1) & 0xfe) << 2;
+
+ outb(0x47, cfg_base);
+ reg |= ((inb(cfg_base+1) & 0xfc) << 8);
+
+ info->fir_base = reg;
+ } else {
+ /* Read function address register (FAR) */
+ outb(CFG_338_FAR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch ((reg >> 4) & 0x03) {
+ case 0:
+ info->fir_base = 0x3f8;
+ break;
+ case 1:
+ info->fir_base = 0x2f8;
+ break;
+ case 2:
+ com = 3;
+ break;
+ case 3:
+ com = 4;
+ break;
+ }
+
+ if (com) {
+ switch ((reg >> 6) & 0x03) {
+ case 0:
+ if (com == 3)
+ info->fir_base = 0x3e8;
+ else
+ info->fir_base = 0x2e8;
+ break;
+ case 1:
+ if (com == 3)
+ info->fir_base = 0x338;
+ else
+ info->fir_base = 0x238;
+ break;
+ case 2:
+ if (com == 3)
+ info->fir_base = 0x2e8;
+ else
+ info->fir_base = 0x2e0;
+ break;
+ case 3:
+ if (com == 3)
+ info->fir_base = 0x220;
+ else
+ info->fir_base = 0x228;
+ break;
+ }
+ }
+ }
+ info->sir_base = info->fir_base;
+
+ /* Read PnP register 1 (PNP1) */
+ outb(CFG_338_PNP1, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->irq = reg >> 4;
+
+ /* Read PnP register 3 (PNP3) */
+ outb(CFG_338_PNP3, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->dma = (reg & 0x07) - 1;
+
+ /* Read power and test register (PTR) */
+ outb(CFG_338_PTR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->suspended = reg & 0x01;
+
+ return 0;
+}
+
+
+/*
+ * Function nsc_ircc_init_39x (chip, info)
+ *
+ * Now that we know it's a '39x (see probe below), we need to
+ * configure it so we can use it.
+ *
+ * The NSC '338 chip is a Super I/O chip with a "bank" architecture,
+ * the configuration of the different functionalities (serial, parallel,
+ * floppy...) each lives in a different bank (Logical Device Number).
+ * The base address, irq and dma configuration registers are common
+ * to all functionalities (index 0x30 to 0x7F).
+ * There is only one configuration register specific to the
+ * serial port, CFG_39X_SPC.
+ * JeanII
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
+ */
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int enabled;
+
+ /* User is sure about his config... accept it. */
+ pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n",
+ __func__, info->fir_base, info->irq, info->dma);
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_39x (chip, info)
+ *
+ * Test if we really have a '39x chip at the given address
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
+ */
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg1, reg2, irq, irqt, dma1, dma2;
+ int enabled, susp;
+
+ pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n",
+ __func__, cfg_base);
+
+ /* This function should be executed with irq off to avoid
+ * another driver messing with the Super I/O bank - Jean II */
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+ /* Read info about SP2; store it in the info struct */
+ outb(CFG_39X_BASEH, cfg_base);
+ reg1 = inb(cfg_base+1);
+ outb(CFG_39X_BASEL, cfg_base);
+ reg2 = inb(cfg_base+1);
+ info->fir_base = (reg1 << 8) | reg2;
+
+ outb(CFG_39X_IRQNUM, cfg_base);
+ irq = inb(cfg_base+1);
+ outb(CFG_39X_IRQSEL, cfg_base);
+ irqt = inb(cfg_base+1);
+ info->irq = irq;
+
+ outb(CFG_39X_DMA0, cfg_base);
+ dma1 = inb(cfg_base+1);
+ outb(CFG_39X_DMA1, cfg_base);
+ dma2 = inb(cfg_base+1);
+ info->dma = dma1 -1;
+
+ outb(CFG_39X_ACT, cfg_base);
+ info->enabled = enabled = inb(cfg_base+1) & 0x01;
+
+ outb(CFG_39X_SPC, cfg_base);
+ susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
+
+ pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n",
+ __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PNP
+/* PNP probing */
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+{
+ memset(&pnp_info, 0, sizeof(chipio_t));
+ pnp_info.irq = -1;
+ pnp_info.dma = -1;
+ pnp_succeeded = 1;
+
+ if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
+ dongle_id = 0x9;
+
+ /* There doesn't seem to be any way of getting the cfg_base.
+ * On my box, cfg_base is in the PnP descriptor of the
+ * motherboard. Oh well... Jean II */
+
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.fir_base = pnp_port_start(dev, 0);
+
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.irq = pnp_irq(dev, 0);
+
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.dma = pnp_dma(dev, 0);
+
+ pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
+ __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+
+ if((pnp_info.fir_base == 0) ||
+ (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
+ /* Returning an error will disable the device. Yuck ! */
+ //return -EINVAL;
+ pnp_succeeded = 0;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * Function nsc_ircc_setup (info)
+ *
+ * Returns non-negative on success.
+ *
+ */
+static int nsc_ircc_setup(chipio_t *info)
+{
+ int version;
+ int iobase = info->fir_base;
+
+ /* Read the Module ID */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+MID);
+
+ pr_debug("%s() Driver %s Found chip version %02x\n",
+ __func__, driver_name, version);
+
+ /* The high nibble of the version should be 0x2 */
+ if (0x20 != (version & 0xf0)) {
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ driver_name, version);
+ return -1;
+ }
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_EXT_SL, iobase+ECR1);
+ switch_bank(iobase, BANK0);
+
+ /* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
+ outb(0x03, iobase+LCR); /* 8 bit word length */
+ outb(MCR_SIR, iobase+MCR); /* Start at SIR-mode, also clears LSR*/
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* IRCR2: FEND_MD is not set */
+ switch_bank(iobase, BANK5);
+ outb(0x02, iobase+4);
+
+ /* Make sure that some defaults are OK */
+ switch_bank(iobase, BANK6);
+ outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
+ outb(0x0a, iobase+1); /* Set MIR pulse width */
+ outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
+ outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */
+
+ /* Enable receive interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_RXHDL_IE, iobase+IER);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_read_dongle_id (void)
+ *
+ * Try to read dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int nsc_ircc_read_dongle_id (int iobase)
+{
+ int dongle_id;
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
+ outb(0x00, iobase+7);
+
+ /* ID0, 1, and 2 are pulled up/down very slowly */
+ udelay(50);
+
+ /* IRCFG1: read the ID bits */
+ dongle_id = inb(iobase+4) & 0x0f;
+
+#ifdef BROKEN_DONGLE_ID
+ if (dongle_id == 0x0a)
+ dongle_id = 0x09;
+#endif
+ /* Go back to bank 0 before returning */
+ switch_bank(iobase, BANK0);
+
+ outb(bank, iobase+BSR);
+
+ return dongle_id;
+}
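+
+/* Editorial note: the 4-bit id returned above indexes dongle_types[]; with
+ * BROKEN_DONGLE_ID defined, a reading of 0x0a is remapped to 0x09, i.e.
+ * "IBM31T1100 or Temic TFDS6000/TFDS6500".
+ */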
+
+/*
+ * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
+ *
+ * This function initializes the dongle for the transceiver that is
+ * used. This procedure needs to be executed once after
+ * power-on/reset. It also needs to be used whenever you suspect that
+ * the dongle is changed.
+ */
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
+{
+ int bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved, but this is what the Thinkpad reports */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ /*
+ * Set irsl0 as input, irsl[1-2] as output, and separate
+ * inputs are used for SIR and MIR/FIR
+ */
+ outb(0x48, iobase+7);
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0F: /* No dongle connected */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ pr_debug("%s(), invalid dongle_id %#x",
+ __func__, dongle_id);
+ }
+
+ /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
+ outb(0x00, iobase+4);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+} /* set_up_dongle_interface */
+
+/*
+ * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ *
+ * Change the speed of the attached dongle
+ *
+ */
+static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
+{
+ __u8 bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG1: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+ outb(0x00, iobase+4);
+ if (speed > 115200)
+ outb(0x01, iobase+4);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x01, iobase+4);
+
+ if (speed == 4000000) {
+ /* There was a cli() there, but we now are already
+ * under spin_lock_irqsave() - JeanII */
+ outb(0x81, iobase+4);
+ outb(0x80, iobase+4);
+ } else
+ outb(0x00, iobase+4);
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ break;
+ case 0x0F: /* No dongle connected */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ pr_debug("%s(), invalid data_rate\n", __func__);
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with irq off and spin-lock.
+ */
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev;
+ __u8 mcr = MCR_SIR;
+ int iobase;
+ __u8 bank;
+ __u8 ier; /* Interrupt enable register */
+
+ pr_debug("%s(), speed=%d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Select Bank 2 */
+ switch_bank(iobase, BANK2);
+
+ outb(0x00, iobase+BGDH);
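+ /* Editorial note: the BGDL values below are exactly 115200 divided by
+ * the rate: 115200/9600 = 12 (0x0c), /19200 = 6, /38400 = 3,
+ * /57600 = 2, /115200 = 1.
+ */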
+ switch (speed) {
+ case 9600: outb(0x0c, iobase+BGDL); break;
+ case 19200: outb(0x06, iobase+BGDL); break;
+ case 38400: outb(0x03, iobase+BGDL); break;
+ case 57600: outb(0x02, iobase+BGDL); break;
+ case 115200: outb(0x01, iobase+BGDL); break;
+ case 576000:
+ switch_bank(iobase, BANK5);
+
+ /* IRCR2: MDRS is set */
+ outb(inb(iobase+4) | 0x04, iobase+4);
+
+ mcr = MCR_MIR;
+ pr_debug("%s(), handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ mcr = MCR_MIR;
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
+ break;
+ case 4000000:
+ mcr = MCR_FIR;
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
+ break;
+ default:
+ mcr = MCR_FIR;
+ pr_debug("%s(), unknown baud rate of %d\n",
+ __func__, speed);
+ break;
+ }
+
+ /* Set appropriate speed mode */
+ switch_bank(iobase, BANK0);
+ outb(mcr | MCR_TX_DFR, iobase+MCR);
+
+ /* Give some hints to the transceiver */
+ nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, BANK0);
+ outb(0x00, iobase+FCR);
+ outb(FCR_FIFO_EN, iobase+FCR);
+ outb(FCR_RXTH| /* Set Rx FIFO threshold */
+ FCR_TXTH| /* Set Tx FIFO threshold */
+ FCR_TXSR| /* Reset Tx FIFO */
+ FCR_RXSR| /* Reset Rx FIFO */
+ FCR_FIFO_EN, /* Enable FIFOs */
+ iobase+FCR);
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, BANK0);
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->netdev_ops = &nsc_ircc_fir_ops;
+ ier = IER_SFIF_IE;
+ nsc_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->netdev_ops = &nsc_ircc_sir_ops;
+ ier = IER_RXHDL_IE;
+ }
+ /* Set our current interrupt mask */
+ outb(ier, iobase+IER);
+
+ /* Restore BSR */
+ outb(bank, iobase+BSR);
+
+ /* Make sure interrupt handlers keep the proper interrupt mask */
+ return ier;
+}
+
+/*
+ * Function nsc_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we just sent a frame, we get called before
+ * the last bytes get out (because of the SIR FIFO).
+ * If this is the case, let interrupt handler change
+ * the speed itself... Jean II */
+ if (self->io.direction == IO_RECV) {
+ nsc_ircc_change_speed(self, speed);
+ /* TODO : For SIR->SIR, the next packet
+ * may get corrupted - Jean II */
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Queue will be restarted after speed change
+ * to make sure packets get through the
+ * proper xmit handler - Jean II */
+ }
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ self->tx_buff.data = self->tx_buff.head;
+
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, BANK0);
+ outb(IER_TXLDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+ int mtt, diff;
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we are currently transmitting, defer to
+ * interrupt handler. - Jean II */
+ if(self->tx_fifo.len == 0) {
+ nsc_ircc_change_speed(self, speed);
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Keep queue stopped :
+ * the speed change operation may change the
+ * xmit handler, and we want to make sure
+ * the next packet gets through the proper
+ * Tx path, so block the Tx queue until
+ * the speed change has been done.
+ * Jean II */
+ }
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else {
+ /* Change speed after current frame */
+ self->new_speed = speed;
+ }
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ dev->stats.tx_bytes += skb->len;
+
+ skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+ skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1) {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ /* Check how much time we have used already */
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+
+ /*
+ * Use timer if delay larger than 125 us, and
+ * use udelay for smaller values which should
+ * be acceptable
+ */
+ if (mtt > 125) {
+ /* Adjust for timer resolution */
+ mtt = mtt / 125;
+
+ /* Setup timer */
+ switch_bank(iobase, BANK4);
+ outb(mtt & 0xff, iobase+TMRL);
+ outb((mtt >> 8) & 0x0f, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_TMR_IE, iobase+IER);
+
+ /* Timer will take care of the rest */
+ goto out;
+ } else
+ udelay(mtt);
+ }
+ }
+ /* Enable DMA interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_DMA_IE, iobase+IER);
+
+ /* Transmit frame */
+ nsc_ircc_dma_xmit(self, iobase);
+ }
+ out:
+ /* Not busy transmitting anymore if window is not full,
+ * and if we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0))
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
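
/*
 * Illustrative, standalone sketch of the minimum-turn-time decision made
 * above (hypothetical helper, not part of the driver): the remaining
 * turnaround time is either short enough to burn with udelay(), or it is
 * converted into ticks of the chip's 125 us timer (TMRL/TMRH).
 */
#include <stdio.h>

#define TIMER_RES_US 125	/* resolution of the hardware timer */

/* Returns 0 if the caller should just udelay(), else the tick count. */
static int mtt_to_timer_ticks(int mtt_us, int already_used_us)
{
	int remaining = mtt_us - already_used_us;

	if (remaining <= 0)
		return 0;			/* turnaround already satisfied */
	if (remaining <= TIMER_RES_US)
		return 0;			/* cheaper to busy-wait */
	return remaining / TIMER_RES_US;	/* program the timer with this */
}

int main(void)
{
	/* e.g. a 10 ms turnaround of which 1 ms was spent in the stack */
	printf("ticks = %d\n", mtt_to_timer_ticks(10000, 1000)); /* -> 72 */
	return 0;
}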
+
+/*
+ * Function nsc_ircc_dma_xmit (self, iobase)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
+{
+ int bsr;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ self->io.direction = IO_XMIT;
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Enable DMA and SIR interaction pulse */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_pio_xmit (self, iobase)
+ *
+ * Transmit data using PIO. Returns the number of bytes that actually
+ * got transferred
+ *
+ */
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ switch_bank(iobase, BANK0);
+ if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
+ pr_debug("%s(), warning, FIFO not empty yet!\n",
+ __func__);
+
+ /* FIFO may still be filled to the Tx interrupt threshold */
+ fifo_size -= 17;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase+TXD);
+ }
+
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return actual;
+}
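
/*
 * Standalone sketch of the PIO fill arithmetic used above (hypothetical
 * helper, not driver code): with a 32-byte FIFO and the Tx threshold set
 * to 17, a non-empty FIFO may still hold up to 17 queued bytes, so only
 * fifo_size - 17 slots can safely be refilled.
 */
#include <stdio.h>
#include <stdbool.h>

static int pio_room(int fifo_size, bool fifo_empty)
{
	if (!fifo_empty)
		fifo_size -= 17;	/* worst case: threshold bytes still queued */
	return fifo_size > 0 ? fifo_size : 0;
}

int main(void)
{
	printf("empty FIFO:     %d bytes\n", pio_room(32, true));  /* 32 */
	printf("non-empty FIFO: %d bytes\n", pio_room(32, false)); /* 15 */
	return 0;
}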
+
+/*
+ * Function nsc_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bank;
+ int ret = TRUE;
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Check for underrun! */
+ if (inb(iobase+ASCR) & ASCR_TXUR) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 into it */
+ outb(ASCR_TXUR, iobase+ASCR);
+ } else {
+ self->netdev->stats.tx_packets++;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ } else {
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames and
+ * that we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bsr;
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO. This will also flush the ST_FIFO */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_dma_receive_complete (self)
+ *
+ * Finished with receiving frames
+ *
+ *
+ */
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status;
+ __u8 bank;
+ int len;
+
+ st_fifo = &self->st_fifo;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Read all entries in status FIFO */
+ switch_bank(iobase, BANK5);
+ while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) {
+ /* We must empty the status FIFO no matter what */
+ len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ pr_debug("%s(), window is full!\n", __func__);
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+ /* Try to process all entries in status FIFO */
+ while (st_fifo->len > 0) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FRM_ST_ERR_MSK) {
+ if (status & FRM_ST_LOST_FR) {
+ /* Add number of lost frames to stats */
+ self->netdev->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FRM_ST_MAX_LEN)
+ self->netdev->stats.rx_length_errors++;
+
+ if (status & FRM_ST_PHY_ERR)
+ self->netdev->stats.rx_frame_errors++;
+
+ if (status & FRM_ST_BAD_CRC)
+ self->netdev->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FRM_ST_OVR1)
+ self->netdev->stats.rx_fifo_errors++;
+
+ if (status & FRM_ST_OVR2)
+ self->netdev->stats.rx_fifo_errors++;
+ } else {
+ /*
+ * First we must make sure that the frame we
+ * want to deliver is all in main memory. If we
+ * cannot tell, then we check if the Rx FIFO is
+ * empty. If not then we will have to take a nap
+ * and try again later.
+ */
+ if (st_fifo->pending_bytes < self->io.fifo_size) {
+ switch_bank(iobase, BANK0);
+ if (inb(iobase+LSR) & LSR_RXDA) {
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 125 us
+ */
+ switch_bank(iobase, BANK4);
+ outb(0x02, iobase+TMRL); /* x 125 us */
+ outb(0x00, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL) {
+ self->netdev->stats.rx_dropped++;
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len-2);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 2);
+ } else {
+ skb_put(skb, len-4);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return TRUE;
+}
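
/*
 * Standalone sketch of how the receive frame length is assembled above
 * (hypothetical register values): RFLFL holds the low 8 bits and the low
 * 5 bits of RFLFH hold the upper part, giving a 13-bit frame length.
 */
#include <stdio.h>

static unsigned int frame_len(unsigned char rflfl, unsigned char rflfh)
{
	return rflfl | ((rflfh & 0x1f) << 8);
}

int main(void)
{
	/* e.g. RFLFL = 0x34, RFLFH = 0x12 -> length 0x1234 */
	printf("len = 0x%x\n", frame_len(0x34, 0x12));
	return 0;
}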
+
+/*
+ * Function nsc_ircc_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
+{
+ __u8 byte;
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase+RXD);
+ async_unwrap_char(self->netdev, &self->netdev->stats,
+ &self->rx_buff, byte);
+ } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
+}
+
+/*
+ * Function nsc_ircc_sir_interrupt (self, eir)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
+{
+ int actual;
+
+ /* Check if transmit FIFO is low on data */
+ if (eir & EIR_TXLDL_EV) {
+ /* Write data left in transmit buffer */
+ actual = nsc_ircc_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0)
+ self->ier = IER_TXLDL_IE;
+ else {
+
+ self->netdev->stats.tx_packets++;
+ netif_wake_queue(self->netdev);
+ self->ier = IER_TXEMP_IE;
+ }
+
+ }
+ /* Check if transmission has completed */
+ if (eir & EIR_TXEMP_EV) {
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ self->ier = IER_RXHDL_IE;
+ /* Check if we need to change the speed.
+ * This needs to come after setting self->io.direction to avoid a race with
+ * nsc_ircc_hard_xmit_sir() - Jean II */
+ if (self->new_speed) {
+ pr_debug("%s(), Changing speed!\n", __func__);
+ self->ier = nsc_ircc_change_speed(self,
+ self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+
+ /* Check if we are going to FIR */
+ if (self->io.speed > 115200) {
+ /* No need to do any more SIR stuff */
+ return;
+ }
+ }
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (eir & EIR_RXHDL_EV) {
+ nsc_ircc_pio_receive(self);
+
+ /* Keep receiving */
+ self->ier = IER_RXHDL_IE;
+ }
+}
+
+/*
+ * Function nsc_ircc_fir_interrupt (self, eir)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
+ int eir)
+{
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Status FIFO event */
+ if (eir & EIR_SFIF_EV) {
+ /* Check if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ /* Wait for next status FIFO interrupt */
+ self->ier = IER_SFIF_IE;
+ } else {
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ } else if (eir & EIR_TMR_EV) { /* Timer finished */
+ /* Disable timer */
+ switch_bank(iobase, BANK4);
+ outb(0, iobase+IRCR1);
+
+ /* Clear timer event */
+ switch_bank(iobase, BANK0);
+ outb(ASCR_CTE, iobase+ASCR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Interrupt on DMA */
+ self->ier = IER_DMA_IE;
+ } else {
+ /* Check (again) if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ self->ier = IER_SFIF_IE;
+ } else {
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ }
+ } else if (eir & EIR_DMA_EV) {
+ /* Finished with all transmissions? */
+ if (nsc_ircc_dma_xmit_complete(self)) {
+ if(self->new_speed != 0) {
+ /* As we stop the Tx queue, the speed change
+ * needs to be done when the Tx FIFO is
+ * empty. Ask for a Tx done interrupt */
+ self->ier = IER_TXEMP_IE;
+ } else {
+ /* Check if there are more frames to be
+ * transmitted */
+ if (irda_device_txqueue_empty(self->netdev)) {
+ /* Prepare for receive */
+ nsc_ircc_dma_receive(self);
+ self->ier = IER_SFIF_IE;
+ } else
+ net_warn_ratelimited("%s(), potential Tx queue lockup !\n",
+ __func__);
+ }
+ } else {
+ /* Not finished yet, so interrupt on DMA again */
+ self->ier = IER_DMA_IE;
+ }
+ } else if (eir & EIR_TXEMP_EV) {
+ /* The Tx FIFO has totally drained out, so now we can change
+ * the speed... - Jean II */
+ self->ier = nsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+ /* Note : nsc_ircc_change_speed() restarted Rx fifo */
+ }
+
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct nsc_ircc_cb *self;
+ __u8 bsr, eir;
+ int iobase;
+
+ self = netdev_priv(dev);
+
+ spin_lock(&self->lock);
+
+ iobase = self->io.fir_base;
+
+ bsr = inb(iobase+BSR); /* Save current bank */
+
+ switch_bank(iobase, BANK0);
+ self->ier = inb(iobase+IER);
+ eir = inb(iobase+EIR) & self->ier; /* Mask out the interesting ones */
+
+ outb(0, iobase+IER); /* Disable interrupts */
+
+ if (eir) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ nsc_ircc_fir_interrupt(self, iobase, eir);
+ else
+ nsc_ircc_sir_interrupt(self, eir);
+ }
+
+ outb(self->ier, iobase+IER); /* Restore interrupts */
+ outb(bsr, iobase+BSR); /* Restore bank register */
+
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function nsc_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+ __u8 bank;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ bank = inb(iobase+BSR);
+ switch_bank(iobase, BANK2);
+ if ((inb(iobase+RXFLV) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(bank, iobase+BSR);
+ } else
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return status;
+}
+
+/*
+ * Function nsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int nsc_ircc_net_open(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+ __u8 bank;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* turn on interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int nsc_ircc_net_close(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ __u8 bank;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ iobase = self->io.fir_base;
+
+ disable_dma(self->io.dma);
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ /* This is already protected */
+ irq->ifr_receiving = nsc_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ int bank;
+ unsigned long flags;
+ int iobase = self->io.fir_base;
+
+ if (self->io.suspended)
+ return 0;
+
+ pr_debug("%s, Suspending\n", driver_name);
+
+ rtnl_lock();
+ if (netif_running(self->netdev)) {
+ netif_device_detach(self->netdev);
+ spin_lock_irqsave(&self->lock, flags);
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+ free_irq(self->io.irq, self->netdev);
+ disable_dma(self->io.dma);
+ }
+ self->io.suspended = 1;
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int nsc_ircc_resume(struct platform_device *dev)
+{
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!self->io.suspended)
+ return 0;
+
+ pr_debug("%s, Waking up\n", driver_name);
+
+ rtnl_lock();
+ nsc_ircc_setup(&self->io);
+ nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);
+
+ if (netif_running(self->netdev)) {
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
+ self->netdev->name, self->netdev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+
+ /*
+ * Don't fail resume process, just kill this
+ * network interface
+ */
+ unregister_netdevice(self->netdev);
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, self->io.speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ netif_device_attach(self->netdev);
+ }
+
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, 9600);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+ self->io.suspended = 0;
+ rtnl_unlock();
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("NSC IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_hw_array(dma, int, dma, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+module_param(dongle_id, int, 0);
+MODULE_PARM_DESC(dongle_id, "Type-id of used dongle");
+
+module_init(nsc_ircc_init);
+module_exit(nsc_ircc_cleanup);
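
/*
 * Example usage of the module parameters above (hypothetical resource
 * values -- adjust to the actual Super I/O configuration):
 *
 *   modprobe nsc-ircc io=0x2f8 irq=3 dma=1
 *
 * dongle_id selects an entry in the driver's dongle table; omit it to keep
 * the value probed from the chip.
 */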
+
diff --git a/drivers/staging/irda/drivers/nsc-ircc.h b/drivers/staging/irda/drivers/nsc-ircc.h
new file mode 100644
index 000000000000..7be5acb56532
--- /dev/null
+++ b/drivers/staging/irda/drivers/nsc-ircc.h
@@ -0,0 +1,281 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Nov 13 14:37:40 1998
+ * Modified at: Sun Jan 23 17:47:00 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef NSC_IRCC_H
+#define NSC_IRCC_H
+
+#include <linux/ktime.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* Features for chips (set in driver_data) */
+#define NSC_FORCE_DONGLE_TYPE9 0x00000001
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Config registers for the '108 */
+#define CFG_108_BAIC 0x00
+#define CFG_108_CSRT 0x01
+#define CFG_108_MCTL 0x02
+
+/* Config registers for the '338 */
+#define CFG_338_FER 0x00
+#define CFG_338_FAR 0x01
+#define CFG_338_PTR 0x02
+#define CFG_338_PNP0 0x1b
+#define CFG_338_PNP1 0x1c
+#define CFG_338_PNP3 0x4f
+
+/* Config registers for the '39x (in the logical device bank) */
+#define CFG_39X_LDN 0x07 /* Logical device number (Super I/O bank) */
+#define CFG_39X_SIOCF1 0x21 /* SuperI/O Config */
+#define CFG_39X_ACT 0x30 /* Device activation */
+#define CFG_39X_BASEH 0x60 /* Device base address (high bits) */
+#define CFG_39X_BASEL 0x61 /* Device base address (low bits) */
+#define CFG_39X_IRQNUM 0x70 /* Interrupt number & wake up enable */
+#define CFG_39X_IRQSEL 0x71 /* Interrupt select (edge/level + polarity) */
+#define CFG_39X_DMA0 0x74 /* DMA 0 configuration */
+#define CFG_39X_DMA1 0x75 /* DMA 1 configuration */
+#define CFG_39X_SPC 0xF0 /* Serial port configuration register */
+
+/* Flags for configuration register CRF0 */
+#define APEDCRC 0x02
+#define ENBNKSEL 0x01
+
+/* Set 0 */
+#define TXD 0x00 /* Transmit data port */
+#define RXD 0x00 /* Receive data port */
+
+/* Register 1 */
+#define IER 0x01 /* Interrupt Enable Register*/
+#define IER_RXHDL_IE 0x01 /* Receiver high data level interrupt */
+#define IER_TXLDL_IE 0x02 /* Transmitter low data level interrupt */
+#define IER_LS_IE 0x04 /* Link Status Interrupt */
+#define IER_ETXURI 0x04 /* Tx underrun */
+#define IER_DMA_IE 0x10 /* DMA finished interrupt */
+#define IER_TXEMP_IE 0x20
+#define IER_SFIF_IE 0x40 /* Frame status FIFO intr */
+#define IER_TMR_IE 0x80 /* Timer event */
+
+#define FCR 0x02 /* (write only) */
+#define FCR_FIFO_EN 0x01 /* Enable FIFO's */
+#define FCR_RXSR 0x02 /* Rx FIFO soft reset */
+#define FCR_TXSR 0x04 /* Tx FIFO soft reset */
+#define FCR_RXTH 0x40 /* Rx FIFO threshold (set to 16) */
+#define FCR_TXTH 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define EIR 0x02 /* (read only) */
+#define EIR_RXHDL_EV 0x01
+#define EIR_TXLDL_EV 0x02
+#define EIR_LS_EV 0x04
+#define EIR_DMA_EV 0x10
+#define EIR_TXEMP_EV 0x20
+#define EIR_SFIF_EV 0x40
+#define EIR_TMR_EV 0x80
+
+#define LCR 0x03 /* Link control register */
+#define LCR_WLS_8 0x03 /* 8 bits */
+
+#define BSR 0x03 /* Bank select register */
+#define BSR_BKSE 0x80
+#define BANK0 LCR_WLS_8 /* Must make sure that we set 8N1 */
+#define BANK1 0x80
+#define BANK2 0xe0
+#define BANK3 0xe4
+#define BANK4 0xe8
+#define BANK5 0xec
+#define BANK6 0xf0
+#define BANK7 0xf4
+
+#define MCR 0x04 /* Mode Control Register */
+#define MCR_MODE_MASK ~(0xd0)
+#define MCR_UART 0x00
+#define MCR_RESERVED 0x20
+#define MCR_SHARP_IR 0x40
+#define MCR_SIR 0x60
+#define MCR_MIR 0x80
+#define MCR_FIR 0xa0
+#define MCR_CEIR 0xb0
+#define MCR_IR_PLS 0x10
+#define MCR_DMA_EN 0x04
+#define MCR_EN_IRQ 0x08
+#define MCR_TX_DFR 0x08
+
+#define LSR 0x05 /* Link status register */
+#define LSR_RXDA 0x01 /* Receiver data available */
+#define LSR_TXRDY 0x20 /* Transmitter ready */
+#define LSR_TXEMP 0x40 /* Transmitter empty */
+
+#define ASCR 0x07 /* Auxiliary Status and Control Register */
+#define ASCR_RXF_TOUT 0x01 /* Rx FIFO timeout */
+#define ASCR_FEND_INF 0x02 /* Frame end bytes in rx FIFO */
+#define ASCR_S_EOT 0x04 /* Set end of transmission */
+#define ASCT_RXBSY 0x20 /* Rx busy */
+#define ASCR_TXUR 0x40 /* Transmitter underrun */
+#define ASCR_CTE 0x80 /* Clear timer event */
+
+/* Bank 2 */
+#define BGDL 0x00 /* Baud Generator Divisor Port (Low Byte) */
+#define BGDH 0x01 /* Baud Generator Divisor Port (High Byte) */
+
+#define ECR1 0x02 /* Extended Control Register 1 */
+#define ECR1_EXT_SL 0x01 /* Extended Mode Select */
+#define ECR1_DMANF 0x02 /* DMA Fairness */
+#define ECR1_DMATH 0x04 /* DMA Threshold */
+#define ECR1_DMASWP 0x08 /* DMA Swap */
+
+#define EXCR2 0x04
+#define EXCR2_TFSIZ 0x01 /* Tx FIFO size = 32 */
+#define EXCR2_RFSIZ 0x04 /* Rx FIFO size = 32 */
+
+#define TXFLV 0x06 /* Tx FIFO level */
+#define RXFLV 0x07 /* Rx FIFO level */
+
+/* Bank 3 */
+#define MID 0x00
+
+/* Bank 4 */
+#define TMRL 0x00 /* Timer low byte */
+#define TMRH 0x01 /* Timer high byte */
+#define IRCR1 0x02 /* Infrared control register 1 */
+#define IRCR1_TMR_EN 0x01 /* Timer enable */
+
+#define TFRLL 0x04
+#define TFRLH 0x05
+#define RFRLL 0x06
+#define RFRLH 0x07
+
+/* Bank 5 */
+#define IRCR2 0x04 /* Infrared control register 2 */
+#define IRCR2_MDRS 0x04 /* MIR data rate select */
+#define IRCR2_FEND_MD 0x20 /* */
+
+#define FRM_ST 0x05 /* Frame status FIFO */
+#define FRM_ST_VLD 0x80 /* Frame status FIFO data valid */
+#define FRM_ST_ERR_MSK 0x5f
+#define FRM_ST_LOST_FR 0x40 /* Frame lost */
+#define FRM_ST_MAX_LEN 0x10 /* Max frame len exceeded */
+#define FRM_ST_PHY_ERR 0x08 /* Physical layer error */
+#define FRM_ST_BAD_CRC 0x04
+#define FRM_ST_OVR1 0x02 /* Rx FIFO overrun */
+#define FRM_ST_OVR2 0x01 /* Frame status FIFO overrun */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Bank 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Bank 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* NSC chip information */
+struct nsc_chip {
+ char *name; /* Name of chipset */
+ int cfg[3]; /* Config registers */
+ u_int8_t cid_index; /* Chip identification index reg */
+ u_int8_t cid_value; /* Chip identification expected value */
+ u_int8_t cid_mask; /* Chip identification revision mask */
+
+ /* Functions for probing and initializing the specific chip */
+ int (*probe)(struct nsc_chip *chip, chipio_t *info);
+ int (*init)(struct nsc_chip *chip, chipio_t *info);
+};
+typedef struct nsc_chip nsc_chip_t;
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+/* Private data for each instance */
+struct nsc_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ ktime_t stamp;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ struct platform_device *pldev;
+};
+
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+BSR);
+}
+
+#endif /* NSC_IRCC_H */
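
/*
 * Standalone illustration of the bank-switching idiom used throughout the
 * driver above (stubbed port I/O, hypothetical -- the real code uses the
 * kernel's inb()/outb()): the registers at low offsets are multiplexed
 * through the bank select register BSR, so every access path saves BSR,
 * switches banks, and restores the caller's bank afterwards.
 */
#include <stdio.h>

#define BSR   0x03
#define BANK0 0x03
#define BANK2 0xe0

static unsigned char regs[256];
static unsigned char inb(int port)          { return regs[port & 0xff]; }
static void outb(unsigned char v, int port) { regs[port & 0xff] = v; }

static void switch_bank(int iobase, int bank) { outb(bank, iobase + BSR); }

static void poke_bank2_reg(int iobase, int off, unsigned char val)
{
	unsigned char bank = inb(iobase + BSR);	/* save current bank */

	switch_bank(iobase, BANK2);
	outb(val, iobase + off);		/* access the banked register */
	outb(bank, iobase + BSR);		/* restore the caller's bank */
}

int main(void)
{
	switch_bank(0, BANK0);
	poke_bank2_reg(0, 0x04, 0x05);
	printf("BSR restored to 0x%02x\n", inb(BSR)); /* -> 0x03 */
	return 0;
}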
diff --git a/drivers/staging/irda/drivers/old_belkin-sir.c b/drivers/staging/irda/drivers/old_belkin-sir.c
new file mode 100644
index 000000000000..a7c2e990ae69
--- /dev/null
+++ b/drivers/staging/irda/drivers/old_belkin-sir.c
@@ -0,0 +1,146 @@
+/*********************************************************************
+ *
+ * Filename: old_belkin.c
+ * Version: 1.1
+ * Description: Driver for the Belkin (old) SmartBeam dongle
+ * Status: Experimental...
+ * Author: Jean Tourrilhes <jt@hpl.hp.com>
+ * Created at: 22/11/99
+ * Modified at: Fri Dec 17 09:13:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+// #include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+/*
+ * Belkin is selling a dongle called the SmartBeam.
+ * In fact, there are two hardware versions of this dongle, of course with
+ * the same name and looking exactly the same (grrr...).
+ * I guess that I've got the old one, because inside I don't have
+ * a jumper for IrDA/ASK...
+ *
+ * As far as I can tell from the info on their web site, the old dongle
+ * supports only 9600 b/s, which makes our life much simpler as far as
+ * the driver is concerned, but you might not like it very much ;-)
+ * The new SmartBeam does 115 kb/s, and I've not tested it...
+ *
+ * Belkin claims that the correct driver for the old dongle (in Windows)
+ * is the generic Parallax 9500a driver, but the Linux LiteLink driver
+ * fails for me (probably because Linux-IrDA doesn't do rate fallback),
+ * so I created this really dumb driver...
+ *
+ * In fact, this driver doesn't do much. The only thing it does is to
+ * prevent Linux-IrDA from using any speed other than 9600 b/s ;-) This
+ * driver is called "old_belkin" so that when the new SmartBeam is supported
+ * its driver can be called "belkin" instead of "new_belkin".
+ *
+ * Note : this driver was written without any info/help from Belkin,
+ * so a lot of info here might be totally wrong. Blame me ;-)
+ */
+
+static int old_belkin_open(struct sir_dev *dev);
+static int old_belkin_close(struct sir_dev *dev);
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed);
+static int old_belkin_reset(struct sir_dev *dev);
+
+static struct dongle_driver old_belkin = {
+ .owner = THIS_MODULE,
+ .driver_name = "Old Belkin SmartBeam",
+ .type = IRDA_OLD_BELKIN_DONGLE,
+ .open = old_belkin_open,
+ .close = old_belkin_close,
+ .reset = old_belkin_reset,
+ .set_speed = old_belkin_change_speed,
+};
+
+static int __init old_belkin_sir_init(void)
+{
+ return irda_register_dongle(&old_belkin);
+}
+
+static void __exit old_belkin_sir_cleanup(void)
+{
+ irda_unregister_dongle(&old_belkin);
+}
+
+static int old_belkin_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Not too fast, please... */
+ qos->baud_rate.bits &= IR_9600;
+ /* Needs at least 10 ms (totally wild guess, can probably do better) */
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int old_belkin_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function old_belkin_change_speed (task)
+ *
+ * With only one speed available, not much to do...
+ */
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ dev->speed = 9600;
+ return (speed==dev->speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function old_belkin_reset (task)
+ *
+ * Reset the Old-Belkin type dongle.
+ *
+ */
+static int old_belkin_reset(struct sir_dev *dev)
+{
+ /* This dongle's speed "defaults" to 9600 bps ;-) */
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
+
+module_init(old_belkin_sir_init);
+module_exit(old_belkin_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/pxaficp_ir.c b/drivers/staging/irda/drivers/pxaficp_ir.c
new file mode 100644
index 000000000000..1dba16bc7f8d
--- /dev/null
+++ b/drivers/staging/irda/drivers/pxaficp_ir.c
@@ -0,0 +1,1076 @@
+/*
+ * linux/drivers/net/irda/pxaficp_ir.c
+ *
+ * Based on sa1100_ir.c by Russell King
+ *
+ * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <linux/platform_data/irda-pxaficp.h>
+#undef __REG
+#define __REG(x) ((x) & 0xffff)
+#include <mach/regs-uart.h>
+
+#define ICCR0 0x0000 /* ICP Control Register 0 */
+#define ICCR1 0x0004 /* ICP Control Register 1 */
+#define ICCR2 0x0008 /* ICP Control Register 2 */
+#define ICDR 0x000c /* ICP Data Register */
+#define ICSR0 0x0014 /* ICP Status Register 0 */
+#define ICSR1 0x0018 /* ICP Status Register 1 */
+
+#define ICCR0_AME (1 << 7) /* Address match enable */
+#define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */
+#define ICCR0_RIE (1 << 5) /* Receive FIFO interrupt enable */
+#define ICCR0_RXE (1 << 4) /* Receive enable */
+#define ICCR0_TXE (1 << 3) /* Transmit enable */
+#define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */
+#define ICCR0_LBM (1 << 1) /* Loopback mode */
+#define ICCR0_ITR (1 << 0) /* IrDA transmission */
+
+#define ICCR2_RXP (1 << 3) /* Receive Pin Polarity select */
+#define ICCR2_TXP (1 << 2) /* Transmit Pin Polarity select */
+#define ICCR2_TRIG (3 << 0) /* Receive FIFO Trigger threshold */
+#define ICCR2_TRIG_8 (0 << 0) /* >= 8 bytes */
+#define ICCR2_TRIG_16 (1 << 0) /* >= 16 bytes */
+#define ICCR2_TRIG_32 (2 << 0) /* >= 32 bytes */
+
+#define ICSR0_EOC (1 << 6) /* DMA End of Descriptor Chain */
+#define ICSR0_FRE (1 << 5) /* Framing error */
+#define ICSR0_RFS (1 << 4) /* Receive FIFO service request */
+#define ICSR0_TFS (1 << 3) /* Transmit FIFO service request */
+#define ICSR0_RAB (1 << 2) /* Receiver abort */
+#define ICSR0_TUR (1 << 1) /* Transmit FIFO underrun */
+#define ICSR0_EIF (1 << 0) /* End/Error in FIFO */
+
+#define ICSR1_ROR (1 << 6) /* Receive FIFO overrun */
+#define ICSR1_CRE (1 << 5) /* CRC error */
+#define ICSR1_EOF (1 << 4) /* End of frame */
+#define ICSR1_TNF (1 << 3) /* Transmit FIFO not full */
+#define ICSR1_RNE (1 << 2) /* Receive FIFO not empty */
+#define ICSR1_TBY (1 << 1) /* Transmitter busy flag */
+#define ICSR1_RSY (1 << 0) /* Receiver synchronized flag */
+
+#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
+#define IrSR_RXPL_POS_IS_ZERO 0x0
+#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
+#define IrSR_TXPL_POS_IS_ZERO 0x0
+#define IrSR_XMODE_PULSE_1_6 (1<<2)
+#define IrSR_XMODE_PULSE_3_16 0x0
+#define IrSR_RCVEIR_IR_MODE (1<<1)
+#define IrSR_RCVEIR_UART_MODE 0x0
+#define IrSR_XMITIR_IR_MODE (1<<0)
+#define IrSR_XMITIR_UART_MODE 0x0
+
+#define IrSR_IR_RECEIVE_ON (\
+ IrSR_RXPL_NEG_IS_ZERO | \
+ IrSR_TXPL_POS_IS_ZERO | \
+ IrSR_XMODE_PULSE_3_16 | \
+ IrSR_RCVEIR_IR_MODE | \
+ IrSR_XMITIR_UART_MODE)
+
+#define IrSR_IR_TRANSMIT_ON (\
+ IrSR_RXPL_NEG_IS_ZERO | \
+ IrSR_TXPL_POS_IS_ZERO | \
+ IrSR_XMODE_PULSE_3_16 | \
+ IrSR_RCVEIR_UART_MODE | \
+ IrSR_XMITIR_IR_MODE)
+
+/* macros for registers read/write */
+#define ficp_writel(irda, val, off) \
+ do { \
+ dev_vdbg(irda->dev, \
+ "%s():%d ficp_writel(0x%x, %s)\n", \
+ __func__, __LINE__, (val), #off); \
+ writel_relaxed((val), (irda)->irda_base + (off)); \
+ } while (0)
+
+#define ficp_readl(irda, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((irda)->irda_base + (off)); \
+ dev_vdbg(irda->dev, \
+ "%s():%d ficp_readl(%s): 0x%x\n", \
+ __func__, __LINE__, #off, _v); \
+ _v; \
+ })
+
+#define stuart_writel(irda, val, off) \
+ do { \
+ dev_vdbg(irda->dev, \
+ "%s():%d stuart_writel(0x%x, %s)\n", \
+ __func__, __LINE__, (val), #off); \
+ writel_relaxed((val), (irda)->stuart_base + (off)); \
+ } while (0)
+
+#define stuart_readl(irda, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((irda)->stuart_base + (off)); \
+ dev_vdbg(irda->dev, \
+ "%s():%d stuart_readl(%s): 0x%x\n", \
+ __func__, __LINE__, #off, _v); \
+ _v; \
+ })
+
+struct pxa_irda {
+ int speed;
+ int newspeed;
+ unsigned long long last_clk;
+
+ void __iomem *stuart_base;
+ void __iomem *irda_base;
+ unsigned char *dma_rx_buff;
+ unsigned char *dma_tx_buff;
+ dma_addr_t dma_rx_buff_phy;
+ dma_addr_t dma_tx_buff_phy;
+ unsigned int dma_tx_buff_len;
+ struct dma_chan *txdma;
+ struct dma_chan *rxdma;
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+ int drcmr_rx;
+ int drcmr_tx;
+
+ int uart_irq;
+ int icp_irq;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ struct device *dev;
+ struct pxaficp_platform_data *pdata;
+ struct clk *fir_clk;
+ struct clk *sir_clk;
+ struct clk *cur_clk;
+};
+
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed);
+
+static inline void pxa_irda_disable_clk(struct pxa_irda *si)
+{
+ if (si->cur_clk)
+ clk_disable_unprepare(si->cur_clk);
+ si->cur_clk = NULL;
+}
+
+static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
+{
+ si->cur_clk = si->fir_clk;
+ clk_prepare_enable(si->fir_clk);
+}
+
+static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
+{
+ si->cur_clk = si->sir_clk;
+ clk_prepare_enable(si->sir_clk);
+}
+
+
+#define IS_FIR(si) ((si)->speed >= 4000000)
+#define IRDA_FRAME_SIZE_LIMIT 2047
+
+static void pxa_irda_fir_dma_rx_irq(void *data);
+static void pxa_irda_fir_dma_tx_irq(void *data);
+
+static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
+ IRDA_FRAME_SIZE_LIMIT, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(si->dev, "prep_slave_single() failed\n");
+ return;
+ }
+ tx->callback = pxa_irda_fir_dma_rx_irq;
+ tx->callback_param = si;
+ si->rx_cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(si->rxdma);
+}
+
+static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
+ si->dma_tx_buff_len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(si->dev, "prep_slave_single() failed\n");
+ return;
+ }
+ tx->callback = pxa_irda_fir_dma_tx_irq;
+ tx->callback_param = si;
+ si->tx_cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(si->txdma);
+}
+
+/*
+ * Set the IrDA communications mode.
+ */
+static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
+{
+ if (si->pdata->transceiver_mode)
+ si->pdata->transceiver_mode(si->dev, mode);
+ else {
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_set_value(si->pdata->gpio_pwdown,
+ !(mode & IR_OFF) ^
+ !si->pdata->gpio_pwdown_inverted);
+ pxa2xx_transceiver_mode(si->dev, mode);
+ }
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
+{
+ unsigned long flags;
+ unsigned int divisor;
+
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+
+ /* refer to PXA250/210 Developer's Manual 10-7 */
+ /* BaudRate = 14.7456 MHz / (16*Divisor) */
+ divisor = 14745600 / (16 * speed);
+
+ local_irq_save(flags);
+
+ if (IS_FIR(si)) {
+ /* stop RX DMA */
+ dmaengine_terminate_all(si->rxdma);
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_disable_clk(si);
+
+ /* set board transceiver to SIR mode */
+ pxa_irda_set_mode(si, IR_SIRMODE);
+
+ /* enable the STUART clock */
+ pxa_irda_enable_sirclk(si);
+ }
+
+ /* disable STUART first */
+ stuart_writel(si, 0, STIER);
+
+ /* access DLL & DLH */
+ stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
+ stuart_writel(si, divisor & 0xff, STDLL);
+ stuart_writel(si, divisor >> 8, STDLH);
+ stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);
+
+ si->speed = speed;
+ stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
+ STISR);
+ stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
+ STIER);
+
+ local_irq_restore(flags);
+ break;
+
+ case 4000000:
+ local_irq_save(flags);
+
+ /* disable STUART */
+ stuart_writel(si, 0, STIER);
+ stuart_writel(si, 0, STISR);
+ pxa_irda_disable_clk(si);
+
+ /* disable FICP first */
+ ficp_writel(si, 0, ICCR0);
+
+ /* set board transceiver to FIR mode */
+ pxa_irda_set_mode(si, IR_FIRMODE);
+
+ /* enable the FICP clock */
+ pxa_irda_enable_firclk(si);
+
+ si->speed = speed;
+ pxa_irda_fir_dma_rx_start(si);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ local_irq_restore(flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
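
/*
 * Standalone worked example of the SIR divisor computation above
 * (hypothetical helper name): the STUART is clocked at 14.7456 MHz and the
 * resulting baud rate is 14.7456 MHz / (16 * divisor).
 */
#include <stdio.h>

static unsigned int sir_divisor(unsigned int speed)
{
	return 14745600 / (16 * speed);
}

int main(void)
{
	/* 9600 -> 96, 115200 -> 8; both divide the clock exactly */
	printf("9600   baud: divisor %u\n", sir_divisor(9600));
	printf("115200 baud: divisor %u\n", sir_divisor(115200));
	return 0;
}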
+
+/* SIR interrupt service routine. */
+static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pxa_irda *si = netdev_priv(dev);
+ int iir, lsr, data;
+
+ iir = stuart_readl(si, STIIR);
+
+ switch (iir & 0x0F) {
+ case 0x06: /* Receiver Line Status */
+ lsr = stuart_readl(si, STLSR);
+ while (lsr & LSR_FIFOE) {
+ data = stuart_readl(si, STRBR);
+ if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
+ printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
+ dev->stats.rx_errors++;
+ if (lsr & LSR_FE)
+ dev->stats.rx_frame_errors++;
+ if (lsr & LSR_OE)
+ dev->stats.rx_fifo_errors++;
+ } else {
+ dev->stats.rx_bytes++;
+ async_unwrap_char(dev, &dev->stats,
+ &si->rx_buff, data);
+ }
+ lsr = stuart_readl(si, STLSR);
+ }
+ si->last_clk = sched_clock();
+ break;
+
+ case 0x04: /* Received Data Available */
+ /* fall through */
+
+ case 0x0C: /* Character Timeout Indication */
+ do {
+ dev->stats.rx_bytes++;
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff,
+ stuart_readl(si, STRBR));
+ } while (stuart_readl(si, STLSR) & LSR_DR);
+ si->last_clk = sched_clock();
+ break;
+
+ case 0x02: /* Transmit FIFO Data Request */
+ while ((si->tx_buff.len) &&
+ (stuart_readl(si, STLSR) & LSR_TDRQ)) {
+ stuart_writel(si, *si->tx_buff.data++, STTHR);
+ si->tx_buff.len -= 1;
+ }
+
+ if (si->tx_buff.len == 0) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
+
+ /* We need to ensure that the transmitter has finished. */
+ while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
+ cpu_relax();
+ si->last_clk = sched_clock();
+
+ /*
+ * Ok, we've finished transmitting. Now enable
+ * the receiver. Sometimes we get a receive IRQ
+ * immediately after a transmit...
+ */
+ if (si->newspeed) {
+ pxa_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ } else {
+ /* enable IR Receiver, disable IR Transmitter */
+ stuart_writel(si, IrSR_IR_RECEIVE_ON |
+ IrSR_XMODE_PULSE_1_6, STISR);
+ /* enable STUART and receive interrupts */
+ stuart_writel(si, IER_UUE | IER_RLSE |
+ IER_RAVIE | IER_RTIOE, STIER);
+ }
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ }
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* FIR Receive DMA interrupt handler */
+static void pxa_irda_fir_dma_rx_irq(void *data)
+{
+ struct net_device *dev = data;
+ struct pxa_irda *si = netdev_priv(dev);
+
+ dmaengine_terminate_all(si->rxdma);
+ netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
+}
+
+/* FIR Transmit DMA interrupt handler */
+static void pxa_irda_fir_dma_tx_irq(void *data)
+{
+ struct net_device *dev = data;
+ struct pxa_irda *si = netdev_priv(dev);
+
+ dmaengine_terminate_all(si->txdma);
+ if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += si->dma_tx_buff_len;
+ }
+
+ while (ficp_readl(si, ICSR1) & ICSR1_TBY)
+ cpu_relax();
+ si->last_clk = sched_clock();
+
+ /*
+ * HACK: It looks like the TBY bit is dropped too soon.
+ * Without this delay things break.
+ */
+ udelay(120);
+
+ if (si->newspeed) {
+ pxa_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ } else {
+ int i = 64;
+
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_fir_dma_rx_start(si);
+ while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+ ficp_readl(si, ICDR);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ if (i < 0)
+ printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
+ }
+ netif_wake_queue(dev);
+}
+
+/* EIF(Error in FIFO/End in Frame) handler for FIR */
+static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
+{
+ unsigned int len, stat, data;
+ struct dma_tx_state state;
+
+ /* Get the current data position. */
+
+ dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
+ len = IRDA_FRAME_SIZE_LIMIT - state.residue;
+
+ do {
+ /* Read Status, and then Data. */
+ stat = ficp_readl(si, ICSR1);
+ rmb();
+ data = ficp_readl(si, ICDR);
+
+ if (stat & (ICSR1_CRE | ICSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & ICSR1_CRE) {
+ printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
+ dev->stats.rx_crc_errors++;
+ }
+ if (stat & ICSR1_ROR) {
+ printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
+ dev->stats.rx_over_errors++;
+ }
+ } else {
+ si->dma_rx_buff[len++] = data;
+ }
+ /* If we hit the end of frame, there's no point in continuing. */
+ if (stat & ICSR1_EOF)
+ break;
+ } while (ficp_readl(si, ICSR0) & ICSR0_EIF);
+
+ if (stat & ICSR1_EOF) {
+ /* end of frame. */
+ struct sk_buff *skb;
+
+ if (icsr0 & ICSR0_FRE) {
+ printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ skb = alloc_skb(len+1,GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ /* Align IP header to 20 bytes */
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
+ skb_put(skb, len);
+
+ /* Feed it to IrLAP */
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+}
+
+/* FIR interrupt handler */
+static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pxa_irda *si = netdev_priv(dev);
+ int icsr0, i = 64;
+
+ /* stop RX DMA */
+ dmaengine_terminate_all(si->rxdma);
+ si->last_clk = sched_clock();
+ icsr0 = ficp_readl(si, ICSR0);
+
+ if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
+ if (icsr0 & ICSR0_FRE) {
+ printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
+ dev->stats.rx_frame_errors++;
+ } else {
+ printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
+ dev->stats.rx_errors++;
+ }
+ ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
+ }
+
+ if (icsr0 & ICSR0_EIF) {
+ /* An error in the FIFO occurred, or there is an end of frame */
+ pxa_irda_fir_irq_eif(si, dev, icsr0);
+ }
+
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_fir_dma_rx_start(si);
+ while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+ ficp_readl(si, ICDR);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ if (i < 0)
+ printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
+
+ return IRQ_HANDLED;
+}
+
+/* hard_xmit interface of irda device */
+static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1)
+ si->newspeed = speed;
+
+ /*
+ * If this is an empty frame, we can bypass a lot.
+ */
+ if (skb->len == 0) {
+ if (si->newspeed) {
+ si->newspeed = 0;
+ pxa_irda_set_speed(si, speed);
+ }
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+ if (!IS_FIR(si)) {
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
+
+ /* Disable STUART interrupts and switch to transmit mode. */
+ stuart_writel(si, 0, STIER);
+ stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
+ STISR);
+
+ /* enable STUART and transmit interrupts */
+ stuart_writel(si, IER_UUE | IER_TIE, STIER);
+ } else {
+ unsigned long mtt = irda_get_mtt(skb);
+
+ si->dma_tx_buff_len = skb->len;
+ skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
+
+ if (mtt)
+ while ((sched_clock() - si->last_clk) * 1000 < mtt)
+ cpu_relax();
+
+ /* stop RX DMA, disable FICP */
+ dmaengine_terminate_all(si->rxdma);
+ ficp_writel(si, 0, ICCR0);
+
+ pxa_irda_fir_dma_tx_start(si);
+ ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct pxa_irda *si = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (netif_running(dev)) {
+ ret = pxa_irda_set_speed(si,
+ rq->ifr_baudrate);
+ } else {
+ printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ ret = 0;
+ rq->ifr_receiving = IS_FIR(si) ? 0
+ : si->rx_buff.state != OUTSIDE_FRAME;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void pxa_irda_startup(struct pxa_irda *si)
+{
+ /* Disable STUART interrupts */
+ stuart_writel(si, 0, STIER);
+ /* enable STUART interrupt to the processor */
+ stuart_writel(si, MCR_OUT2, STMCR);
+ /* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
+ stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
+ /* enable FIFO, we use FIFO to improve performance */
+ stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);
+
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+ /* configure FICP ICCR2 */
+ ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);
+
+ /* force SIR reinitialization */
+ si->speed = 4000000;
+ pxa_irda_set_speed(si, 9600);
+
+ printk(KERN_DEBUG "pxa_ir: irda startup\n");
+}
+
+static void pxa_irda_shutdown(struct pxa_irda *si)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* disable STUART and interrupt */
+ stuart_writel(si, 0, STIER);
+ /* disable STUART SIR mode */
+ stuart_writel(si, 0, STISR);
+
+ /* disable DMA */
+ dmaengine_terminate_all(si->rxdma);
+ dmaengine_terminate_all(si->txdma);
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+
+ /* disable the STUART or FICP clocks */
+ pxa_irda_disable_clk(si);
+
+ local_irq_restore(flags);
+
+ /* power off board transceiver */
+ pxa_irda_set_mode(si, IR_OFF);
+
+ printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
+}
+
+static int pxa_irda_start(struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+ dma_cap_mask_t mask;
+ struct dma_slave_config config;
+ struct pxad_param param;
+ int err;
+
+ si->speed = 9600;
+
+ err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq1;
+
+ err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq2;
+
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(si->uart_irq);
+ disable_irq(si->icp_irq);
+
+ err = -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = (dma_addr_t)si->irda_base + ICDR;
+ config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+
+ param.drcmr = si->drcmr_rx;
+ si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "rx");
+ if (!si->rxdma)
+ goto err_rx_dma;
+
+ param.drcmr = si->drcmr_tx;
+ si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "tx");
+ if (!si->txdma)
+ goto err_tx_dma;
+
+ err = dmaengine_slave_config(si->rxdma, &config);
+ if (err)
+ goto err_dma_rx_buff;
+ err = dmaengine_slave_config(si->txdma, &config);
+ if (err)
+ goto err_dma_rx_buff;
+
+ err = -ENOMEM;
+ si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+ &si->dma_rx_buff_phy, GFP_KERNEL);
+ if (!si->dma_rx_buff)
+ goto err_dma_rx_buff;
+
+ si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+ &si->dma_tx_buff_phy, GFP_KERNEL);
+ if (!si->dma_tx_buff)
+ goto err_dma_tx_buff;
+
+ /* Setup the serial port for the initial speed. */
+ pxa_irda_startup(si);
+
+ /*
+ * Open a new IrLAP layer instance.
+ */
+ si->irlap = irlap_open(dev, &si->qos, "pxa");
+ err = -ENOMEM;
+ if (!si->irlap)
+ goto err_irlap;
+
+ /*
+ * Now enable the interrupt and start the queue
+ */
+ enable_irq(si->uart_irq);
+ enable_irq(si->icp_irq);
+ netif_start_queue(dev);
+
+ printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
+
+ return 0;
+
+err_irlap:
+ pxa_irda_shutdown(si);
+ dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+err_dma_tx_buff:
+ dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+err_dma_rx_buff:
+ dma_release_channel(si->txdma);
+err_tx_dma:
+ dma_release_channel(si->rxdma);
+err_rx_dma:
+ free_irq(si->icp_irq, dev);
+err_irq2:
+ free_irq(si->uart_irq, dev);
+err_irq1:
+
+ return err;
+}
+
+static int pxa_irda_stop(struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ pxa_irda_shutdown(si);
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ free_irq(si->uart_irq, dev);
+ free_irq(si->icp_irq, dev);
+
+ dmaengine_terminate_all(si->rxdma);
+ dmaengine_terminate_all(si->txdma);
+ dma_release_channel(si->rxdma);
+ dma_release_channel(si->txdma);
+
+	if (si->dma_tx_buff)
+		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+	if (si->dma_rx_buff)
+		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+
+ printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
+ return 0;
+}
+
+static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+ struct pxa_irda *si;
+
+ if (dev && netif_running(dev)) {
+ si = netdev_priv(dev);
+ netif_device_detach(dev);
+ pxa_irda_shutdown(si);
+ }
+
+ return 0;
+}
+
+static int pxa_irda_resume(struct platform_device *_dev)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+ struct pxa_irda *si;
+
+ if (dev && netif_running(dev)) {
+ si = netdev_priv(dev);
+ pxa_irda_startup(si);
+ netif_device_attach(dev);
+ netif_wake_queue(dev);
+ }
+
+ return 0;
+}
+
+
+static int pxa_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static const struct net_device_ops pxa_irda_netdev_ops = {
+ .ndo_open = pxa_irda_start,
+ .ndo_stop = pxa_irda_stop,
+ .ndo_start_xmit = pxa_irda_hard_xmit,
+ .ndo_do_ioctl = pxa_irda_ioctl,
+};
+
+static int pxa_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct resource *res;
+ struct pxa_irda *si;
+ void __iomem *ficp, *stuart;
+ unsigned int baudrate_mask;
+ int err;
+
+ if (!pdev->dev.platform_data)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ficp = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ficp)) {
+ dev_err(&pdev->dev, "resource ficp not defined\n");
+ return PTR_ERR(ficp);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ stuart = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(stuart)) {
+ dev_err(&pdev->dev, "resource stuart not defined\n");
+ return PTR_ERR(stuart);
+ }
+
+ dev = alloc_irdadev(sizeof(struct pxa_irda));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_mem_1;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ si = netdev_priv(dev);
+ si->dev = &pdev->dev;
+ si->pdata = pdev->dev.platform_data;
+
+ si->irda_base = ficp;
+ si->stuart_base = stuart;
+ si->uart_irq = platform_get_irq(pdev, 0);
+ si->icp_irq = platform_get_irq(pdev, 1);
+
+ si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
+ si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
+ if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
+ err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
+ goto err_mem_4;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res)
+ si->drcmr_rx = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res)
+ si->drcmr_tx = res->start;
+
+ /*
+ * Initialise the SIR buffers
+ */
+ err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
+ if (err)
+ goto err_mem_4;
+ err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
+ if (err)
+ goto err_mem_5;
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown)) {
+ err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
+ if (err)
+ goto err_startup;
+ err = gpio_direction_output(si->pdata->gpio_pwdown,
+ !si->pdata->gpio_pwdown_inverted);
+ if (err) {
+ gpio_free(si->pdata->gpio_pwdown);
+ goto err_startup;
+ }
+ }
+
+ if (si->pdata->startup) {
+ err = si->pdata->startup(si->dev);
+ if (err)
+ goto err_startup;
+ }
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
+ dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
+
+ dev->netdev_ops = &pxa_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ baudrate_mask = 0;
+ if (si->pdata->transceiver_cap & IR_SIRMODE)
+ baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ if (si->pdata->transceiver_cap & IR_FIRMODE)
+ baudrate_mask |= IR_4000000 << 8;
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 7; /* 1ms or more */
+
+ irda_qos_bits_to_value(&si->qos);
+
+ err = register_netdev(dev);
+
+ if (err == 0)
+ platform_set_drvdata(pdev, dev);
+
+ if (err) {
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+err_startup:
+ kfree(si->tx_buff.head);
+err_mem_5:
+ kfree(si->rx_buff.head);
+err_mem_4:
+ free_netdev(dev);
+ }
+err_mem_1:
+ return err;
+}
+
+static int pxa_irda_remove(struct platform_device *_dev)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+
+ if (dev) {
+ struct pxa_irda *si = netdev_priv(dev);
+ unregister_netdev(dev);
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_free(si->pdata->gpio_pwdown);
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver pxa_ir_driver = {
+ .driver = {
+ .name = "pxa2xx-ir",
+ },
+ .probe = pxa_irda_probe,
+ .remove = pxa_irda_remove,
+ .suspend = pxa_irda_suspend,
+ .resume = pxa_irda_resume,
+};
+
+module_platform_driver(pxa_ir_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/staging/irda/drivers/sa1100_ir.c b/drivers/staging/irda/drivers/sa1100_ir.c
new file mode 100644
index 000000000000..b6e44ff4e373
--- /dev/null
+++ b/drivers/staging/irda/drivers/sa1100_ir.c
@@ -0,0 +1,1150 @@
+/*
+ * linux/drivers/net/irda/sa1100_ir.c
+ *
+ * Copyright (C) 2000-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver for the StrongARM SA1100 embedded microprocessor
+ *
+ * Note that we don't have to worry about the SA1111's DMA bugs in here,
+ * so we use the straightforward dma_map_* functions with a NULL pointer.
+ *
+ * This driver takes one kernel command line parameter, sa1100ir=, with
+ * the following options:
+ * max_rate:baudrate - set the maximum baud rate
+ * power_level:level - set the transmitter power level
+ * tx_lpm:0|1 - set transmit low power mode
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <mach/hardware.h>
+#include <linux/platform_data/irda-sa11x0.h>
+
+static int power_level = 3;
+static int tx_lpm;
+static int max_rate = 4000000;
+
+struct sa1100_buf {
+ struct device *dev;
+ struct sk_buff *skb;
+ struct scatterlist sg;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+};
+
+struct sa1100_irda {
+ unsigned char utcr4;
+ unsigned char power;
+ unsigned char open;
+
+ int speed;
+ int newspeed;
+
+ struct sa1100_buf dma_rx;
+ struct sa1100_buf dma_tx;
+
+ struct device *dev;
+ struct irda_platform_data *pdata;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
+ irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
+};
+
+static int sa1100_irda_set_speed(struct sa1100_irda *, int);
+
+#define IS_FIR(si) ((si)->speed >= 4000000)
+
+#define HPSIR_MAX_RXLEN 2047
+
+static struct dma_slave_config sa1100_irda_sir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2UTDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+ .direction = DMA_FROM_DEVICE,
+ .src_addr = __PREG(Ser2HSDR),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2HSDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 8,
+};
+
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+ struct dma_chan *chan = buf->chan;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, buf->cookie, &state);
+ if (status != DMA_PAUSED)
+ return 0;
+
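+	/* the channel is paused by the FIR IRQ handler before we are called;
+	 * bytes transferred = scatterlist length minus the reported residue
+	 */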
+ return sg_dma_len(&buf->sg) - state.residue;
+}
+
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+ const char *name, struct dma_slave_config *cfg)
+{
+ dma_cap_mask_t m;
+ int ret;
+
+ dma_cap_zero(m);
+ dma_cap_set(DMA_SLAVE, m);
+
+ buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+ if (!buf->chan) {
+ dev_err(dev, "unable to request DMA channel for %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ ret = dmaengine_slave_config(buf->chan, cfg);
+ if (ret)
+ dev_warn(dev, "DMA slave_config for %s returned %d\n",
+ name, ret);
+
+ buf->dev = buf->chan->device->dev;
+
+ return 0;
+}
+
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+ enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = buf->chan;
+
+ desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (desc) {
+ desc->callback = cb;
+ desc->callback_param = cb_p;
+ buf->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ }
+}
+
+/*
+ * Allocate and map the receive buffer, unless it is already allocated.
+ */
+static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
+{
+ if (si->dma_rx.skb)
+ return 0;
+
+ si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
+ if (!si->dma_rx.skb) {
+ printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Align any IP headers that may be contained
+ * within the frame.
+ */
+ skb_reserve(si->dma_rx.skb, 1);
+
+ sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
+ if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
+ dev_kfree_skb_any(si->dma_rx.skb);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * We want to get here as soon as possible, and get the receiver setup.
+ * We use the existing buffer.
+ */
+static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
+{
+ if (!si->dma_rx.skb) {
+ printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
+ return;
+ }
+
+ /*
+ * First empty receive FIFO
+ */
+ Ser2HSCR0 = HSCR0_HSSP;
+
+ /*
+ * Enable the DMA, receiver and receive interrupt.
+ */
+ dmaengine_terminate_all(si->dma_rx.chan);
+ sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
+}
+
+static void sa1100_irda_check_speed(struct sa1100_irda *si)
+{
+ if (si->newspeed) {
+ sa1100_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+}
+
+/*
+ * HP-SIR format support.
+ */
+static void sa1100_irda_sirtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
+ dev_kfree_skb(si->dma_tx.skb);
+ si->dma_tx.skb = NULL;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
+
+ /* We need to ensure that the transmitter has finished. */
+ do
+ rmb();
+ while (Ser2UTSR1 & UTSR1_TBY);
+
+ /*
+ * Ok, we've finished transmitting. Now enable the receiver.
+ * Sometimes we get a receive IRQ immediately after a transmit...
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ sa1100_irda_check_speed(si);
+
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
+{
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+ si->tx_buff.truesize);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);
+
+ /*
+ * The mean turn-around time is enforced by XBOF padding,
+ * so we don't have to do anything special here.
+ */
+ Ser2UTCR3 = UTCR3_TXE;
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
+{
+ int status;
+
+ status = Ser2UTSR0;
+
+ /*
+ * Deal with any receive errors first. The bytes in error may be
+ * the only bytes in the receive FIFO, so we do this first.
+ */
+ while (status & UTSR0_EIF) {
+ int stat, data;
+
+ stat = Ser2UTSR1;
+ data = Ser2UTDR;
+
+ if (stat & (UTSR1_FRE | UTSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & UTSR1_FRE)
+ dev->stats.rx_frame_errors++;
+ if (stat & UTSR1_ROR)
+ dev->stats.rx_fifo_errors++;
+ } else
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
+
+ status = Ser2UTSR0;
+ }
+
+ /*
+ * We must clear certain bits.
+ */
+ Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
+
+ if (status & UTSR0_RFS) {
+ /*
+ * There are at least 4 bytes in the FIFO. Read 3 bytes
+ * and leave the rest to the block below.
+ */
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ }
+
+ if (status & (UTSR0_RFS | UTSR0_RID)) {
+ /*
+ * Fifo contains more than 1 character.
+ */
+ do {
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff,
+ Ser2UTDR);
+ } while (Ser2UTSR1 & UTSR1_RNE);
+
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * FIR format support.
+ */
+static void sa1100_irda_firtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ /*
+ * Wait for the transmission to complete. Unfortunately,
+ * the hardware doesn't give us an interrupt to indicate
+ * "end of frame".
+ */
+ do
+ rmb();
+ while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
+
+ /*
+ * Clear the transmit underrun bit.
+ */
+ Ser2HSSR0 = HSSR0_TUR;
+
+ /*
+ * Do we need to change speed? Note that we're lazy
+ * here - we don't free the old dma_rx.skb. We don't need
+ * to allocate a buffer either.
+ */
+ sa1100_irda_check_speed(si);
+
+ /*
+ * Start reception. This disables the transmitter for
+ * us. This will be using the existing RX buffer.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ /* Account and free the packet. */
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+		dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ si->dma_tx.skb = NULL;
+ }
+
+ /*
+ * Make sure that the TX queue is available for sending
+ * (for retries). TX has priority over RX at all times.
+ */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
+{
+ int mtt = irda_get_mtt(skb);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev);
+
+ /*
+ * If we have a mean turn-around time, impose the specified
+ * specified delay. We could shorten this by timing from
+ * the point we received the packet.
+ */
+ if (mtt)
+ udelay(mtt);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE;
+
+ return NETDEV_TX_OK;
+}
+
+static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
+{
+ struct sk_buff *skb = si->dma_rx.skb;
+ unsigned int len, stat, data;
+
+ if (!skb) {
+ printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
+ return;
+ }
+
+ /*
+ * Get the current data position.
+ */
+ len = sa1100_irda_dma_xferred(&si->dma_rx);
+ if (len > HPSIR_MAX_RXLEN)
+ len = HPSIR_MAX_RXLEN;
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
+
+ do {
+ /*
+ * Read Status, and then Data.
+ */
+ stat = Ser2HSSR1;
+ rmb();
+ data = Ser2HSDR;
+
+ if (stat & (HSSR1_CRE | HSSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & HSSR1_CRE)
+ dev->stats.rx_crc_errors++;
+ if (stat & HSSR1_ROR)
+ dev->stats.rx_frame_errors++;
+ } else
+ skb->data[len++] = data;
+
+ /*
+ * If we hit the end of frame, there's
+ * no point in continuing.
+ */
+ if (stat & HSSR1_EOF)
+ break;
+ } while (Ser2HSSR0 & HSSR0_EIF);
+
+ if (stat & HSSR1_EOF) {
+ si->dma_rx.skb = NULL;
+
+ skb_put(skb, len);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ /*
+ * Before we pass the buffer up, allocate a new one.
+ */
+ sa1100_irda_rx_alloc(si);
+
+ netif_rx(skb);
+ } else {
+ /*
+ * Remap the buffer - it was previously mapped, and we
+ * hope that this succeeds.
+ */
+ dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
+ }
+}
+
+/*
+ * We only have to handle RX events here; transmit events go via the TX
+ * DMA handler. We disable RX, process the data, and then restart RX.
+ */
+static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
+{
+ /*
+ * Stop RX DMA
+ */
+ dmaengine_pause(si->dma_rx.chan);
+
+ /*
+ * Framing error - we throw away the packet completely.
+ * Clearing RXE flushes the error conditions and data
+ * from the fifo.
+ */
+ if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
+ dev->stats.rx_errors++;
+
+ if (Ser2HSSR0 & HSSR0_FRE)
+ dev->stats.rx_frame_errors++;
+
+ /*
+ * Clear out the DMA...
+ */
+ Ser2HSCR0 = HSCR0_HSSP;
+
+ /*
+ * Clear selected status bits now, so we
+ * don't miss them next time around.
+ */
+ Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
+ }
+
+ /*
+	 * Deal with any receive errors. Any of the lowest
+ * 8 bytes in the FIFO may contain an error. We must read
+ * them one by one. The "error" could even be the end of
+ * packet!
+ */
+ if (Ser2HSSR0 & HSSR0_EIF)
+ sa1100_irda_fir_error(si, dev);
+
+ /*
+ * No matter what happens, we must restart reception.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
+{
+ unsigned long flags;
+ int brd, ret = -EINVAL;
+
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+ brd = 3686400 / (16 * speed) - 1;
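+		/* e.g. 115200 baud gives brd = 1, 9600 baud gives brd = 23 */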
+
+ /* Stop the receive DMA, and configure transmit. */
+ if (IS_FIR(si)) {
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_sir_tx);
+ }
+
+ local_irq_save(flags);
+
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+
+ Ser2UTCR1 = brd >> 8;
+ Ser2UTCR2 = brd;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_sir_tx_start;
+ si->irq = sa1100_irda_sir_irq;
+
+ local_irq_restore(flags);
+ ret = 0;
+ break;
+
+ case 4000000:
+ if (!IS_FIR(si))
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_fir_tx);
+
+ local_irq_save(flags);
+
+ Ser2HSSR0 = 0xff;
+ Ser2HSCR0 = HSCR0_HSSP;
+ Ser2UTCR3 = 0;
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_fir_tx_start;
+ si->irq = sa1100_irda_fir_irq;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ sa1100_irda_rx_alloc(si);
+ sa1100_irda_rx_dma_start(si);
+
+ local_irq_restore(flags);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Control the power state of the IrDA transmitter.
+ * State:
+ * 0 - off
+ * 1 - short range, lowest power
+ * 2 - medium range, medium power
+ * 3 - maximum range, high power
+ *
+ * Currently, only assabet is known to support this.
+ */
+static int
+__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret = 0;
+ if (si->pdata->set_power)
+ ret = si->pdata->set_power(si->dev, state);
+ return ret;
+}
+
+static inline int
+sa1100_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret;
+
+ ret = __sa1100_irda_set_power(si, state);
+ if (ret == 0)
+ si->power = state;
+
+ return ret;
+}
+
+static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ return si->irq(dev, si);
+}
+
+static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1)
+ si->newspeed = speed;
+
+ /* If this is an empty frame, we can bypass a lot. */
+ if (skb->len == 0) {
+ sa1100_irda_check_speed(si);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+ /* We must not already have a skb to transmit... */
+ BUG_ON(si->dma_tx.skb);
+
+ return si->tx_start(skb, dev, si);
+}
+
+static int
+sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct sa1100_irda *si = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (si->open) {
+ ret = sa1100_irda_set_speed(si,
+ rq->ifr_baudrate);
+ } else {
+				printk(KERN_INFO "sa1100_irda_ioctl: SIOCSBANDWIDTH: device not running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = IS_FIR(si) ? 0
+ : si->rx_buff.state != OUTSIDE_FRAME;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int sa1100_irda_startup(struct sa1100_irda *si)
+{
+ int ret;
+
+ /*
+ * Ensure that the ports for this device are setup correctly.
+ */
+ if (si->pdata->startup) {
+ ret = si->pdata->startup(si->dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Configure PPC for IRDA - we want to drive TXD2 low.
+ * We also want to drive this pin low during sleep.
+ */
+ PPSR &= ~PPC_TXD2;
+ PSDR &= ~PPC_TXD2;
+ PPDR |= PPC_TXD2;
+
+ /*
+ * Enable HP-SIR modulation, and ensure that the port is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+ Ser2UTCR4 = si->utcr4;
+ Ser2UTCR0 = UTCR0_8BitData;
+ Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+
+ ret = sa1100_irda_set_speed(si, si->speed = 9600);
+ if (ret) {
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ }
+
+ return ret;
+}
+
+static void sa1100_irda_shutdown(struct sa1100_irda *si)
+{
+ /*
+ * Stop all DMA activity.
+ */
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_terminate_all(si->dma_tx.chan);
+
+ /* Disable the port. */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+}
+
+static int sa1100_irda_start(struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ int err;
+
+ si->speed = 9600;
+
+ err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
+ &sa1100_irda_fir_rx);
+ if (err)
+ goto err_rx_dma;
+
+ err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
+ &sa1100_irda_sir_tx);
+ if (err)
+ goto err_tx_dma;
+
+ /*
+ * Setup the serial port for the specified speed.
+ */
+ err = sa1100_irda_startup(si);
+ if (err)
+ goto err_startup;
+
+ /*
+ * Open a new IrLAP layer instance.
+ */
+ si->irlap = irlap_open(dev, &si->qos, "sa1100");
+ err = -ENOMEM;
+ if (!si->irlap)
+ goto err_irlap;
+
+ err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq;
+
+ /*
+ * Now enable the interrupt and start the queue
+ */
+ si->open = 1;
+ sa1100_set_power(si, power_level); /* low power mode */
+
+ netif_start_queue(dev);
+ return 0;
+
+err_irq:
+ irlap_close(si->irlap);
+err_irlap:
+ si->open = 0;
+ sa1100_irda_shutdown(si);
+err_startup:
+ dma_release_channel(si->dma_tx.chan);
+err_tx_dma:
+ dma_release_channel(si->dma_rx.chan);
+err_rx_dma:
+ return err;
+}
+
+static int sa1100_irda_stop(struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ netif_stop_queue(dev);
+
+ si->open = 0;
+ sa1100_irda_shutdown(si);
+
+ /*
+ * If we have been doing any DMA activity, make sure we
+ * tidy that up cleanly.
+ */
+ skb = si->dma_rx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_rx.skb = NULL;
+ }
+
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_tx.skb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ /*
+ * Free resources
+ */
+ dma_release_channel(si->dma_tx.chan);
+ dma_release_channel(si->dma_rx.chan);
+ free_irq(dev->irq, dev);
+
+ sa1100_set_power(si, 0);
+
+ return 0;
+}
+
+static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static const struct net_device_ops sa1100_irda_netdev_ops = {
+ .ndo_open = sa1100_irda_start,
+ .ndo_stop = sa1100_irda_stop,
+ .ndo_start_xmit = sa1100_irda_hard_xmit,
+ .ndo_do_ioctl = sa1100_irda_ioctl,
+};
+
+static int sa1100_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sa1100_irda *si;
+ unsigned int baudrate_mask;
+ int err, irq;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq < 0 ? irq : -ENXIO;
+
+ err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_1;
+ err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_2;
+ err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_3;
+
+ dev = alloc_irdadev(sizeof(struct sa1100_irda));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_mem_4;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ si = netdev_priv(dev);
+ si->dev = &pdev->dev;
+ si->pdata = pdev->dev.platform_data;
+
+ sg_init_table(&si->dma_rx.sg, 1);
+ sg_init_table(&si->dma_tx.sg, 1);
+
+ /*
+ * Initialise the HP-SIR buffers
+ */
+ err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
+ if (err)
+ goto err_mem_5;
+ err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_5;
+
+ dev->netdev_ops = &sa1100_irda_netdev_ops;
+ dev->irq = irq;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ /*
+	 * We support original SIR IrDA up to 115k2; 4 Mbps FIR is enabled
+	 * only when max_rate permits it. Min Turn Time is set to 1ms or greater.
+ */
+ baudrate_mask = IR_9600;
+
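+	/* deliberate fall-through: each case also enables every lower rate */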
+ switch (max_rate) {
+ case 4000000: baudrate_mask |= IR_4000000 << 8;
+ case 115200: baudrate_mask |= IR_115200;
+ case 57600: baudrate_mask |= IR_57600;
+ case 38400: baudrate_mask |= IR_38400;
+ case 19200: baudrate_mask |= IR_19200;
+ }
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 7;
+
+ irda_qos_bits_to_value(&si->qos);
+
+ si->utcr4 = UTCR4_HPSIR;
+ if (tx_lpm)
+ si->utcr4 |= UTCR4_Z1_6us;
+
+ /*
+ * Initially enable HP-SIR modulation, and ensure that the port
+ * is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2UTCR4 = si->utcr4;
+ Ser2HSCR0 = HSCR0_UART;
+
+ err = register_netdev(dev);
+ if (err == 0)
+ platform_set_drvdata(pdev, dev);
+
+ if (err) {
+ err_mem_5:
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ err_mem_4:
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ err_mem_3:
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ err_mem_2:
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+ }
+ err_mem_1:
+ return err;
+}
+
+static int sa1100_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ struct sa1100_irda *si = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ }
+
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend the IrDA interface.
+ */
+static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * Stop the transmit queue
+ */
+ netif_device_detach(dev);
+ disable_irq(dev->irq);
+ sa1100_irda_shutdown(si);
+ __sa1100_irda_set_power(si, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Resume the IrDA interface.
+ */
+static int sa1100_irda_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * If we missed a speed change, initialise at the new speed
+ * directly. It is debatable whether this is actually
+ * required, but in the interests of continuing from where
+ * we left off it is desirable. The converse argument is
+ * that we should re-negotiate at 9600 baud again.
+ */
+ if (si->newspeed) {
+ si->speed = si->newspeed;
+ si->newspeed = 0;
+ }
+
+ sa1100_irda_startup(si);
+ __sa1100_irda_set_power(si, si->power);
+ enable_irq(dev->irq);
+
+ /*
+ * This automatically wakes up the queue
+ */
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define sa1100_irda_suspend NULL
+#define sa1100_irda_resume NULL
+#endif
+
+static struct platform_driver sa1100ir_driver = {
+ .probe = sa1100_irda_probe,
+ .remove = sa1100_irda_remove,
+ .suspend = sa1100_irda_suspend,
+ .resume = sa1100_irda_resume,
+ .driver = {
+ .name = "sa11x0-ir",
+ },
+};
+
+static int __init sa1100_irda_init(void)
+{
+ /*
+	 * Limit power level to a sensible range.
+ */
+ if (power_level < 1)
+ power_level = 1;
+ if (power_level > 3)
+ power_level = 3;
+
+ return platform_driver_register(&sa1100ir_driver);
+}
+
+static void __exit sa1100_irda_exit(void)
+{
+ platform_driver_unregister(&sa1100ir_driver);
+}
+
+module_init(sa1100_irda_init);
+module_exit(sa1100_irda_exit);
+module_param(power_level, int, 0);
+module_param(tx_lpm, int, 0);
+module_param(max_rate, int, 0);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
+MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
+MODULE_ALIAS("platform:sa11x0-ir");
diff --git a/drivers/staging/irda/drivers/sh_sir.c b/drivers/staging/irda/drivers/sh_sir.c
new file mode 100644
index 000000000000..fede6864c737
--- /dev/null
+++ b/drivers/staging/irda/drivers/sh_sir.c
@@ -0,0 +1,810 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0_*/
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
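+	/* known-answer test: the CRC of CC F5 F1 A7 must come out as 0x51DF */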
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+		dev_err(dev, "CRC result error %x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct cpufreq_frequency_table *pos;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+	/* IrDA cannot run faster than peripheral_clk */
+ cpufreq_for_each_valid_entry(pos, freq_table) {
+ u32 freq = pos->frequency;
+
+		/* skip any rate above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = pos - freq_table;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
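+/* round a x10000-scaled correction value to the nearest whole unit */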
+#define ERR_ROUNDING(a) (((a) + 5000) / 10000)
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0, 625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+	 * only 9600 baud is supported for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+		dev_err(dev, "unsupported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "can not get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
+ */
+
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
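+	/* rerr is the fractional part of rate / SCLK_BASE, scaled by 10000
+	 * to match the units of rate_err_array[]
+	 */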
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
+ */
+
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
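+	/* the three assignments above round rate / baudrate down to a multiple of 16 */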
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+		dev_err(&self->ndev->dev, "iobuf already allocated\n");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val = 0;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+		/* data received */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+	/* read the data register to clear the error */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+	 * This function is required by the IrDA framework,
+	 * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ err = -ENODEV;
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+	err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
+	if (err) {
+		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+		unregister_netdev(ndev);
+		goto err_mem_4;
+	}
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = sh_sir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(sh_sir_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/sir-dev.h b/drivers/staging/irda/drivers/sir-dev.h
new file mode 100644
index 000000000000..f50b9c1c0639
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir-dev.h
@@ -0,0 +1,191 @@
+/*********************************************************************
+ *
+ * sir.h: include file for irda-sir device abstraction layer
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_SIR_H
+#define IRDA_SIR_H
+
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> // iobuff_t
+
+struct sir_fsm {
+ struct semaphore sem;
+ struct delayed_work work;
+ unsigned state, substate;
+ int param;
+ int result;
+};
+
+#define SIRDEV_STATE_WAIT_TX_COMPLETE 0x0100
+
+/* substates for wait_tx_complete */
+#define SIRDEV_STATE_WAIT_XMIT 0x0101
+#define SIRDEV_STATE_WAIT_UNTIL_SENT 0x0102
+#define SIRDEV_STATE_TX_DONE 0x0103
+
+#define SIRDEV_STATE_DONGLE_OPEN 0x0300
+
+/* 0x0301-0x03ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_CLOSE 0x0400
+
+/* 0x0401-0x04ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_SET_DTR_RTS 0x0500
+
+#define SIRDEV_STATE_SET_SPEED 0x0700
+#define SIRDEV_STATE_DONGLE_CHECK 0x0800
+#define SIRDEV_STATE_DONGLE_RESET 0x0900
+
+/* 0x0901-0x09ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_SPEED 0x0a00
+/* 0x0a01-0x0aff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_PORT_SPEED 0x0b00
+#define SIRDEV_STATE_DONE 0x0c00
+#define SIRDEV_STATE_ERROR 0x0d00
+#define SIRDEV_STATE_COMPLETE 0x0e00
+
+#define SIRDEV_STATE_DEAD 0xffff
+
+
+struct sir_dev;
+
+struct dongle_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ IRDA_DONGLE type;
+
+ int (*open)(struct sir_dev *dev);
+ int (*close)(struct sir_dev *dev);
+ int (*reset)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+
+ struct list_head dongle_list;
+};
+
+struct sir_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ int qos_mtt_bits;
+
+ int (*chars_in_buffer)(struct sir_dev *dev);
+ void (*wait_until_sent)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+ int (*set_dtr_rts)(struct sir_dev *dev, int dtr, int rts);
+
+ int (*do_write)(struct sir_dev *dev, const unsigned char *ptr, size_t len);
+
+ int (*start_dev)(struct sir_dev *dev);
+ int (*stop_dev)(struct sir_dev *dev);
+};
+
+
+/* exported */
+
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
+
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+ const char *name);
+int sirdev_put_instance(struct sir_dev *self);
+
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+
+/* low level helpers for SIR device/dongle setup */
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+
+/* not exported */
+
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
+
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+
+/* inline helpers */
+
+static inline int sirdev_schedule_speed(struct sir_dev *dev, unsigned speed)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
+}
+
+static inline int sirdev_schedule_dongle_open(struct sir_dev *dev, int dongle_id)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_OPEN, dongle_id);
+}
+
+static inline int sirdev_schedule_dongle_close(struct sir_dev *dev)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_CLOSE, 0);
+}
+
+static inline int sirdev_schedule_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int dtrrts;
+
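+	/* encode DTR in bit 1 and RTS in bit 0 of the request parameter */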
+ dtrrts = ((dtr) ? 0x02 : 0x00) | ((rts) ? 0x01 : 0x00);
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_DTR_RTS, dtrrts);
+}
+
+#if 0
+static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_MODE, mode);
+}
+#endif
+
+
+struct sir_dev {
+ struct net_device *netdev;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ char hwname[32];
+
+ struct sir_fsm fsm;
+ atomic_t enable_rx;
+ int raw_tx;
+ spinlock_t tx_lock;
+
+ u32 new_speed;
+ u32 flags;
+
+ unsigned speed;
+
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ struct sk_buff *tx_skb;
+
+ const struct dongle_driver * dongle_drv;
+ const struct sir_driver * drv;
+ void *priv;
+
+};
+
+#endif /* IRDA_SIR_H */
diff --git a/drivers/staging/irda/drivers/sir_dev.c b/drivers/staging/irda/drivers/sir_dev.c
new file mode 100644
index 000000000000..6af26a7d787c
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir_dev.c
@@ -0,0 +1,987 @@
+/*********************************************************************
+ *
+ * sir_dev.c: irda sir network device
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+
+static struct workqueue_struct *irda_sir_wq;
+
+/* STATE MACHINE */
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
+
+static int sirdev_tx_complete_fsm(struct sir_dev *dev)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ unsigned next_state, delay;
+ unsigned bytes_left;
+
+ do {
+ next_state = fsm->substate; /* default: stay in current substate */
+ delay = 0;
+
+ switch(fsm->substate) {
+
+ case SIRDEV_STATE_WAIT_XMIT:
+ if (dev->drv->chars_in_buffer)
+ bytes_left = dev->drv->chars_in_buffer(dev);
+ else
+ bytes_left = 0;
+ if (!bytes_left) {
+ next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+ break;
+ }
+
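+			/* estimate the drain time from the line rate: above
+			 * 115200 baud each byte takes 8 bit times, otherwise
+			 * 10 bit times (async start + 8 data + stop)
+			 */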
+ if (dev->speed > 115200)
+ delay = (bytes_left*8*10000) / (dev->speed/100);
+ else if (dev->speed > 0)
+ delay = (bytes_left*10*10000) / (dev->speed/100);
+ else
+ delay = 0;
+ /* expected delay (usec) until remaining bytes are sent */
+ if (delay < 100) {
+ udelay(delay);
+ delay = 0;
+ break;
+ }
+ /* sleep some longer delay (msec) */
+ delay = (delay+999) / 1000;
+ break;
+
+ case SIRDEV_STATE_WAIT_UNTIL_SENT:
+			/* block until the underlying hardware buffers are empty */
+ if (dev->drv->wait_until_sent)
+ dev->drv->wait_until_sent(dev);
+ next_state = SIRDEV_STATE_TX_DONE;
+ break;
+
+ case SIRDEV_STATE_TX_DONE:
+ return 0;
+
+ default:
+ net_err_ratelimited("%s - undefined state\n", __func__);
+ return -EINVAL;
+ }
+ fsm->substate = next_state;
+ } while (delay == 0);
+ return delay;
+}
+
+/*
+ * Function sirdev_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * However, kIrDAd is shared by all sir_dev devices, so we must not sleep there for
+ * too long. Instead, for longer delays we re-queue ourselves as delayed work.
+ * On entry, fsm->sem is always held and the netdev xmit queue stopped.
+ * Both must be released/restarted on completion - but only on final exit.
+ */
+
+static void sirdev_config_fsm(struct work_struct *work)
+{
+ struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
+ struct sir_fsm *fsm = &dev->fsm;
+ int next_state;
+ int ret = -1;
+ unsigned delay;
+
+ pr_debug("%s(), <%ld>\n", __func__, jiffies);
+
+ do {
+ pr_debug("%s - state=0x%04x / substate=0x%04x\n",
+ __func__, fsm->state, fsm->substate);
+
+ next_state = fsm->state;
+ delay = 0;
+
+ switch(fsm->state) {
+
+ case SIRDEV_STATE_DONGLE_OPEN:
+ if (dev->dongle_drv != NULL) {
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+
+ /* Initialize dongle */
+ ret = sirdev_get_dongle(dev, fsm->param);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ /* Dongles are powered through the modem control lines which
+ * were just set during open. Before resetting, let's wait for
+ * the power to stabilize. This is what some dongle drivers did
+ * in open before, while others didn't - should be safe anyway.
+ */
+
+ delay = 50;
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+
+ fsm->param = 9600;
+
+ break;
+
+ case SIRDEV_STATE_DONGLE_CLOSE:
+		/* shouldn't we just treat this as success? */
+ if (dev->dongle_drv == NULL) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_DTR_RTS:
+ ret = sirdev_set_dtr_rts(dev,
+ (fsm->param&0x02) ? TRUE : FALSE,
+ (fsm->param&0x01) ? TRUE : FALSE);
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_SPEED:
+ fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+ next_state = SIRDEV_STATE_DONGLE_CHECK;
+ break;
+
+ case SIRDEV_STATE_DONGLE_CHECK:
+ ret = sirdev_tx_complete_fsm(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ if ((delay=ret) != 0)
+ break;
+
+ if (dev->dongle_drv) {
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+ }
+ else {
+ dev->speed = fsm->param;
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_RESET:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->reset(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0) {
+ /* set serial port according to dongle default speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+ next_state = SIRDEV_STATE_DONGLE_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+ if (dev->dongle_drv->set_speed) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0)
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ break;
+
+ case SIRDEV_STATE_PORT_SPEED:
+ /* Finally we are ready to change the serial port speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ dev->new_speed = 0;
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_DONE:
+ /* Signal network layer so it can send more frames */
+ netif_wake_queue(dev->netdev);
+ next_state = SIRDEV_STATE_COMPLETE;
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state\n", __func__);
+ fsm->result = -EINVAL;
+			/* fall through */
+
+ case SIRDEV_STATE_ERROR:
+ net_err_ratelimited("%s - error: %d\n",
+ __func__, fsm->result);
+
+#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
+ netif_stop_queue(dev->netdev);
+#else
+ netif_wake_queue(dev->netdev);
+#endif
+			/* fall through */
+
+ case SIRDEV_STATE_COMPLETE:
+ /* config change finished, so we are not busy any longer */
+ sirdev_enable_rx(dev);
+ up(&fsm->sem);
+ return;
+ }
+ fsm->state = next_state;
+ } while(!delay);
+
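+	/* non-zero delay requested: leave the loop and let the workqueue
+	 * re-run us after 'delay' milliseconds
+	 */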
+ queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
+}
+
+/* schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+
+ pr_debug("%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
+
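+	/* if the fsm is busy we may only block waiting for it from process
+	 * context; in atomic context report -EWOULDBLOCK instead
+	 */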
+ if (down_trylock(&fsm->sem)) {
+ if (in_interrupt() || in_atomic() || irqs_disabled()) {
+ pr_debug("%s(), state machine busy!\n", __func__);
+ return -EWOULDBLOCK;
+ } else
+ down(&fsm->sem);
+ }
+
+ if (fsm->state == SIRDEV_STATE_DEAD) {
+ /* race with sirdev_close should never happen */
+		net_err_ratelimited("%s(), instance stale!\n", __func__);
+ up(&fsm->sem);
+ return -ESTALE; /* or better EPIPE? */
+ }
+
+ netif_stop_queue(dev->netdev);
+ atomic_set(&dev->enable_rx, 0);
+
+ fsm->state = initial_state;
+ fsm->param = param;
+ fsm->result = 0;
+
+ INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+ queue_delayed_work(irda_sir_wq, &fsm->work, 0);
+ return 0;
+}
+
+
+/***************************************************************************/
+
+void sirdev_enable_rx(struct sir_dev *dev)
+{
+ if (unlikely(atomic_read(&dev->enable_rx)))
+ return;
+
+	/* flush rx-buffer - should also help in case of problems with echo cancellation */
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->rx_buff.len = 0;
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ atomic_set(&dev->enable_rx, 1);
+}
+
+static int sirdev_is_receiving(struct sir_dev *dev)
+{
+ if (!atomic_read(&dev->enable_rx))
+ return 0;
+
+ return dev->rx_buff.state != OUTSIDE_FRAME;
+}
+
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ int err;
+
+ pr_debug("%s : requesting dongle %d.\n", __func__, type);
+
+ err = sirdev_schedule_dongle_open(dev, type);
+ if (unlikely(err))
+ return err;
+ down(&dev->fsm.sem); /* block until config change completed */
+ err = dev->fsm.result;
+ up(&dev->fsm.sem);
+ return err;
+}
+EXPORT_SYMBOL(sirdev_set_dongle);
+
+/* used by dongle drivers for dongle programming */
+
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
+{
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(len > dev->tx_buff.truesize))
+ return -ENOSPC;
+
+ spin_lock_irqsave(&dev->tx_lock, flags); /* serialize with other tx operations */
+ while (dev->tx_buff.len > 0) { /* wait until tx idle */
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ memcpy(dev->tx_buff.data, buf, len);
+ dev->tx_buff.len = len;
+
+ ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+ if (ret > 0) {
+ pr_debug("%s(), raw-tx started\n", __func__);
+
+ dev->tx_buff.data += ret;
+ dev->tx_buff.len -= ret;
+ dev->raw_tx = 1;
+ ret = len; /* all data is going to be sent */
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(sirdev_raw_write);
+
+/* seems some dongle drivers may need this */
+
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
+{
+ int count;
+
+ if (atomic_read(&dev->enable_rx))
+ return -EIO; /* fail if we expect irda-frames */
+
+ count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
+
+ if (count > 0) {
+ memcpy(buf, dev->rx_buff.data, count);
+ dev->rx_buff.data += count;
+ dev->rx_buff.len -= count;
+ }
+
+ /* remaining stuff gets flushed when re-enabling normal rx */
+
+ return count;
+}
+EXPORT_SYMBOL(sirdev_raw_read);
+
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int ret = -ENXIO;
+ if (dev->drv->set_dtr_rts)
+ ret = dev->drv->set_dtr_rts(dev, dtr, rts);
+ return ret;
+}
+EXPORT_SYMBOL(sirdev_set_dtr_rts);
+
+/**********************************************************************/
+
+/* called from client driver - likely with bh-context - to indicate
+ * it made some progress with transmission. Hence we send the next
+ * chunk, if any, or complete the skb otherwise
+ */
+
+void sirdev_write_complete(struct sir_dev *dev)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int actual = 0;
+ int err;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ pr_debug("%s() - dev->tx_buff.len = %d\n",
+ __func__, dev->tx_buff.len);
+
+ if (likely(dev->tx_buff.len > 0)) {
+ /* Write data left in transmit buffer */
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual>0)) {
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual<0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
+ }
+ dev->tx_buff.len = 0;
+ }
+ if (dev->tx_buff.len > 0)
+ goto done; /* more data to send later */
+ }
+
+ if (unlikely(dev->raw_tx != 0)) {
+ /* in raw mode we are just done now after the buffer was sent
+ * completely. Since this was requested by some dongle driver
+ * running under the control of the irda-thread we must take
+ * care here not to re-enable the queue. The queue will be
+ * restarted when the irda-thread has completed the request.
+ */
+
+ pr_debug("%s(), raw-tx done\n", __func__);
+ dev->raw_tx = 0;
+ goto done; /* no post-frame handling in raw mode */
+ }
+
+	/* We have now finished sending this skb.
+	 * Update statistics and free the skb.
+	 * Finally we check for and trigger a pending speed change, if any.
+	 * If not, we switch to rx mode and wake the queue for further
+	 * packets.
+	 * Note that the scheduled speed request blocks until the lower
+	 * client driver and the corresponding hardware have really
+	 * finished sending all data (e.g. the xmit fifo is drained)
+	 * before the speed change finally takes place and the queue is
+	 * re-activated.
+	 */
+
+ pr_debug("%s(), finished with frame!\n", __func__);
+
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (unlikely(dev->new_speed > 0)) {
+ pr_debug("%s(), Changing speed!\n", __func__);
+ err = sirdev_schedule_speed(dev, dev->new_speed);
+ if (unlikely(err)) {
+ /* should never happen
+ * forget the speed change and hope the stack recovers
+ */
+ net_err_ratelimited("%s - schedule speed change failed: %d\n",
+ __func__, err);
+ netif_wake_queue(dev->netdev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion dev->new_speed gets cleared,
+ * rx-reenabled and the queue restarted
+ */
+ }
+ else {
+ sirdev_enable_rx(dev);
+ netif_wake_queue(dev->netdev);
+ }
+
+done:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+EXPORT_SYMBOL(sirdev_write_complete);
+
+/* called from client driver - likely with bh-context - to give us
+ * some more received bytes. We put them into the rx-buffer,
+ * normally unwrapping and building LAP-skb's (unless rx disabled)
+ */
+
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
+{
+ if (!dev || !dev->netdev) {
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
+ return -1;
+ }
+
+ if (!dev->irlap) {
+ net_warn_ratelimited("%s - too early: %p / %zd!\n",
+ __func__, cp, count);
+ return -1;
+ }
+
+ if (cp==NULL) {
+ /* error already at lower level receive
+ * just update stats and set media busy
+ */
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ dev->netdev->stats.rx_dropped++;
+ pr_debug("%s; rx-drop: %zd\n", __func__, count);
+ return 0;
+ }
+
+ /* Read the characters into the buffer */
+ if (likely(atomic_read(&dev->enable_rx))) {
+ while (count--)
+ /* Unwrap and destuff one byte */
+ async_unwrap_char(dev->netdev, &dev->netdev->stats,
+ &dev->rx_buff, *cp++);
+ } else {
+ while (count--) {
+ /* rx not enabled: save the raw bytes and never
+ * trigger any netif_rx. The received bytes are flushed
+ * later when we re-enable rx but might be read meanwhile
+ * by the dongle driver.
+ */
+ dev->rx_buff.data[dev->rx_buff.len++] = *cp++;
+
+ /* What should we do when the buffer is full? */
+ if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
+ dev->rx_buff.len = 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sirdev_receive);
+
+/**********************************************************************/
+
+/* callbacks from network layer */
+
+static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ unsigned long flags;
+ int actual = 0;
+ int err;
+ s32 speed;
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(ndev);
+
+ pr_debug("%s(), skb->len = %d\n", __func__, skb->len);
+
+ speed = irda_get_next_speed(skb);
+ if ((speed != dev->speed) && (speed != -1)) {
+ if (!skb->len) {
+ err = sirdev_schedule_speed(dev, speed);
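+	/* fall back to legacy probing when PNP is disabled or unusable, or
+	 * when any resource was forced via module parameters
+	 */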
+ if (unlikely(err == -EWOULDBLOCK)) {
+				/* Failed to initiate the speed change because the
+				 * fsm is still busy (pretty unlikely, but possible).
+				 * We refuse to accept the skb and return with the queue
+				 * stopped so the network layer will retry after the
+				 * fsm completes and wakes the queue.
+				 */
+ return NETDEV_TX_BUSY;
+ }
+ else if (unlikely(err)) {
+ /* other fatal error - forget the speed change and
+ * hope the stack will recover somehow
+ */
+ netif_start_queue(ndev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion the queue gets restarted
+ */
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ } else
+ dev->new_speed = speed;
+ }
+
+ /* Init tx buffer*/
+ dev->tx_buff.data = dev->tx_buff.head;
+
+	/* Report (at debug level) if a previous write has not completed yet */
+	if (spin_is_locked(&dev->tx_lock)) {
+		pr_debug("%s(), write not completed\n", __func__);
+	}
+
+ /* serialize with write completion */
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
+
+	/* Transmission will start now - disable receive.
+	 * If we are just in the middle of an incoming frame,
+	 * treat it as a collision. It's probably a good idea to
+	 * reset the rx_buff to OUTSIDE_FRAME in this case too?
+	 */
+ atomic_set(&dev->enable_rx, 0);
+ if (unlikely(sirdev_is_receiving(dev)))
+ dev->netdev->stats.collisions++;
+
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual > 0)) {
+ dev->tx_skb = skb;
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual < 0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
+ dev_kfree_skb_any(skb);
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
+ netif_wake_queue(ndev);
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* called from network layer with rtnl hold */
+
+static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct sir_dev *dev = netdev_priv(ndev);
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSDONGLE: /* Set dongle */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = sirdev_is_receiving(dev);
+ break;
+
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMODE:
+#if 0
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_mode(dev, irq->ifr_mode);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+#endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* ----------------------------------------------------------------------------- */
+
+#define SIRBUF_ALLOCSIZE 4269 /* worst case size of a wrapped IrLAP frame */
+
+static int sirdev_alloc_buffers(struct sir_dev *dev)
+{
+ dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
+ dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+
+ /* Bootstrap ZeroCopy Rx */
+ dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
+ GFP_KERNEL);
+ if (dev->rx_buff.skb == NULL)
+ return -ENOMEM;
+ skb_reserve(dev->rx_buff.skb, 1);
+ dev->rx_buff.head = dev->rx_buff.skb->data;
+
+ dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
+ if (dev->tx_buff.head == NULL) {
+ kfree_skb(dev->rx_buff.skb);
+ dev->rx_buff.skb = NULL;
+ dev->rx_buff.head = NULL;
+ return -ENOMEM;
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->tx_buff.len = 0;
+ dev->rx_buff.len = 0;
+
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ return 0;
+}
+
+static void sirdev_free_buffers(struct sir_dev *dev)
+{
+ kfree_skb(dev->rx_buff.skb);
+ kfree(dev->tx_buff.head);
+ dev->rx_buff.head = dev->tx_buff.head = NULL;
+ dev->rx_buff.skb = NULL;
+}
+
+static int sirdev_open(struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ const struct sir_driver *drv = dev->drv;
+
+ if (!drv)
+ return -ENODEV;
+
+ /* increase the reference count of the driver module before doing serious stuff */
+ if (!try_module_get(drv->owner))
+ return -ESTALE;
+
+ if (sirdev_alloc_buffers(dev))
+ goto errout_dec;
+
+ if (!dev->drv->start_dev || dev->drv->start_dev(dev))
+ goto errout_free;
+
+ sirdev_enable_rx(dev);
+ dev->raw_tx = 0;
+
+ netif_start_queue(ndev);
+ dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
+ if (!dev->irlap)
+ goto errout_stop;
+
+ netif_wake_queue(ndev);
+
+ pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
+
+ return 0;
+
+errout_stop:
+ atomic_set(&dev->enable_rx, 0);
+ if (dev->drv->stop_dev)
+ dev->drv->stop_dev(dev);
+errout_free:
+ sirdev_free_buffers(dev);
+errout_dec:
+ module_put(drv->owner);
+ return -EAGAIN;
+}
+
+static int sirdev_close(struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ const struct sir_driver *drv;
+
+/* pr_debug("%s\n", __func__); */
+
+ netif_stop_queue(ndev);
+
+ down(&dev->fsm.sem); /* block on pending config completion */
+
+ atomic_set(&dev->enable_rx, 0);
+
+ if (unlikely(!dev->irlap))
+ goto out;
+ irlap_close(dev->irlap);
+ dev->irlap = NULL;
+
+ drv = dev->drv;
+ if (unlikely(!drv || !dev->priv))
+ goto out;
+
+ if (drv->stop_dev)
+ drv->stop_dev(dev);
+
+ sirdev_free_buffers(dev);
+ module_put(drv->owner);
+
+out:
+ dev->speed = 0;
+ up(&dev->fsm.sem);
+ return 0;
+}
+
+static const struct net_device_ops sirdev_ops = {
+ .ndo_start_xmit = sirdev_hard_xmit,
+ .ndo_open = sirdev_open,
+ .ndo_stop = sirdev_close,
+ .ndo_do_ioctl = sirdev_ioctl,
+};
+/* ----------------------------------------------------------------------------- */
+
+struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
+{
+ struct net_device *ndev;
+ struct sir_dev *dev;
+
+ pr_debug("%s - %s\n", __func__, name);
+
+ /* instead of adding tests to protect against drv->do_write==NULL
+ * at several places we refuse to create a sir_dev instance for
+ * drivers which don't implement do_write.
+ */
+ if (!drv || !drv->do_write)
+ return NULL;
+
+ /*
+ * Allocate new instance of the device
+ */
+ ndev = alloc_irdadev(sizeof(*dev));
+ if (ndev == NULL) {
+ net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
+ __func__);
+ goto out;
+ }
+ dev = netdev_priv(ndev);
+
+ irda_init_max_qos_capabilies(&dev->qos);
+ dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
+ irda_qos_bits_to_value(&dev->qos);
+
+ strncpy(dev->hwname, name, sizeof(dev->hwname)-1);
+
+ atomic_set(&dev->enable_rx, 0);
+ dev->tx_skb = NULL;
+
+ spin_lock_init(&dev->tx_lock);
+ sema_init(&dev->fsm.sem, 1);
+
+ dev->drv = drv;
+ dev->netdev = ndev;
+
+ /* Override the network functions we need to use */
+ ndev->netdev_ops = &sirdev_ops;
+
+ if (register_netdev(ndev)) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto out_freenetdev;
+ }
+
+ return dev;
+
+out_freenetdev:
+ free_netdev(ndev);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(sirdev_get_instance);
+
+int sirdev_put_instance(struct sir_dev *dev)
+{
+ int err = 0;
+
+ pr_debug("%s\n", __func__);
+
+ atomic_set(&dev->enable_rx, 0);
+
+ netif_carrier_off(dev->netdev);
+ netif_device_detach(dev->netdev);
+
+ if (dev->dongle_drv)
+ err = sirdev_schedule_dongle_close(dev);
+ if (err)
+ net_err_ratelimited("%s - error %d\n", __func__, err);
+
+ sirdev_close(dev->netdev);
+
+ down(&dev->fsm.sem);
+	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
+ dev->dongle_drv = NULL;
+ dev->priv = NULL;
+ up(&dev->fsm.sem);
+
+ /* Remove netdevice */
+ unregister_netdev(dev->netdev);
+
+ free_netdev(dev->netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sirdev_put_instance);
+
+static int __init sir_wq_init(void)
+{
+ irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+ if (!irda_sir_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+ destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/sir_dongle.c b/drivers/staging/irda/drivers/sir_dongle.c
new file mode 100644
index 000000000000..7436f73ff1bb
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir_dongle.c
@@ -0,0 +1,133 @@
+/*********************************************************************
+ *
+ * sir_dongle.c: manager for serial dongle protocol drivers
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/**************************************************************************
+ *
+ * dongle registration and attachment
+ *
+ */
+
+static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
+static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
+
+int irda_register_dongle(struct dongle_driver *new)
+{
+ struct list_head *entry;
+ struct dongle_driver *drv;
+
+ pr_debug("%s : registering dongle \"%s\" (%d).\n",
+ __func__, new->driver_name, new->type);
+
+ mutex_lock(&dongle_list_lock);
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (new->type == drv->type) {
+ mutex_unlock(&dongle_list_lock);
+ return -EEXIST;
+ }
+ }
+ list_add(&new->dongle_list, &dongle_list);
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(irda_register_dongle);
+
+int irda_unregister_dongle(struct dongle_driver *drv)
+{
+ mutex_lock(&dongle_list_lock);
+ list_del(&drv->dongle_list);
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(irda_unregister_dongle);
+
+int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ struct list_head *entry;
+ const struct dongle_driver *drv = NULL;
+ int err = -EINVAL;
+
+ request_module("irda-dongle-%d", type);
+
+ if (dev->dongle_drv != NULL)
+ return -EBUSY;
+
+ /* serialize access to the list of registered dongles */
+ mutex_lock(&dongle_list_lock);
+
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (drv->type == type)
+ break;
+ else
+ drv = NULL;
+ }
+
+ if (!drv) {
+ err = -ENODEV;
+ goto out_unlock; /* no such dongle */
+ }
+
+ /* handling of SMP races with dongle module removal - three cases:
+ * 1) dongle driver was already unregistered - then we haven't found the
+ * requested dongle above and are already out here
+ * 2) the module is already marked deleted but the driver is still
+ * registered - then the try_module_get() below will fail
+ * 3) the try_module_get() below succeeds before the module is marked
+ * deleted - then sys_delete_module() fails and prevents the removal
+ * because the module is in use.
+ */
+
+ if (!try_module_get(drv->owner)) {
+ err = -ESTALE;
+ goto out_unlock; /* rmmod already pending */
+ }
+ dev->dongle_drv = drv;
+
+ if (!drv->open || (err=drv->open(dev))!=0)
+ goto out_reject; /* failed to open driver */
+
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+
+out_reject:
+ dev->dongle_drv = NULL;
+ module_put(drv->owner);
+out_unlock:
+ mutex_unlock(&dongle_list_lock);
+ return err;
+}
+
+int sirdev_put_dongle(struct sir_dev *dev)
+{
+ const struct dongle_driver *drv = dev->dongle_drv;
+
+ if (drv) {
+ if (drv->close)
+ drv->close(dev); /* close this dongle instance */
+
+ dev->dongle_drv = NULL; /* unlink the dongle driver */
+ module_put(drv->owner);/* decrement driver's module refcount */
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/irda/drivers/smsc-ircc2.c b/drivers/staging/irda/drivers/smsc-ircc2.c
new file mode 100644
index 000000000000..19a55cba6beb
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-ircc2.c
@@ -0,0 +1,3026 @@
+/*********************************************************************
+ *
+ * Description: Driver for the SMC Infrared Communications Controller
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ * Created at:
+ * Modified at:
+ * Modified by:
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ * Copyright (c) 2002 Jean Tourrilhes
+ * Copyright (c) 2006 Linus Walleij
+ *
+ *
+ * Based on smc-ircc.c:
+ *
+ * Copyright (c) 2001 Stefani Seibold
+ * Copyright (c) 1999-2001 Dag Brattli
+ * Copyright (c) 1998-1999 Thomas Davis,
+ *
+ * and irport.c:
+ *
+ * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "smsc-ircc2.h"
+#include "smsc-sio.h"
+
+
+MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
+MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
+MODULE_LICENSE("GPL");
+
+static bool smsc_nopnp = true;
+module_param_named(nopnp, smsc_nopnp, bool, 0);
+MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
+
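+/* 255 marks a module parameter as "not set"; the values reported by the
+ * chip are used instead (see smsc_ircc_setup_io())
+ */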
+#define DMA_INVAL 255
+static int ircc_dma = DMA_INVAL;
+module_param_hw(ircc_dma, int, dma, 0);
+MODULE_PARM_DESC(ircc_dma, "DMA channel");
+
+#define IRQ_INVAL 255
+static int ircc_irq = IRQ_INVAL;
+module_param_hw(ircc_irq, int, irq, 0);
+MODULE_PARM_DESC(ircc_irq, "IRQ line");
+
+static int ircc_fir;
+module_param_hw(ircc_fir, int, ioport, 0);
+MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
+
+static int ircc_sir;
+module_param_hw(ircc_sir, int, ioport, 0);
+MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
+
+static int ircc_cfg;
+module_param_hw(ircc_cfg, int, ioport, 0);
+MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
+
+static int ircc_transceiver;
+module_param(ircc_transceiver, int, 0);
+MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
+
+/* Types */
+
+#ifdef CONFIG_PCI
+struct smsc_ircc_subsystem_configuration {
+ unsigned short vendor; /* PCI vendor ID */
+	unsigned short device; /* PCI device ID */
+ unsigned short subvendor; /* PCI subsystem vendor ID */
+ unsigned short subdevice; /* PCI subsystem device ID */
+ unsigned short sir_io; /* I/O port for SIR */
+ unsigned short fir_io; /* I/O port for FIR */
+ unsigned char fir_irq; /* FIR IRQ */
+ unsigned char fir_dma; /* FIR DMA */
+ unsigned short cfg_base; /* I/O port for chip configuration */
+ int (*preconfigure)(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); /* Preconfig function */
+ const char *name; /* name shown as info */
+};
+#endif
+
+struct smsc_transceiver {
+ char *name;
+ void (*set_for_speed)(int fir_base, u32 speed);
+ int (*probe)(int fir_base);
+};
+
+struct smsc_chip {
+ char *name;
+ #if 0
+ u8 type;
+ #endif
+ u16 flags;
+ u8 devid;
+ u8 rev;
+};
+
+struct smsc_chip_address {
+ unsigned int cfg_base;
+ unsigned int type;
+};
+
+/* Private data for each instance */
+struct smsc_ircc_cb {
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+	struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ __u32 flags; /* Interface flags */
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ int transceiver;
+ struct platform_device *pldev;
+};
+
+/* Constants */
+
+#define SMSC_IRCC2_DRIVER_NAME "smsc-ircc2"
+
+#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
+#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
+#define SMSC_IRCC2_C_NET_TIMEOUT 0
+#define SMSC_IRCC2_C_SIR_STOP 0
+
+static const char *driver_name = SMSC_IRCC2_DRIVER_NAME;
+
+/* Prototypes */
+
+static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base);
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq);
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self);
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self);
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
+static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t smsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs);
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self);
+static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed);
+static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, u32 speed);
+static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id);
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
+static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
+#if SMSC_IRCC2_C_SIR_STOP
+static void smsc_ircc_sir_stop(struct smsc_ircc_cb *self);
+#endif
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self);
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static int smsc_ircc_net_open(struct net_device *dev);
+static int smsc_ircc_net_close(struct net_device *dev);
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#if SMSC_IRCC2_C_NET_TIMEOUT
+static void smsc_ircc_timeout(struct net_device *dev);
+#endif
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
+
+/* Probing */
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
+#ifdef CONFIG_PCI
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
+ unsigned short port);
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+ unsigned short ircc_fir,
+ unsigned short ircc_sir,
+ unsigned char ircc_dma,
+ unsigned char ircc_irq);
+#endif
+
+/* Transceivers specific functions */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
+
+/* Power Management */
+
+static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int smsc_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver smsc_ircc_driver = {
+ .suspend = smsc_ircc_suspend,
+ .resume = smsc_ircc_resume,
+ .driver = {
+ .name = SMSC_IRCC2_DRIVER_NAME,
+ },
+};
+
+/* Transceivers for SMSC-ircc */
+
+static struct smsc_transceiver smsc_transceivers[] =
+{
+ { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800 },
+ { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select },
+ { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc },
+ { NULL, NULL }
+};
+#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (ARRAY_SIZE(smsc_transceivers) - 1)
+
+/* SMC SuperIO chipsets definitions */
+
+#define KEY55_1 0 /* SuperIO Configuration mode with Key <0x55> */
+#define KEY55_2 1 /* SuperIO Configuration mode with Key <0x55,0x55> */
+#define NoIRDA 2 /* SuperIO Chip has no IRDA Port */
+#define SIR 0 /* SuperIO Chip has only slow IRDA */
+#define FIR 4 /* SuperIO Chip has fast IRDA */
+#define	SERx4	8	/* SuperIO Chip supports 115.2 kbaud * 4 = 460.8 kbaud */
+
+static struct smsc_chip __initdata fdc_chips_flat[] =
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
+ { "37C665GT", KEY55_2|NoIRDA, 0x65, 0x01 },
+ { "37C665GT", KEY55_2|NoIRDA, 0x66, 0x01 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x03, 0x02 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x04, 0x02 }, /* ID? */
+ { "37C78", KEY55_2|NoIRDA, 0x78, 0x00 },
+ { "37N769", KEY55_1|FIR|SERx4, 0x28, 0x00 },
+ { "37N869", KEY55_1|FIR|SERx4, 0x29, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata fdc_chips_paged[] =
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
+ { "37B77X", KEY55_1|SIR|SERx4, 0x43, 0x00 },
+ { "37B78X", KEY55_1|SIR|SERx4, 0x44, 0x00 },
+ { "37B80X", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37C67X", KEY55_1|FIR|SERx4, 0x40, 0x00 },
+ { "37C93X", KEY55_2|SIR|SERx4, 0x02, 0x01 },
+ { "37C93XAPM", KEY55_1|SIR|SERx4, 0x30, 0x01 },
+ { "37C93XFR", KEY55_2|FIR|SERx4, 0x03, 0x01 },
+ { "37M707", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37M81X", KEY55_1|SIR|SERx4, 0x4d, 0x00 },
+ { "37N958FR", KEY55_1|FIR|SERx4, 0x09, 0x04 },
+ { "37N971", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
+ { "37N972", KEY55_1|FIR|SERx4, 0x0b, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata lpc_chips_flat[] =
+{
+ /* Base address 0x2E or 0x4E */
+ { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
+ { "47N227", KEY55_1|FIR|SERx4, 0x7a, 0x00 },
+ { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata lpc_chips_paged[] =
+{
+ /* Base address 0x2E or 0x4E */
+ { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
+ { "47B37X", KEY55_1|SIR|SERx4, 0x52, 0x00 },
+ { "47M10X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M120", KEY55_1|NoIRDA|SERx4, 0x5c, 0x00 },
+ { "47M13X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M14X", KEY55_1|SIR|SERx4, 0x5f, 0x00 },
+ { "47N252", KEY55_1|FIR|SERx4, 0x0e, 0x00 },
+ { "47S42X", KEY55_1|SIR|SERx4, 0x57, 0x00 },
+ { NULL }
+};
+
+#define SMSCSIO_TYPE_FDC 1
+#define SMSCSIO_TYPE_LPC 2
+#define SMSCSIO_TYPE_FLAT 4
+#define SMSCSIO_TYPE_PAGED 8
+
+static struct smsc_chip_address __initdata possible_addresses[] =
+{
+ { 0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0, 0 }
+};
+
+/* Globals */
+
+static struct smsc_ircc_cb *dev_self[] = { NULL, NULL };
+static unsigned short dev_count;
+
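+/* select one of the chip's register banks via the low bits of IRCC_MASTER */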
+static inline void register_bank(int iobase, int bank)
+{
+ outb(((inb(iobase + IRCC_MASTER) & 0xf0) | (bank & 0x07)),
+ iobase + IRCC_MASTER);
+}
+
+/* PNP hotplug support */
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+ { .id = "SMCf010", .driver_data = 0 },
+ /* and presumably others */
+ { }
+};
+MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
+
+static int pnp_driver_registered;
+
+#ifdef CONFIG_PNP
+static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ unsigned int firbase, sirbase;
+ u8 dma, irq;
+
+ if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ pnp_dma_valid(dev, 0) && pnp_irq_valid(dev, 0)))
+ return -EINVAL;
+
+ sirbase = pnp_port_start(dev, 0);
+ firbase = pnp_port_start(dev, 1);
+ dma = pnp_dma(dev, 0);
+ irq = pnp_irq(dev, 0);
+
+ if (smsc_ircc_open(firbase, sirbase, dma, irq))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct pnp_driver smsc_ircc_pnp_driver = {
+ .name = "smsc-ircc2",
+ .id_table = smsc_ircc_pnp_table,
+ .probe = smsc_ircc_pnp_probe,
+};
+#else /* CONFIG_PNP */
+static struct pnp_driver smsc_ircc_pnp_driver;
+#endif
+
+/*******************************************************************************
+ *
+ *
+ * SMSC-ircc stuff
+ *
+ *
+ *******************************************************************************/
+
+static int __init smsc_ircc_legacy_probe(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_PCI
+ if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) {
+ /* Ignore errors from preconfiguration */
+ net_err_ratelimited("%s, Preconfiguration failed !\n",
+ driver_name);
+ }
+#endif
+
+ if (ircc_fir > 0 && ircc_sir > 0) {
+ net_info_ratelimited(" Overriding FIR address 0x%04x\n",
+ ircc_fir);
+ net_info_ratelimited(" Overriding SIR address 0x%04x\n",
+ ircc_sir);
+
+ if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
+ ret = -ENODEV;
+ } else {
+ ret = -ENODEV;
+
+ /* try user provided configuration register base address */
+ if (ircc_cfg > 0) {
+ net_info_ratelimited(" Overriding configuration address 0x%04x\n",
+ ircc_cfg);
+ if (!smsc_superio_fdc(ircc_cfg))
+ ret = 0;
+ if (!smsc_superio_lpc(ircc_cfg))
+ ret = 0;
+ }
+
+ if (smsc_ircc_look_for_chips() > 0)
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init smsc_ircc_init(void)
+{
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ ret = platform_driver_register(&smsc_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
+ return ret;
+ }
+
+ dev_count = 0;
+
+ if (smsc_nopnp || !pnp_platform_devices ||
+ ircc_cfg || ircc_fir || ircc_sir ||
+ ircc_dma != DMA_INVAL || ircc_irq != IRQ_INVAL) {
+ ret = smsc_ircc_legacy_probe();
+ } else {
+ if (pnp_register_driver(&smsc_ircc_pnp_driver) == 0)
+ pnp_driver_registered = 1;
+ }
+
+ if (ret) {
+ if (pnp_driver_registered)
+ pnp_unregister_driver(&smsc_ircc_pnp_driver);
+ platform_driver_unregister(&smsc_ircc_driver);
+ }
+
+ return ret;
+}
+
+static netdev_tx_t smsc_ircc_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+
+ if (self->io.speed > 115200)
+ return smsc_ircc_hard_xmit_fir(skb, dev);
+ else
+ return smsc_ircc_hard_xmit_sir(skb, dev);
+}
+
+static const struct net_device_ops smsc_ircc_netdev_ops = {
+ .ndo_open = smsc_ircc_net_open,
+ .ndo_stop = smsc_ircc_net_close,
+ .ndo_do_ioctl = smsc_ircc_net_ioctl,
+ .ndo_start_xmit = smsc_ircc_net_xmit,
+#if SMSC_IRCC2_C_NET_TIMEOUT
+ .ndo_tx_timeout = smsc_ircc_timeout,
+#endif
+};
+
+/*
+ * Function smsc_ircc_open (firbase, sirbase, dma, irq)
+ *
+ * Try to open driver instance
+ *
+ */
+static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+{
+ struct smsc_ircc_cb *self;
+ struct net_device *dev;
+ int err;
+
+ pr_debug("%s\n", __func__);
+
+ err = smsc_ircc_present(fir_base, sir_base);
+ if (err)
+ goto err_out;
+
+ err = -ENOMEM;
+ if (dev_count >= ARRAY_SIZE(dev_self)) {
+ net_warn_ratelimited("%s(), too many devices!\n", __func__);
+ goto err_out1;
+ }
+
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
+ if (!dev) {
+ net_warn_ratelimited("%s() can't allocate net device\n",
+ __func__);
+ goto err_out1;
+ }
+
+#if SMSC_IRCC2_C_NET_TIMEOUT
+ dev->watchdog_timeo = HZ * 2; /* Allow enough time for speed change */
+#endif
+ dev->netdev_ops = &smsc_ircc_netdev_ops;
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+
+ /* Make ifconfig display some details */
+ dev->base_addr = self->io.fir_base = fir_base;
+ dev->irq = self->io.irq = irq;
+
+ /* Need to store self somewhere */
+ dev_self[dev_count] = self;
+ spin_lock_init(&self->lock);
+
+ self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
+ self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
+
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL)
+ goto err_out2;
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL)
+ goto err_out3;
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
+ smsc_ircc_setup_qos(self);
+ smsc_ircc_init_chip(self);
+
+ if (ircc_transceiver > 0 &&
+ ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
+ self->transceiver = ircc_transceiver;
+ else
+ smsc_ircc_probe_transceiver(self);
+
+ err = register_netdev(self->netdev);
+ if (err) {
+ net_err_ratelimited("%s, Network device registration failed!\n",
+ driver_name);
+ goto err_out4;
+ }
+
+ self->pldev = platform_device_register_simple(SMSC_IRCC2_DRIVER_NAME,
+ dev_count, NULL, 0);
+ if (IS_ERR(self->pldev)) {
+ err = PTR_ERR(self->pldev);
+ goto err_out5;
+ }
+ platform_set_drvdata(self->pldev, self);
+
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+ dev_count++;
+
+ return 0;
+
+ err_out5:
+ unregister_netdev(self->netdev);
+
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ free_netdev(self->netdev);
+ dev_self[dev_count] = NULL;
+ err_out1:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ err_out:
+ return err;
+}
+
+/*
+ * Function smsc_ircc_present(fir_base, sir_base)
+ *
+ * Check the smsc-ircc chip presence
+ *
+ */
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
+{
+ unsigned char low, high, chip, config, dma, irq, version;
+
+ if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n",
+ __func__, fir_base);
+ goto out1;
+ }
+
+ if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n",
+ __func__, sir_base);
+ goto out2;
+ }
+
+ register_bank(fir_base, 3);
+
+ high = inb(fir_base + IRCC_ID_HIGH);
+ low = inb(fir_base + IRCC_ID_LOW);
+ chip = inb(fir_base + IRCC_CHIP_ID);
+ version = inb(fir_base + IRCC_VERSION);
+ config = inb(fir_base + IRCC_INTERFACE);
+ dma = config & IRCC_INTERFACE_DMA_MASK;
+ irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
+ if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
+ net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n",
+ __func__, fir_base);
+ goto out3;
+ }
+ net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+ chip & 0x0f, version,
+ fir_base, sir_base, dma, irq);
+
+ return 0;
+
+ out3:
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ out2:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ out1:
+ return -ENODEV;
+}
+
+/*
+ * Function smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq)
+ *
+ * Setup I/O
+ *
+ */
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
+ unsigned int fir_base, unsigned int sir_base,
+ u8 dma, u8 irq)
+{
+ unsigned char config, chip_dma, chip_irq;
+
+ register_bank(fir_base, 3);
+ config = inb(fir_base + IRCC_INTERFACE);
+ chip_dma = config & IRCC_INTERFACE_DMA_MASK;
+ chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
+ self->io.fir_base = fir_base;
+ self->io.sir_base = sir_base;
+ self->io.fir_ext = SMSC_IRCC2_FIR_CHIP_IO_EXTENT;
+ self->io.sir_ext = SMSC_IRCC2_SIR_CHIP_IO_EXTENT;
+ self->io.fifo_size = SMSC_IRCC2_FIFO_SIZE;
+ self->io.speed = SMSC_IRCC2_C_IRDA_FALLBACK_SPEED;
+
+ if (irq != IRQ_INVAL) {
+ if (irq != chip_irq)
+ net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n",
+ driver_name, chip_irq, irq);
+ self->io.irq = irq;
+ } else
+ self->io.irq = chip_irq;
+
+ if (dma != DMA_INVAL) {
+ if (dma != chip_dma)
+ net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n",
+ driver_name, chip_dma, dma);
+ self->io.dma = dma;
+ } else
+ self->io.dma = chip_dma;
+
+}
+
+/*
+ * Function smsc_ircc_setup_qos(self)
+ *
+ * Setup qos
+ *
+ */
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
+{
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+
+ self->qos.min_turn_time.bits = SMSC_IRCC2_MIN_TURN_TIME;
+ self->qos.window_size.bits = SMSC_IRCC2_WINDOW_SIZE;
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*
+ * Function smsc_ircc_init_chip(self)
+ *
+ * Init chip
+ *
+ */
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+
+ register_bank(iobase, 1);
+ outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | IRCC_CFGA_IRDA_SIR_A),
+ iobase + IRCC_SCE_CFGA);
+
+#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ iobase + IRCC_SCE_CFGB);
+#else
+ outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ iobase + IRCC_SCE_CFGB);
+#endif
+ (void) inb(iobase + IRCC_FIFO_THRESHOLD);
+ outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
+
+ register_bank(iobase, 4);
+ outb((inb(iobase + IRCC_CONTROL) & 0x30), iobase + IRCC_CONTROL);
+
+ register_bank(iobase, 0);
+ outb(0, iobase + IRCC_LCR_A);
+
+ smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+
+ /* Power on device */
+ outb(0x00, iobase + IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else {
+ /* Make sure we are the only one touching
+ * self->io.speed and the hardware - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = smsc_ircc_is_receiving(self);
+ break;
+ #if 0
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ smsc_ircc_sir_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ break;
+ #endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+#if SMSC_IRCC2_C_NET_TIMEOUT
+/*
+ * Function smsc_ircc_timeout (struct net_device *dev)
+ *
+ * The networking timeout management.
+ *
+ */
+
+static void smsc_ircc_timeout(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ unsigned long flags;
+
+ net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n",
+ dev->name, self->io.speed);
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_sir_start(self);
+ smsc_ircc_change_speed(self, self->io.speed);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+#endif
+
+/*
+ * Function smsc_ircc_hard_xmit_sir (struct sk_buff *skb, struct net_device *dev)
+ *
+ * Transmits the current frame until FIFO is full, then
+ * waits until the next transmit interrupt, and continues until the
+ * frame is transmitted.
+ */
+static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ s32 speed;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if (speed != self->io.speed && speed != -1) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /*
+ * We send frames one by one in SIR mode (no
+ * pipelining), so at this point, if we were sending
+ * a previous frame, we just received the interrupt
+ * telling us it is finished (UART_IIR_THRI).
+ * Therefore, waiting for the transmitter to really
+ * finish draining the fifo won't take too long.
+ * And the interrupt handler is not expected to run.
+ * - Jean II */
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, self->io.sir_base + UART_IER);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function smsc_ircc_set_fir_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ int fir_base, ir_mode, ctrl, fast;
+
+ IRDA_ASSERT(self != NULL, return;);
+ fir_base = self->io.fir_base;
+
+ self->io.speed = speed;
+
+ switch (speed) {
+ default:
+ case 576000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_CRC;
+ fast = 0;
+ pr_debug("%s(), handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_1152 | IRCC_CRC;
+ fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
+ pr_debug("%s(), handling baud of 1152000\n",
+ __func__);
+ break;
+ case 4000000:
+ ir_mode = IRCC_CFGA_IRDA_4PPM;
+ ctrl = IRCC_CRC;
+ fast = IRCC_LCR_A_FAST;
+ pr_debug("%s(), handling baud of 4000000\n",
+ __func__);
+ break;
+ }
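+	/* The FAST/GP_DATA selection computed above is now programmed by the
+	 * transceiver-specific code; see the disabled block below and
+	 * smsc_ircc_set_transceiver_for_speed().
+	 */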
+ #if 0
+	Now in transceiver!
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast, fir_base + IRCC_LCR_A);
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base + IRCC_SCE_CFGA);
+
+ register_bank(fir_base, 4);
+ outb((inb(fir_base + IRCC_CONTROL) & 0x30) | ctrl, fir_base + IRCC_CONTROL);
+}
+
+/*
+ * Function smsc_ircc_fir_start(self)
+ *
+ *    Prepare the chip for FIR operation and enable its interrupts
+ *
+ */
+static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+
+ fir_base = self->io.fir_base;
+
+ /* Reset everything */
+
+ /* Clear FIFO */
+ outb(inb(fir_base + IRCC_LCR_A) | IRCC_LCR_A_FIFO_RESET, fir_base + IRCC_LCR_A);
+
+ /* Enable interrupt */
+ /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base + IRCC_IER);*/
+
+ register_bank(fir_base, 1);
+
+ /* Select the TX/RX interface */
+#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ fir_base + IRCC_SCE_CFGB);
+#else
+ outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ fir_base + IRCC_SCE_CFGB);
+#endif
+ (void) inb(fir_base + IRCC_FIFO_THRESHOLD);
+
+ /* Enable SCE interrupts */
+ outb(0, fir_base + IRCC_MASTER);
+ register_bank(fir_base, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, fir_base + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, fir_base + IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_fir_stop(self, baud)
+ *
+ *    Stop FIR operations
+ *
+ */
+static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
+{
+ int fir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ fir_base = self->io.fir_base;
+ register_bank(fir_base, 0);
+ /*outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);*/
+ outb(inb(fir_base + IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base + IRCC_LCR_B);
+}
+
+
+/*
+ * Function smsc_ircc_change_speed(self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler. - Jean II
+ */
+static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ struct net_device *dev;
+ int last_speed_was_sir;
+
+ pr_debug("%s() changing speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+
+ last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
+
+ #if 0
+ /* Temp Hack */
+ speed= 1152000;
+ self->io.speed = speed;
+ last_speed_was_sir = 0;
+ smsc_ircc_fir_start(self);
+ #endif
+
+ if (self->io.speed == 0)
+ smsc_ircc_sir_start(self);
+
+ #if 0
+ if (!last_speed_was_sir) speed = self->io.speed;
+ #endif
+
+ if (self->io.speed != speed)
+ smsc_ircc_set_transceiver_for_speed(self, speed);
+
+ self->io.speed = speed;
+
+ if (speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ if (!last_speed_was_sir) {
+ smsc_ircc_fir_stop(self);
+ smsc_ircc_sir_start(self);
+ }
+ smsc_ircc_set_sir_speed(self, speed);
+ } else {
+ if (last_speed_was_sir) {
+ #if SMSC_IRCC2_C_SIR_STOP
+ smsc_ircc_sir_stop(self);
+ #endif
+ smsc_ircc_fir_start(self);
+ }
+ smsc_ircc_set_fir_speed(self, speed);
+
+ #if 0
+ self->tx_buff.len = 10;
+ self->tx_buff.data = self->tx_buff.head;
+
+ smsc_ircc_dma_xmit(self, 4000);
+ #endif
+ /* Be ready for incoming frames */
+ smsc_ircc_dma_receive(self);
+ }
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function smsc_ircc_set_sir_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
+{
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ iobase = self->io.sir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Turn off interrupts */
+ outb(0, iobase + UART_IER);
+
+ divisor = SMSC_IRCC2_MAX_SIR_SPEED / speed;
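+	/* e.g. 9600 baud gives a divisor of 115200 / 9600 = 12 */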
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
+	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ fcr |= self->io.speed < 38400 ?
+ UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase + UART_DLM);
+ outb(lcr, iobase + UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
+
+	/* Turn on interrupts */
+ outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
+
+ pr_debug("%s() speed changed to: %d\n", __func__, speed);
+}
+
+
+/*
+ * Function smsc_ircc_hard_xmit_fir (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t smsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ s32 speed;
+ int mtt;
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed after this frame */
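+	/* the register holds address bits 2-9, e.g. 0xbe << 2 = 0x2f8 */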
+ speed = irda_get_next_speed(skb);
+ if (speed != self->io.speed && speed != -1) {
+ /* Check for empty frame */
+ if (!skb->len) {
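+	/* the register holds address bits 3-10, e.g. 0x26 << 3 = 0x130 */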
+ /* Note : you should make sure that speed changes
+ * are not going to corrupt any outgoing frame.
+ * Look at nsc-ircc for the gory details - Jean II */
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ self->new_speed = speed;
+ }
+
+ skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len);
+
+ self->tx_buff.len = skb->len;
+ self->tx_buff.data = self->tx_buff.head;
+
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int bofs;
+
+ /*
+ * Compute how many BOFs (STA or PA's) we need to waste the
+ * min turn time given the speed of the link.
+ */
+ bofs = mtt * (self->io.speed / 1000) / 8000;
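+		/*
+		 * e.g. at 4 Mb/s one BOF byte lasts 2 us, so an mtt of
+		 * 1000 us gives 1000 * 4000 / 8000 = 500 extra BOFs.
+		 */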
+ if (bofs > 4095)
+ bofs = 4095;
+
+ smsc_ircc_dma_xmit(self, bofs);
+ } else {
+ /* Transmit frame */
+ smsc_ircc_dma_xmit(self, 0);
+ }
+
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function smsc_ircc_dma_xmit (self, bofs)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
+{
+ int iobase = self->io.fir_base;
+ u8 ctrl;
+
+ pr_debug("%s\n", __func__);
+#if 1
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ self->io.direction = IO_XMIT;
+
+ /* Set BOF additional count for generating the min turn time */
+ register_bank(iobase, 4);
+ outb(bofs & 0xff, iobase + IRCC_BOF_COUNT_LO);
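+	/* The high bits of the BOF count share a register with IRCC_CONTROL:
+	 * keep the upper (control) nibble and put bits 8-11 of the count in
+	 * the lower nibble.
+	 */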
+ ctrl = inb(iobase + IRCC_CONTROL) & 0xf0;
+ outb(ctrl | ((bofs >> 8) & 0x0f), iobase + IRCC_BOF_COUNT_HI);
+
+ /* Set max Tx frame size */
+ outb(self->tx_buff.len >> 8, iobase + IRCC_TX_SIZE_HI);
+ outb(self->tx_buff.len & 0xff, iobase + IRCC_TX_SIZE_LO);
+
+ /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
+
+ /* Enable burst mode chip Tx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
+
+ /* Setup DMA controller (must be done after enabling chip DMA) */
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ /* Enable interrupt */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
+
+ /* Enable transmit */
+ outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
+}
+
+/*
+ * Function smsc_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ pr_debug("%s\n", __func__);
+#if 0
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ /* Check for underrun! */
+ register_bank(iobase, 0);
+ if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Reset error condition */
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+ } else {
+ self->netdev->stats.tx_packets++;
+ self->netdev->stats.tx_bytes += self->tx_buff.len;
+ }
+
+ /* Check if it's time to change the speed */
+ if (self->new_speed) {
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ netif_wake_queue(self->netdev);
+}
+
+/*
+ * Function smsc_ircc_dma_receive(self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+#if 0
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+#endif
+
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Set max Rx frame size */
+ register_bank(iobase, 4);
+ outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
+ outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);
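+	/* 2050 matches the length check in smsc_ircc_dma_receive_complete() */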
+
+ /* Setup DMA controller */
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable burst mode chip Rx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
+
+ /* Enable interrupt */
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
+
+ /* Enable receiver */
+ register_bank(iobase, 0);
+ outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
+ iobase + IRCC_LCR_B);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_dma_receive_complete(self)
+ *
+ * Finished with receiving frames
+ *
+ */
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
+{
+ struct sk_buff *skb;
+ int len, msgcnt, lsr;
+ int iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+
+ pr_debug("%s\n", __func__);
+#if 0
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 0);
+ outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
+	lsr = inb(iobase + IRCC_LSR);
+ msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
+
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
+
+ len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
+
+ /* Look for errors */
+ if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
+ self->netdev->stats.rx_errors++;
+ if (lsr & IRCC_LSR_FRAME_ERROR)
+ self->netdev->stats.rx_frame_errors++;
+ if (lsr & IRCC_LSR_CRC_ERROR)
+ self->netdev->stats.rx_crc_errors++;
+ if (lsr & IRCC_LSR_SIZE_ERROR)
+ self->netdev->stats.rx_length_errors++;
+ if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
+ self->netdev->stats.rx_length_errors++;
+ return;
+ }
+
+ /* Remove CRC */
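+	/* (16-bit CRC below 4 Mb/s, 32-bit CRC at 4 Mb/s) */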
+ len -= self->io.speed < 4000000 ? 2 : 4;
+
+ if (len < 2 || len > 2050) {
+ net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len);
+ return;
+ }
+ pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
+
+ skb = dev_alloc_skb(len + 1);
+ if (!skb)
+ return;
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ skb_put_data(skb, self->rx_buff.data, len);
+ self->netdev->stats.rx_packets++;
+ self->netdev->stats.rx_bytes += len;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+}
+
+/*
+ * Function smsc_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ inb(iobase + UART_RX));
+
+		/* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ pr_debug("%s(), breaking!\n", __func__);
+ break;
+ }
+ } while (inb(iobase + UART_LSR) & UART_LSR_DR);
+}
+
+
+/*
+ * Function smsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ int iobase, iir, lcra, lsr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Serialise the interrupt handler in various CPUs, stop Tx path */
+ spin_lock(&self->lock);
+
+ /* Check if we should use the SIR interrupt handler */
+ if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ ret = smsc_ircc_interrupt_sir(dev);
+ goto irq_ret_unlock;
+ }
+
+ iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+ iir = inb(iobase + IRCC_IIR);
+ if (iir == 0)
+ goto irq_ret_unlock;
+ ret = IRQ_HANDLED;
+
+ /* Disable interrupts */
+ outb(0, iobase + IRCC_IER);
+ lcra = inb(iobase + IRCC_LCR_A);
+ lsr = inb(iobase + IRCC_LSR);
+
+ pr_debug("%s(), iir = 0x%02x\n", __func__, iir);
+
+ if (iir & IRCC_IIR_EOM) {
+ if (self->io.direction == IO_RECV)
+ smsc_ircc_dma_receive_complete(self);
+ else
+ smsc_ircc_dma_xmit_complete(self);
+
+ smsc_ircc_dma_receive(self);
+ }
+
+ if (iir & IRCC_IIR_ACTIVE_FRAME) {
+ /*printk(KERN_WARNING "%s(): Active Frame\n", __func__);*/
+ }
+
+ /* Enable interrupts again */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+
+ irq_ret_unlock:
+ spin_unlock(&self->lock);
+
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_interrupt_sir (dev)
+ *
+ * Interrupt handler for SIR modes
+ */
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ int boguscount = 0;
+ int iobase;
+ int iir, lsr;
+
+ /* Already locked coming here in smsc_ircc_interrupt() */
+ /*spin_lock(&self->lock);*/
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ if (iir == 0)
+ return IRQ_NONE;
+ while (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase + UART_LSR);
+
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
+
+ switch (iir) {
+ case UART_IIR_RLSI:
+ pr_debug("%s(), RLSI\n", __func__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ smsc_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ /* Transmitter ready for data */
+ smsc_ircc_sir_write_wakeup(self);
+ break;
+ default:
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
+ break;
+ }
+
+		/* Make sure we don't stay here too long */
+ if (boguscount++ > 100)
+ break;
+
+ iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ }
+ /*spin_unlock(&self->lock);*/
+ return IRQ_HANDLED;
+}
+
+
+#if 0 /* unused */
+/*
+ * Function ircc_is_receiving (self)
+ *
+ *    Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ int status = FALSE;
+ /* int iobase; */
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
+
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ return status;
+}
+#endif /* unused */
+
+static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
+{
+ int error;
+
+ error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
+ self->netdev->name, self->netdev);
+ if (error)
+ pr_debug("%s(), unable to allocate irq=%d, err=%d\n",
+ __func__, self->io.irq, error);
+
+ return error;
+}
+
+static void smsc_ircc_start_interrupts(struct smsc_ircc_cb *self)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ self->io.speed = 0;
+ smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static void smsc_ircc_stop_interrupts(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ register_bank(iobase, 0);
+ outb(0, iobase + IRCC_IER);
+ outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+
+/*
+ * Function smsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int smsc_ircc_net_open(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ char hwname[16];
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ if (self->io.suspended) {
+ pr_debug("%s(), device is suspended\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
+ (void *) dev)) {
+ pr_debug("%s(), unable to allocate irq=%d\n",
+ __func__, self->io.irq);
+ return -EAGAIN;
+ }
+
+ smsc_ircc_start_interrupts(self);
+
+ /* Give self a hardware name */
+ /* It would be cool to offer the chip revision here - Jean II */
+ sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ smsc_ircc_net_close(dev);
+
+ net_warn_ratelimited("%s(), unable to allocate DMA=%d\n",
+ __func__, self->io.dma);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int smsc_ircc_net_close(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ smsc_ircc_stop_interrupts(self);
+
+ /* if we are called from smsc_ircc_resume we don't have IRQ reserved */
+ if (!self->io.suspended)
+ free_irq(self->io.irq, dev);
+
+ disable_dma(self->io.dma);
+ free_dma(self->io.dma);
+
+ return 0;
+}
+
+static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct smsc_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (!self->io.suspended) {
+ pr_debug("%s, Suspending\n", driver_name);
+
+ rtnl_lock();
+ if (netif_running(self->netdev)) {
+ netif_device_detach(self->netdev);
+ smsc_ircc_stop_interrupts(self);
+ free_irq(self->io.irq, self->netdev);
+ disable_dma(self->io.dma);
+ }
+ self->io.suspended = 1;
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+static int smsc_ircc_resume(struct platform_device *dev)
+{
+ struct smsc_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (self->io.suspended) {
+ pr_debug("%s, Waking up\n", driver_name);
+
+ rtnl_lock();
+ smsc_ircc_init_chip(self);
+ if (netif_running(self->netdev)) {
+ if (smsc_ircc_request_irq(self)) {
+ /*
+ * Don't fail resume process, just kill this
+ * network interface
+ */
+ unregister_netdevice(self->netdev);
+ } else {
+ enable_dma(self->io.dma);
+ smsc_ircc_start_interrupts(self);
+ netif_device_attach(self->netdev);
+ }
+ }
+ self->io.suspended = 0;
+ rtnl_unlock();
+ }
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
+{
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ platform_device_unregister(self->pldev);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ smsc_ircc_stop_interrupts(self);
+
+ /* Release the PORTS that this driver is using */
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.fir_base);
+
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.sir_base);
+
+ release_region(self->io.sir_base, self->io.sir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static void __exit smsc_ircc_cleanup(void)
+{
+ int i;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < 2; i++) {
+ if (dev_self[i])
+ smsc_ircc_close(dev_self[i]);
+ }
+
+ if (pnp_driver_registered)
+ pnp_unregister_driver(&smsc_ircc_pnp_driver);
+
+ platform_driver_unregister(&smsc_ircc_driver);
+}
+
+/*
+ * Start SIR operations
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
+ */
+static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base, sir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+
+ fir_base = self->io.fir_base;
+ sir_base = self->io.sir_base;
+
+ /* Reset everything */
+ outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);
+
+ #if SMSC_IRCC2_C_SIR_STOP
+ /*smsc_ircc_sir_stop(self);*/
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base + IRCC_SCE_CFGA);
+
+ /* Initialize UART */
+ outb(UART_LCR_WLEN8, sir_base + UART_LCR); /* Reset DLAB */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base + UART_MCR);
+
+	/* Turn on interrupts */
+	outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, sir_base + UART_IER);
+
+ pr_debug("%s() - exit\n", __func__);
+
+ outb(0x00, fir_base + IRCC_MASTER);
+}
+
+#if SMSC_IRCC2_C_SIR_STOP
+void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
+{
+ int iobase;
+
+ pr_debug("%s\n", __func__);
+ iobase = self->io.sir_base;
+
+ /* Reset UART */
+ outb(0, iobase + UART_MCR);
+
+ /* Turn off interrupts */
+ outb(0, iobase + UART_IER);
+}
+#endif
+
+/*
+ * Function smsc_ircc_sir_write_wakeup (self)
+ *
+ * Called by the SIR interrupt handler when there's room for more data.
+ * If we have more packets to send, we send them here.
+ *
+ */
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+ int fcr;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ pr_debug("%s\n", __func__);
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0) {
+ /* Write data left in transmit buffer */
+ actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ } else {
+
+ /*if (self->tx_buff.len ==0) {*/
+
+ /*
+ * Now serial buffer is almost free & we can start
+ * transmission of another packet. But first we must check
+ * if we need to change the speed of the hardware
+ */
+ if (self->new_speed) {
+ pr_debug("%s(), Changing speed to %d.\n",
+ __func__, self->new_speed);
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ } else {
+ /* Tell network layer that we want more frames */
+ netif_wake_queue(self->netdev);
+ }
+ self->netdev->stats.tx_packets++;
+
+ if (self->io.speed <= 115200) {
+ /*
+ * Reset Rx FIFO to make sure that all reflected transmit data
+ * is discarded. This is needed for half duplex operation
+ */
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
+ fcr |= self->io.speed < 38400 ?
+ UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
+
+ outb(fcr, iobase + UART_FCR);
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase + UART_IER);
+ }
+ }
+}
+
+/*
+ * Function smsc_ircc_sir_write (iobase, fifo_size, buf, len)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
+ net_warn_ratelimited("%s(), failed, fifo not empty!\n",
+ __func__);
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while (fifo_size-- > 0 && actual < len) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase + UART_TX);
+ actual++;
+ }
+ return actual;
+}
+
+/*
+ * Function smsc_ircc_is_receiving (self)
+ *
+ *    Returns true if we are currently receiving data
+ *
+ */
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ return self->rx_buff.state != OUTSIDE_FRAME;
+}
+
+
+/*
+ * Function smsc_ircc_probe_transceiver(self)
+ *
+ *    Tries to find the transceiver in use
+ *
+ */
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
+{
+ unsigned int i;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ for (i = 0; smsc_transceivers[i].name != NULL; i++)
+ if (smsc_transceivers[i].probe(self->io.fir_base)) {
+ net_info_ratelimited(" %s transceiver found\n",
+ smsc_transceivers[i].name);
+			self->transceiver = i + 1;
+ return;
+ }
+
+ net_info_ratelimited("No transceiver found. Defaulting to %s\n",
+ smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+
+ self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
+}
+
+
+/*
+ * Function smsc_ircc_set_transceiver_for_speed(self, speed)
+ *
+ * Set the transceiver according to the speed
+ *
+ */
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ unsigned int trx;
+
+ trx = self->transceiver;
+ if (trx > 0)
+ smsc_transceivers[trx - 1].set_for_speed(self->io.fir_base, speed);
+}
+
+/*
+ * Function smsc_ircc_sir_wait_hw_transmitter_finish ()
+ *
+ * Wait for the real end of HW transmission
+ *
+ * The UART is a strict FIFO, and we get called only when we have finished
+ * pushing data to the FIFO, so the maximum amount of time we must wait
+ * is only for the FIFO to drain out.
+ *
+ * We use a simple calibrated loop. We may need to adjust the loop
+ * delay (udelay) to balance I/O traffic and latency. And we also need to
+ * adjust the maximum timeout.
+ * It would probably be better to wait for the proper interrupt,
+ * but it doesn't seem to be available.
+ *
+ * We can't use jiffies or kernel timers because :
+ * 1) We are called from the interrupt handler, which disables softirqs,
+ * so jiffies won't be increased
+ * 2) Jiffies granularity is usually very coarse (10ms), and we don't
+ * want to wait that long to detect stuck hardware.
+ * Jean II
+ */
+
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.sir_base;
+ int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
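+	/* one udelay(1) per pass, so the wait is bounded to roughly 1 ms */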
+
+ /* Calibrated busy loop */
+ while (count-- > 0 && !(inb(iobase + UART_LSR) & UART_LSR_TEMT))
+ udelay(1);
+
+ if (count < 0)
+ pr_debug("%s(): stuck transmitter\n", __func__);
+}
+
+
+/* PROBING
+ *
+ * REVISIT we can be told about the device by PNP, and should use that info
+ * instead of probing hardware and creating a platform_device ...
+ */
+
+static int __init smsc_ircc_look_for_chips(void)
+{
+ struct smsc_chip_address *address;
+ char *type;
+ unsigned int cfg_base, found;
+
+ found = 0;
+ address = possible_addresses;
+
+ while (address->cfg_base) {
+ cfg_base = address->cfg_base;
+
+ /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __func__, cfg_base, address->type);*/
+
+ if (address->type & SMSCSIO_TYPE_FDC) {
+ type = "FDC";
+ if (address->type & SMSCSIO_TYPE_FLAT)
+ if (!smsc_superio_flat(fdc_chips_flat, cfg_base, type))
+ found++;
+
+ if (address->type & SMSCSIO_TYPE_PAGED)
+ if (!smsc_superio_paged(fdc_chips_paged, cfg_base, type))
+ found++;
+ }
+ if (address->type & SMSCSIO_TYPE_LPC) {
+ type = "LPC";
+ if (address->type & SMSCSIO_TYPE_FLAT)
+ if (!smsc_superio_flat(lpc_chips_flat, cfg_base, type))
+ found++;
+
+ if (address->type & SMSCSIO_TYPE_PAGED)
+ if (!smsc_superio_paged(lpc_chips_paged, cfg_base, type))
+ found++;
+ }
+ address++;
+ }
+ return found;
+}
+
+/*
+ * Function smsc_superio_flat (chip, base, type)
+ *
+ * Try to get the configuration of an SMC SuperIO chip with flat register model
+ *
+ */
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfgbase, char *type)
+{
+ unsigned short firbase, sirbase;
+ u8 mode, dma, irq;
+ int ret = -ENODEV;
+
+ pr_debug("%s\n", __func__);
+
+ if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
+ return ret;
+
+ outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
+ mode = inb(cfgbase + 1);
+
+ /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
+
+ if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
+ net_warn_ratelimited("%s(): IrDA not enabled\n", __func__);
+
+ outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
+ sirbase = inb(cfgbase + 1) << 2;
+
+ /* FIR iobase */
+ outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
+ firbase = inb(cfgbase + 1) << 3;
+
+ /* DMA */
+ outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
+ dma = inb(cfgbase + 1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
+
+ /* IRQ */
+ outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
+ irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
+
+ net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n",
+ __func__, firbase, sirbase, dma, irq, mode);
+
+ if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
+ ret = 0;
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfgbase);
+
+ return ret;
+}
+
+/*
+ * Function smsc_superio_paged (chip, base, type)
+ *
+ * Try to get the configuration of an SMC SuperIO chip with paged register model
+ *
+ */
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type)
+{
+ unsigned short fir_io, sir_io;
+ int ret = -ENODEV;
+
+ pr_debug("%s\n", __func__);
+
+ if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
+ return ret;
+
+ /* Select logical device (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base + 1);
+
+ /* SIR iobase */
+ outb(0x60, cfg_base);
+ sir_io = inb(cfg_base + 1) << 8;
+ outb(0x61, cfg_base);
+ sir_io |= inb(cfg_base + 1);
+
+ /* Read FIR base */
+ outb(0x62, cfg_base);
+ fir_io = inb(cfg_base + 1) << 8;
+ outb(0x63, cfg_base);
+ fir_io |= inb(cfg_base + 1);
+ outb(0x2b, cfg_base); /* ??? */
+
+ if (fir_io && smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
+ ret = 0;
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ return ret;
+}
+
+
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
+{
+ pr_debug("%s\n", __func__);
+
+ outb(reg, cfg_base);
+ return inb(cfg_base) != reg ? -1 : 0;
+}
+
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+{
+ u8 devid, xdevid, rev;
+
+ pr_debug("%s\n", __func__);
+
+ /* Leave configuration */
+
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ if (inb(cfg_base) == SMSCSIO_CFGEXITKEY) /* not a smc superio chip */
+ return NULL;
+
+ outb(reg, cfg_base);
+
+ xdevid = inb(cfg_base + 1);
+
+ /* Enter configuration */
+
+ outb(SMSCSIO_CFGACCESSKEY, cfg_base);
+
+ #if 0
+ if (smsc_access(cfg_base,0x55)) /* send second key and check */
+ return NULL;
+ #endif
+
+ /* probe device ID */
+
+ if (smsc_access(cfg_base, reg))
+ return NULL;
+
+ devid = inb(cfg_base + 1);
+
+ if (devid == 0 || devid == 0xff) /* typical values for unused port */
+ return NULL;
+
+ /* probe revision ID */
+
+ if (smsc_access(cfg_base, reg + 1))
+ return NULL;
+
+ rev = inb(cfg_base + 1);
+
+	if (rev >= 128) /* a revision this high makes no sense */
+ return NULL;
+
+ if (devid == xdevid) /* protection against false positives */
+ return NULL;
+
+ /* Check for expected device ID; are there others? */
+
+ while (chip->devid != devid) {
+
+ chip++;
+
+ if (chip->name == NULL)
+ return NULL;
+ }
+
+ net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
+ devid, rev, cfg_base, type, chip->name);
+
+ if (chip->rev > rev) {
+ net_info_ratelimited("Revision higher than expected\n");
+ return NULL;
+ }
+
+ if (chip->flags & NoIRDA)
+ net_info_ratelimited("chipset does not support IRDA\n");
+
+ return chip;
+}
+
+static int __init smsc_superio_fdc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
+ !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
+ ret = 0;
+
+ release_region(cfg_base, 2);
+ }
+
+ return ret;
+}
+
+static int __init smsc_superio_lpc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
+ !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
+ ret = 0;
+
+ release_region(cfg_base, 2);
+ }
+ return ret;
+}
+
+/*
+ * Look for some specific subsystem setups that need
+ * pre-configuration not properly done by the BIOS (especially laptops).
+ * This code is based in part on smcinit.c, tosh1800-smcinit.c
+ * and tosh2450-smcinit.c. The table lists the device entries
+ * for ISA bridges with an LPC (Low Pin Count) controller which
+ * handles the communication with the SMSC device. After the LPC
+ * controller is initialized through PCI, the SMSC device is initialized
+ * through a dedicated port in the ISA port-mapped I/O area; this latter
+ * area is used to configure the SMSC device with default
+ * SIR and FIR I/O ports, DMA and IRQ. Different vendors have
+ * used different sets of parameters and different control port
+ * addresses making a subsystem device table necessary.
+ */
+#ifdef CONFIG_PCI
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
+ /*
+ * Subsystems needing entries:
+ * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
+ * 0x10b9:0x1533 0x0e11:0x005a Compaq nc4000 family
+ * 0x8086:0x24cc 0x0e11:0x002a HP nx9000 family
+ */
+ {
+ /* Guessed entry */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x08bc,
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nx5000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x088c,
+ /* Quite certain these are the same for nc8000 as for nc6000 */
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nc8000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x0890,
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nc6000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x0e11,
+ .subdevice = 0x0860,
+ /* I assume these are the same for x1000 as for the others */
+ .sir_io = 0x02e8,
+ .fir_io = 0x02f8,
+ .fir_irq = 0x07,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Compaq x1000 family",
+ },
+ {
+ /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x24c0,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x07,
+ .fir_dma = 0x01,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
+ .device = 0x248c,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x03,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Toshiba laptop with Intel 82801CAM ISA bridge",
+ },
+ {
+ /* 82801DBM (ICH4-M) LPC Interface Bridge */
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x24cc,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x03,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+		.name = "Toshiba laptop with Intel 82801DBM LPC bridge",
+ },
+ {
+ /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = 0x1533,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x02e8,
+ .fir_io = 0x02f8,
+ .fir_irq = 0x07,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_ali,
+ .name = "Toshiba laptop with ALi ISA bridge",
+ },
+ { } // Terminator
+};
+
+
+/*
+ * This sets up the basic SMSC parameters
+ * (FIR port, SIR port, FIR DMA, FIR IRQ)
+ * through the chip configuration port.
+ */
+static int __init
+preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf)
+{
+ unsigned short iobase = conf->cfg_base;
+ unsigned char tmpbyte;
+
+ outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
+ outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
+	tmpbyte = inb(iobase + 1); // Read device ID
+ pr_debug("Detected Chip id: 0x%02x, setting up registers...\n",
+ tmpbyte);
+
+ /* Disable UART1 and set up SIR I/O port */
+ outb(0x24, iobase); // select CR24 - UART1 base addr
+ outb(0x00, iobase + 1); // disable UART1
+ outb(SMSCSIOFLAT_UART2BASEADDR_REG, iobase); // select CR25 - UART2 base addr
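+	// the register stores SIR address bits 2-9, e.g. 0x2f8 >> 2 = 0xbe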
+	outb((conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8
+	tmpbyte = inb(iobase + 1);
+	if (tmpbyte != (conf->sir_io >> 2)) {
+ net_warn_ratelimited("ERROR: could not configure SIR ioport\n");
+ net_warn_ratelimited("Try to supply ircc_cfg argument\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR IRQ channel for UART2 */
+ outb(SMSCSIOFLAT_UARTIRQSELECT_REG, iobase); // select CR28 - UART1,2 IRQ select
+ tmpbyte = inb(iobase + 1);
+ tmpbyte &= SMSCSIOFLAT_UART1IRQSELECT_MASK; // Do not touch the UART1 portion
+ tmpbyte |= (conf->fir_irq & SMSCSIOFLAT_UART2IRQSELECT_MASK);
+ outb(tmpbyte, iobase + 1);
+ tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
+ if (tmpbyte != conf->fir_irq) {
+ net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR I/O port */
+ outb(SMSCSIOFLAT_FIRBASEADDR_REG, iobase); // CR2B - SCE (FIR) base addr
+ outb((conf->fir_io >> 3), iobase + 1);
+ tmpbyte = inb(iobase + 1);
+	if (tmpbyte != (conf->fir_io >> 3)) {
+ net_warn_ratelimited("ERROR: could not configure FIR I/O port\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR DMA channel */
+ outb(SMSCSIOFLAT_FIRDMASELECT_REG, iobase); // CR2C - SCE (FIR) DMA select
+ outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA
+ tmpbyte = inb(iobase + 1) & LPC47N227_FIRDMASELECT_MASK;
+ if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) {
+ net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n");
+ return -ENXIO;
+ }
+
+ outb(SMSCSIOFLAT_UARTMODE0C_REG, iobase); // CR0C - UART mode
+ tmpbyte = inb(iobase + 1);
+	tmpbyte &= ~SMSCSIOFLAT_UART2MODE_MASK;
+	tmpbyte |= SMSCSIOFLAT_UART2MODE_VAL_IRDA;
+	outb(tmpbyte, iobase + 1); // enable IrDA (HPSIR) mode, high speed
+
+ outb(LPC47N227_APMBOOTDRIVE_REG, iobase); // CR07 - Auto Pwr Mgt/boot drive sel
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_UART2AUTOPWRDOWN_MASK, iobase + 1); // enable UART2 autopower down
+
+ /* This one was not part of tosh1800 */
+ outb(0x0a, iobase); // CR0a - ecp fifo / ir mux
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | 0x40, iobase + 1); // send active device to ir port
+
+ outb(LPC47N227_UART12POWER_REG, iobase); // CR02 - UART 1,2 power
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_UART2POWERDOWN_MASK, iobase + 1); // UART2 power up mode, UART1 power down
+
+ outb(LPC47N227_FDCPOWERVALIDCONF_REG, iobase); // CR00 - FDC Power/valid config cycle
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_VALID_MASK, iobase + 1); // valid config cycle done
+
+ outb(LPC47N227_CFGEXITKEY, iobase); // Exit configuration
+
+ return 0;
+}
+
+/* 82801CAM generic registers */
+#define VID 0x00
+#define DID 0x02
+#define PIRQ_A_D_ROUT 0x60
+#define SIRQ_CNTL 0x64
+#define PIRQ_E_H_ROUT 0x68
+#define PCI_DMA_C 0x90
+/* LPC-specific registers */
+#define COM_DEC 0xe0
+#define GEN1_DEC 0xe4
+#define LPC_EN 0xe6
+#define GEN2_DEC 0xec
+/*
+ * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge
+ * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
+ * They all work the same way!
+ */
+static int __init
+preconfigure_through_82801(struct pci_dev *dev,
+			   struct smsc_ircc_subsystem_configuration *conf)
+{
+ unsigned short tmpword;
+ unsigned char tmpbyte;
+
+ net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n");
+ /*
+ * Select the range for the COMA COM port (SIR)
+ * Register COM_DEC:
+ * Bit 7: reserved
+ * Bit 6-4, COMB decode range
+ * Bit 3: reserved
+ * Bit 2-0, COMA decode range
+ *
+ * Decode ranges:
+ * 000 = 0x3f8-0x3ff (COM1)
+ * 001 = 0x2f8-0x2ff (COM2)
+ * 010 = 0x220-0x227
+ * 011 = 0x228-0x22f
+ * 100 = 0x238-0x23f
+ * 101 = 0x2e8-0x2ef (COM4)
+ * 110 = 0x338-0x33f
+ * 111 = 0x3e8-0x3ef (COM3)
+ */
+ pci_read_config_byte(dev, COM_DEC, &tmpbyte);
+ tmpbyte &= 0xf8; /* mask COMA bits */
+ switch(conf->sir_io) {
+ case 0x3f8:
+ tmpbyte |= 0x00;
+ break;
+ case 0x2f8:
+ tmpbyte |= 0x01;
+ break;
+ case 0x220:
+ tmpbyte |= 0x02;
+ break;
+ case 0x228:
+ tmpbyte |= 0x03;
+ break;
+ case 0x238:
+ tmpbyte |= 0x04;
+ break;
+ case 0x2e8:
+ tmpbyte |= 0x05;
+ break;
+ case 0x338:
+ tmpbyte |= 0x06;
+ break;
+ case 0x3e8:
+ tmpbyte |= 0x07;
+ break;
+ default:
+ tmpbyte |= 0x01; /* COM2 default */
+ }
+ pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte);
+ pci_write_config_byte(dev, COM_DEC, tmpbyte);
+
+ /* Enable Low Pin Count interface */
+ pci_read_config_word(dev, LPC_EN, &tmpword);
+ /* These seem to be set up at all times,
+ * just make sure it is properly set.
+ */
+ switch(conf->cfg_base) {
+ case 0x04e:
+ tmpword |= 0x2000;
+ break;
+ case 0x02e:
+ tmpword |= 0x1000;
+ break;
+ case 0x062:
+ tmpword |= 0x0800;
+ break;
+ case 0x060:
+ tmpword |= 0x0400;
+ break;
+ default:
+ net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n",
+ conf->cfg_base);
+ break;
+ }
+ tmpword &= 0xfffd; /* disable LPC COMB */
+ tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
+ pr_debug("LPC_EN (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, LPC_EN, tmpword);
+
+ /*
+ * Configure LPC DMA channel
+ * PCI_DMA_C bits:
+ * Bit 15-14: DMA channel 7 select
+ * Bit 13-12: DMA channel 6 select
+ * Bit 11-10: DMA channel 5 select
+ * Bit 9-8: Reserved
+ * Bit 7-6: DMA channel 3 select
+ * Bit 5-4: DMA channel 2 select
+ * Bit 3-2: DMA channel 1 select
+ * Bit 1-0: DMA channel 0 select
+ * 00 = Reserved value
+ * 01 = PC/PCI DMA
+ * 10 = Reserved value
+ * 11 = LPC I/F DMA
+ */
+ pci_read_config_word(dev, PCI_DMA_C, &tmpword);
+ switch(conf->fir_dma) {
+ case 0x07:
+ tmpword |= 0xc000;
+ break;
+ case 0x06:
+ tmpword |= 0x3000;
+ break;
+ case 0x05:
+ tmpword |= 0x0c00;
+ break;
+ case 0x03:
+ tmpword |= 0x00c0;
+ break;
+ case 0x02:
+ tmpword |= 0x0030;
+ break;
+ case 0x01:
+ tmpword |= 0x000c;
+ break;
+ case 0x00:
+ tmpword |= 0x0003;
+ break;
+ default:
+ break; /* do not change settings */
+ }
+ pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, PCI_DMA_C, tmpword);
+
+ /*
+ * GEN2_DEC bits:
+ * Bit 15-4: Generic I/O range
+ * Bit 3-1: reserved (read as 0)
+ * Bit 0: enable GEN2 range on LPC I/F
+ */
+ tmpword = conf->fir_io & 0xfff8;
+ tmpword |= 0x0001;
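+	/* e.g. a FIR base of 0x0130 gives GEN2_DEC = 0x0131 */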
+ pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, GEN2_DEC, tmpword);
+
+ /* Pre-configure chip */
+ return preconfigure_smsc_chip(conf);
+}
+
+/*
+ * Pre-configure a certain port on the ALi 1533 bridge.
+ * This is based on reverse-engineering since ALi does not
+ * provide any data sheet for the 1533 chip.
+ */
+static void __init preconfigure_ali_port(struct pci_dev *dev,
+ unsigned short port)
+{
+ unsigned char reg;
+ /* These bits obviously control the different ports */
+ unsigned char mask;
+ unsigned char tmpbyte;
+
+ switch(port) {
+ case 0x0130:
+ case 0x0178:
+ reg = 0xb0;
+ mask = 0x80;
+ break;
+ case 0x03f8:
+ reg = 0xb4;
+ mask = 0x80;
+ break;
+ case 0x02f8:
+ reg = 0xb4;
+ mask = 0x30;
+ break;
+ case 0x02e8:
+ reg = 0xb4;
+ mask = 0x08;
+ break;
+ default:
+ net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n",
+ port);
+ return;
+ }
+
+ pci_read_config_byte(dev, reg, &tmpbyte);
+ /* Turn on the right bits */
+ tmpbyte |= mask;
+ pci_write_config_byte(dev, reg, tmpbyte);
+ net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n",
+ port);
+}
+
+static int __init
+preconfigure_through_ali(struct pci_dev *dev,
+			 struct smsc_ircc_subsystem_configuration *conf)
+{
+ /* Configure the two ports on the ALi 1533 */
+ preconfigure_ali_port(dev, conf->sir_io);
+ preconfigure_ali_port(dev, conf->fir_io);
+
+ /* Pre-configure chip */
+ return preconfigure_smsc_chip(conf);
+}
+
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+ unsigned short ircc_fir,
+ unsigned short ircc_sir,
+ unsigned char ircc_dma,
+ unsigned char ircc_irq)
+{
+ struct pci_dev *dev = NULL;
+ unsigned short ss_vendor = 0x0000;
+ unsigned short ss_device = 0x0000;
+ int ret = 0;
+
+ for_each_pci_dev(dev) {
+ struct smsc_ircc_subsystem_configuration *conf;
+
+ /*
+ * Cache the subsystem vendor/device:
+ * some manufacturers fail to set this for all components,
+ * so we save it in case there is just 0x0000 0x0000 on the
+ * device we want to check.
+ */
+ if (dev->subsystem_vendor != 0x0000U) {
+ ss_vendor = dev->subsystem_vendor;
+ ss_device = dev->subsystem_device;
+ }
+ conf = subsystem_configurations;
+ for( ; conf->subvendor; conf++) {
+ if(conf->vendor == dev->vendor &&
+ conf->device == dev->device &&
+ conf->subvendor == ss_vendor &&
+ /* Sometimes these are cached values */
+ (conf->subdevice == ss_device ||
+ conf->subdevice == 0xffff)) {
+ struct smsc_ircc_subsystem_configuration
+ tmpconf;
+
+ memcpy(&tmpconf, conf,
+ sizeof(struct smsc_ircc_subsystem_configuration));
+
+ /*
+ * Override the default values with anything
+ * passed in as parameter
+ */
+ if (ircc_cfg != 0)
+ tmpconf.cfg_base = ircc_cfg;
+ if (ircc_fir != 0)
+ tmpconf.fir_io = ircc_fir;
+ if (ircc_sir != 0)
+ tmpconf.sir_io = ircc_sir;
+ if (ircc_dma != DMA_INVAL)
+ tmpconf.fir_dma = ircc_dma;
+ if (ircc_irq != IRQ_INVAL)
+ tmpconf.fir_irq = ircc_irq;
+
+ net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n",
+ conf->name);
+ if (conf->preconfigure)
+ ret = conf->preconfigure(dev, &tmpconf);
+ else
+ ret = -ENODEV;
+ }
+ }
+ }
+
+ return ret;
+}
+#endif // CONFIG_PCI
+
+/************************************************
+ *
+ * Transceiver-specific functions
+ *
+ ************************************************/
+
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_atc(fir_base, speed)
+ *
+ * Program transceiver through smsc-ircc ATC circuitry
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
+{
+ unsigned long jiffies_now, jiffies_timeout;
+ u8 val;
+
+ jiffies_now = jiffies;
+ jiffies_timeout = jiffies + SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
+
+ /* ATC */
+ register_bank(fir_base, 4);
+ outb((inb(fir_base + IRCC_ATC) & IRCC_ATC_MASK) | IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE,
+ fir_base + IRCC_ATC);
+
+ while ((val = (inb(fir_base + IRCC_ATC) & IRCC_ATC_nPROGREADY)) &&
+ !time_after(jiffies, jiffies_timeout))
+ /* empty */;
+
+ if (val)
+ net_warn_ratelimited("%s(): ATC: 0x%02x\n",
+ __func__, inb(fir_base + IRCC_ATC));
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_atc(fir_base)
+ *
+ * Probe transceiver smsc-ircc ATC circuitry
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch (speed) {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = IRCC_LCR_A_FAST;
+ break;
+ }
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch (speed) {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
+ break;
+
+ }
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
+{
+ return 0;
+}
+
+
+module_init(smsc_ircc_init);
+module_exit(smsc_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/smsc-ircc2.h b/drivers/staging/irda/drivers/smsc-ircc2.h
new file mode 100644
index 000000000000..4829fa22cb29
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-ircc2.h
@@ -0,0 +1,191 @@
+/*********************************************************************
+ *
+ * Description: Definitions for the SMC IrCC chipset
+ * Status: Experimental.
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ *
+ * Based on smc-ircc.h:
+ *
+ * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999, Thomas Davis <tadavis@jps.net>
+ * All Rights Reserved
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef SMSC_IRCC2_H
+#define SMSC_IRCC2_H
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Master Control Register */
+#define IRCC_MASTER 0x07
+#define IRCC_MASTER_POWERDOWN 0x80
+#define IRCC_MASTER_RESET 0x40
+#define IRCC_MASTER_INT_EN 0x20
+#define IRCC_MASTER_ERROR_RESET 0x10
+
+/* Register block 0 */
+
+/* Interrupt Identification */
+#define IRCC_IIR 0x01
+#define IRCC_IIR_ACTIVE_FRAME 0x80
+#define IRCC_IIR_EOM 0x40
+#define IRCC_IIR_RAW_MODE 0x20
+#define IRCC_IIR_FIFO 0x10
+
+/* Interrupt Enable */
+#define IRCC_IER 0x02
+#define IRCC_IER_ACTIVE_FRAME 0x80
+#define IRCC_IER_EOM 0x40
+#define IRCC_IER_RAW_MODE 0x20
+#define IRCC_IER_FIFO 0x10
+
+/* Line Status Register */
+#define IRCC_LSR 0x03
+#define IRCC_LSR_UNDERRUN 0x80
+#define IRCC_LSR_OVERRUN 0x40
+#define IRCC_LSR_FRAME_ERROR 0x20
+#define IRCC_LSR_SIZE_ERROR 0x10
+#define IRCC_LSR_CRC_ERROR      0x08
+#define IRCC_LSR_FRAME_ABORT    0x04
+
+/* Line Status Address Register */
+#define IRCC_LSAR 0x03
+#define IRCC_LSAR_ADDRESS_MASK 0x07
+
+/* Line Control Register A */
+#define IRCC_LCR_A 0x04
+#define IRCC_LCR_A_FIFO_RESET 0x80
+#define IRCC_LCR_A_FAST 0x40
+#define IRCC_LCR_A_GP_DATA 0x20
+#define IRCC_LCR_A_RAW_TX 0x10
+#define IRCC_LCR_A_RAW_RX 0x08
+#define IRCC_LCR_A_ABORT 0x04
+#define IRCC_LCR_A_DATA_DONE 0x02
+
+/* Line Control Register B */
+#define IRCC_LCR_B 0x05
+#define IRCC_LCR_B_SCE_DISABLED 0x00
+#define IRCC_LCR_B_SCE_TRANSMIT 0x40
+#define IRCC_LCR_B_SCE_RECEIVE 0x80
+#define IRCC_LCR_B_SCE_UNDEFINED 0xc0
+#define IRCC_LCR_B_SIP_ENABLE 0x20
+#define IRCC_LCR_B_BRICK_WALL 0x10
+
+/* Bus Status Register */
+#define IRCC_BSR 0x06
+#define IRCC_BSR_NOT_EMPTY 0x80
+#define IRCC_BSR_FIFO_FULL 0x40
+#define IRCC_BSR_TIMEOUT 0x20
+
+/* Register block 1 */
+
+#define IRCC_FIFO_THRESHOLD 0x02
+
+#define IRCC_SCE_CFGA 0x00
+#define IRCC_CFGA_AUX_IR 0x80
+#define IRCC_CFGA_HALF_DUPLEX 0x04
+#define IRCC_CFGA_TX_POLARITY 0x02
+#define IRCC_CFGA_RX_POLARITY 0x01
+
+#define IRCC_CFGA_COM 0x00
+#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
+#define IRCC_CFGA_IRDA_SIR_A 0x08
+#define IRCC_CFGA_ASK_SIR 0x10
+#define IRCC_CFGA_IRDA_SIR_B 0x18
+#define IRCC_CFGA_IRDA_HDLC 0x20
+#define IRCC_CFGA_IRDA_4PPM 0x28
+#define IRCC_CFGA_CONSUMER 0x30
+#define IRCC_CFGA_RAW_IR 0x38
+#define IRCC_CFGA_OTHER 0x40
+
+#define IRCC_IR_HDLC 0x04
+#define IRCC_IR_4PPM 0x01
+#define IRCC_IR_CONSUMER 0x02
+
+#define IRCC_SCE_CFGB 0x01
+#define IRCC_CFGB_LOOPBACK 0x20
+#define IRCC_CFGB_LPBCK_TX_CRC 0x10
+#define IRCC_CFGB_NOWAIT 0x08
+#define IRCC_CFGB_STRING_MOVE 0x04
+#define IRCC_CFGB_DMA_BURST 0x02
+#define IRCC_CFGB_DMA_ENABLE 0x01
+
+#define IRCC_CFGB_MUX_COM 0x00
+#define IRCC_CFGB_MUX_IR 0x40
+#define IRCC_CFGB_MUX_AUX 0x80
+#define IRCC_CFGB_MUX_INACTIVE 0xc0
+
+/* Register block 3 - Identification Registers! */
+#define IRCC_ID_HIGH 0x00 /* 0x10 */
+#define IRCC_ID_LOW 0x01 /* 0xB8 */
+#define IRCC_CHIP_ID 0x02 /* 0xF1 */
+#define IRCC_VERSION 0x03 /* 0x01 */
+#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
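+/*
+ * Illustrative decode (the value is made up, not from the datasheet): reading
+ * 0x63 from IRCC_INTERFACE would mean DMA channel 3 (low nibble) and
+ * IRQ 6 (high nibble).
+ */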
+
+/* Register block 4 - IrDA */
+#define IRCC_CONTROL 0x00
+#define IRCC_BOF_COUNT_LO 0x01 /* Low byte */
+#define IRCC_BOF_COUNT_HI 0x00 /* High nibble (bit 0-3) */
+#define IRCC_BRICKWALL_CNT_LO 0x02 /* Low byte */
+#define IRCC_BRICKWALL_CNT_HI 0x03 /* High nibble (bit 4-7) */
+#define IRCC_TX_SIZE_LO 0x04 /* Low byte */
+#define IRCC_TX_SIZE_HI 0x03 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_HI 0x05 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_LO 0x06 /* Low byte */
+
+#define IRCC_1152 0x80
+#define IRCC_CRC 0x40
+
+/* Register block 5 - IrDA */
+#define IRCC_ATC 0x00
+#define IRCC_ATC_nPROGREADY 0x80
+#define IRCC_ATC_SPEED 0x40
+#define IRCC_ATC_ENABLE 0x20
+#define IRCC_ATC_MASK 0xE0
+
+
+#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
+
+#define IRCC_SCE_TX_DELAY_TIMER 0x02
+
+/*
+ * Other definitions
+ */
+
+#define SMSC_IRCC2_MAX_SIR_SPEED 115200
+#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_FIFO_SIZE 16
+#define SMSC_IRCC2_FIFO_THRESHOLD 64
+/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+#define SMSC_IRCC2_RX_BUFF_TRUESIZE 14384
+#define SMSC_IRCC2_TX_BUFF_TRUESIZE 14384
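+/*
+ * Worked out with the usual 2048-byte maximum IrDA payload assumed:
+ * (2048 + 6) * 7 + 6 = 14384, which matches the TRUESIZE values above
+ * for a window size of 7.
+ */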
+#define SMSC_IRCC2_MIN_TURN_TIME 0x07
+#define SMSC_IRCC2_WINDOW_SIZE 0x07
+/* Maximum wait for hw transmitter to finish */
+#define SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US 1000 /* 1 ms */
+/* Maximum wait for ATC transceiver programming to finish */
+#define SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES 1
+#endif /* SMSC_IRCC2_H */
diff --git a/drivers/staging/irda/drivers/smsc-sio.h b/drivers/staging/irda/drivers/smsc-sio.h
new file mode 100644
index 000000000000..59e20e653ebe
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-sio.h
@@ -0,0 +1,100 @@
+#ifndef SMSC_SIO_H
+#define SMSC_SIO_H
+
+/******************************************
+ Keys. They should work with every SMsC SIO
+ ******************************************/
+
+#define SMSCSIO_CFGACCESSKEY 0x55
+#define SMSCSIO_CFGEXITKEY 0xaa
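+/*
+ * These follow the usual SMSC SuperIO scheme: writing the access key to the
+ * chip's configuration port (commonly 0x2e or 0x4e, board dependent) enters
+ * configuration mode, and writing the exit key leaves it again.
+ */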
+
+/*****************************
+ * Generic SIO Flat (!?) *
+ *****************************/
+
+/* Register 0x0d */
+#define SMSCSIOFLAT_DEVICEID_REG 0x0d
+
+/* Register 0x0c */
+#define SMSCSIOFLAT_UARTMODE0C_REG 0x0c
+#define SMSCSIOFLAT_UART2MODE_MASK 0x38
+#define SMSCSIOFLAT_UART2MODE_VAL_COM 0x00
+#define SMSCSIOFLAT_UART2MODE_VAL_IRDA 0x08
+#define SMSCSIOFLAT_UART2MODE_VAL_ASKIR 0x10
+
+/* Register 0x25 */
+#define SMSCSIOFLAT_UART2BASEADDR_REG 0x25
+
+/* Register 0x2b */
+#define SMSCSIOFLAT_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define SMSCSIOFLAT_FIRDMASELECT_REG 0x2c
+#define SMSCSIOFLAT_FIRDMASELECT_MASK 0x0f
+
+/* Register 0x28 */
+#define SMSCSIOFLAT_UARTIRQSELECT_REG 0x28
+#define SMSCSIOFLAT_UART2IRQSELECT_MASK 0x0f
+#define SMSCSIOFLAT_UART1IRQSELECT_MASK 0xf0
+#define SMSCSIOFLAT_UARTIRQSELECT_VAL_NONE 0x00
+
+
+/*********************
+ * LPC47N227 *
+ *********************/
+
+#define LPC47N227_CFGACCESSKEY 0x55
+#define LPC47N227_CFGEXITKEY 0xaa
+
+/* Register 0x00 */
+#define LPC47N227_FDCPOWERVALIDCONF_REG 0x00
+#define LPC47N227_FDCPOWER_MASK 0x08
+#define LPC47N227_VALID_MASK 0x80
+
+/* Register 0x02 */
+#define LPC47N227_UART12POWER_REG 0x02
+#define LPC47N227_UART1POWERDOWN_MASK 0x08
+#define LPC47N227_UART2POWERDOWN_MASK 0x80
+
+/* Register 0x07 */
+#define LPC47N227_APMBOOTDRIVE_REG 0x07
+#define LPC47N227_PARPORT2AUTOPWRDOWN_MASK 0x10 /* auto power down on if set */
+#define LPC47N227_UART2AUTOPWRDOWN_MASK 0x20 /* auto power down on if set */
+#define LPC47N227_UART1AUTOPWRDOWN_MASK 0x40 /* auto power down on if set */
+
+/* Register 0x0c */
+#define LPC47N227_UARTMODE0C_REG 0x0c
+#define LPC47N227_UART2MODE_MASK 0x38
+#define LPC47N227_UART2MODE_VAL_COM 0x00
+#define LPC47N227_UART2MODE_VAL_IRDA 0x08
+#define LPC47N227_UART2MODE_VAL_ASKIR 0x10
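+/*
+ * Mask/value pairs like these are meant for read-modify-write access, e.g.
+ * (sketch only, not the driver's actual code):
+ *
+ *	val = (val & ~LPC47N227_UART2MODE_MASK) | LPC47N227_UART2MODE_VAL_IRDA;
+ *
+ * which selects IrDA mode for UART2 without touching the other bits of
+ * register 0x0c.
+ */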
+
+/* Register 0x0d */
+#define LPC47N227_DEVICEID_REG 0x0d
+#define LPC47N227_DEVICEID_DEFVAL 0x5a
+
+/* Register 0x0e */
+#define LPC47N227_REVISIONID_REG 0x0e
+
+/* Register 0x25 */
+#define LPC47N227_UART2BASEADDR_REG 0x25
+
+/* Register 0x28 */
+#define LPC47N227_UARTIRQSELECT_REG 0x28
+#define LPC47N227_UART2IRQSELECT_MASK 0x0f
+#define LPC47N227_UART1IRQSELECT_MASK 0xf0
+#define LPC47N227_UARTIRQSELECT_VAL_NONE 0x00
+
+/* Register 0x2b */
+#define LPC47N227_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define LPC47N227_FIRDMASELECT_REG 0x2c
+#define LPC47N227_FIRDMASELECT_MASK 0x0f
+#define LPC47N227_FIRDMASELECT_VAL_DMA1 0x01 /* 47n227 has three dma channels */
+#define LPC47N227_FIRDMASELECT_VAL_DMA2 0x02
+#define LPC47N227_FIRDMASELECT_VAL_DMA3 0x03
+#define LPC47N227_FIRDMASELECT_VAL_NONE 0x0f
+
+
+#endif
diff --git a/drivers/staging/irda/drivers/stir4200.c b/drivers/staging/irda/drivers/stir4200.c
new file mode 100644
index 000000000000..ee2cb70b688d
--- /dev/null
+++ b/drivers/staging/irda/drivers/stir4200.c
@@ -0,0 +1,1134 @@
+/*****************************************************************************
+*
+* Filename: stir4200.c
+* Version: 0.4
+* Description: Irda SigmaTel USB Dongle
+* Status: Experimental
+* Author: Stephen Hemminger <shemminger@osdl.org>
+*
+* Based on earlier driver by Paul Stewart <stewart@parc.com>
+*
+* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * This dongle does no framing, and requires polling to receive the
+ * data. The STIr4200 has bulk in and out endpoints just like
+ * usb-irda devices, but the data it sends and receives is raw; like
+ * irtty, it needs to call the wrap and unwrap functions to add and
+ * remove SOF/BOF and escape characters to/from the frame.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200");
+MODULE_LICENSE("GPL");
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+static int rx_sensitivity = 1; /* FIR 0..4, SIR 0..6 */
+module_param(rx_sensitivity, int, 0);
+MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-6, 0 is most sensitive)");
+
+static int tx_power = 0; /* 0 = highest ... 3 = lowest */
+module_param(tx_power, int, 0);
+MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)");
+
+#define STIR_IRDA_HEADER 4
+#define CTRL_TIMEOUT 100 /* milliseconds */
+#define TRANSMIT_TIMEOUT 200 /* milliseconds */
+#define STIR_FIFO_SIZE 4096
+#define FIFO_REGS_SIZE 3
+
+enum FirChars {
+ FIR_CE = 0x7d,
+ FIR_XBOF = 0x7f,
+ FIR_EOF = 0x7e,
+};
+
+enum StirRequests {
+ REQ_WRITE_REG = 0x00,
+ REQ_READ_REG = 0x01,
+ REQ_READ_ROM = 0x02,
+ REQ_WRITE_SINGLE = 0x03,
+};
+
+/* Register offsets */
+enum StirRegs {
+ REG_RSVD=0,
+ REG_MODE,
+ REG_PDCLK,
+ REG_CTRL1,
+ REG_CTRL2,
+ REG_FIFOCTL,
+ REG_FIFOLSB,
+ REG_FIFOMSB,
+ REG_DPLL,
+ REG_IRDIG,
+ REG_TEST=15,
+};
+
+enum StirModeMask {
+ MODE_FIR = 0x80,
+ MODE_SIR = 0x20,
+ MODE_ASK = 0x10,
+ MODE_FASTRX = 0x08,
+ MODE_FFRSTEN = 0x04,
+ MODE_NRESET = 0x02,
+ MODE_2400 = 0x01,
+};
+
+enum StirPdclkMask {
+ PDCLK_4000000 = 0x02,
+ PDCLK_115200 = 0x09,
+ PDCLK_57600 = 0x13,
+ PDCLK_38400 = 0x1D,
+ PDCLK_19200 = 0x3B,
+ PDCLK_9600 = 0x77,
+ PDCLK_2400 = 0xDF,
+};
+
+enum StirCtrl1Mask {
+ CTRL1_SDMODE = 0x80,
+ CTRL1_RXSLOW = 0x40,
+ CTRL1_TXPWD = 0x10,
+ CTRL1_RXPWD = 0x08,
+ CTRL1_SRESET = 0x01,
+};
+
+enum StirCtrl2Mask {
+ CTRL2_SPWIDTH = 0x08,
+ CTRL2_REVID = 0x03,
+};
+
+enum StirFifoCtlMask {
+ FIFOCTL_DIR = 0x10,
+ FIFOCTL_CLR = 0x08,
+ FIFOCTL_EMPTY = 0x04,
+};
+
+enum StirDiagMask {
+ IRDIG_RXHIGH = 0x80,
+ IRDIG_RXLOW = 0x40,
+};
+
+enum StirTestMask {
+ TEST_PLLDOWN = 0x80,
+ TEST_LOOPIR = 0x40,
+ TEST_LOOPUSB = 0x20,
+ TEST_TSTENA = 0x10,
+ TEST_TSTOSC = 0x0F,
+};
+
+struct stir_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb  *irlap;	/* The link layer we are bound to */
+
+ struct qos_info qos;
+ unsigned speed; /* Current speed */
+
+ struct task_struct *thread; /* transmit thread */
+
+ struct sk_buff *tx_pending;
+ void *io_buf; /* transmit/receive buffer */
+ __u8 *fifo_status;
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ ktime_t rx_time;
+ int receiving;
+ struct urb *rx_urb;
+};
+
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */
+ { USB_DEVICE(0x066f, 0x4200) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+/* Send control message to set dongle register */
+static int write_reg(struct stir_cb *stir, __u16 reg, __u8 value)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ pr_debug("%s: write reg %d = 0x%x\n",
+ stir->netdev->name, reg, value);
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ REQ_WRITE_SINGLE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
+ value, reg, NULL, 0,
+ CTRL_TIMEOUT);
+}
+
+/* Send control message to read multiple registers */
+static inline int read_reg(struct stir_cb *stir, __u16 reg,
+ __u8 *data, __u16 count)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ REQ_READ_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, count,
+ CTRL_TIMEOUT);
+}
+
+static inline int isfir(u32 speed)
+{
+ return speed == 4000000;
+}
+
+/*
+ * Prepare a FIR IrDA frame for transmission to the USB dongle. The
+ * FIR transmit frame is documented in the datasheet. It consists of
+ * a two byte 0x55 0xAA sequence, two little-endian length bytes, a
+ * sequence of exactly 16 XBOF bytes of 0x7F, two BOF bytes of 0x7E,
+ * then the data escaped as follows:
+ *
+ * 0x7D -> 0x7D 0x5D
+ * 0x7E -> 0x7D 0x5E
+ * 0x7F -> 0x7D 0x5F
+ *
+ * Then, 4 bytes of little endian (stuffed) FCS follow, then two
+ * trailing EOF bytes of 0x7E.
+ */
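+/*
+ * Purely illustrative (the byte values follow from the description above,
+ * not from the datasheet): a two-byte payload of 0x01 0x7e would go out as
+ *
+ *	55 aa <len lo> <len hi>		header; length counts everything below
+ *	7f x 16				XBOFs
+ *	7e 7e				BOFs
+ *	01 7d 5e			payload, with 0x7e escaped
+ *	xx xx xx xx			little-endian FCS, escaped where needed
+ *	7e 7e				EOFs
+ */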
+static inline __u8 *stuff_fir(__u8 *p, __u8 c)
+{
+ switch(c) {
+ case 0x7d:
+ case 0x7e:
+ case 0x7f:
+ *p++ = 0x7d;
+ c ^= IRDA_TRANS;
+ /* fall through */
+ default:
+ *p++ = c;
+ }
+ return p;
+}
+
+/* Take raw data in skb and put it wrapped into buf */
+static unsigned wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ __u8 *ptr = buf;
+ __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
+ __u16 wraplen;
+ int i;
+
+ /* Header */
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+
+ ptr = buf + STIR_IRDA_HEADER;
+ memset(ptr, 0x7f, 16);
+ ptr += 16;
+
+ /* BOF */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Address / Control / Information */
+ for (i = 0; i < skb->len; i++)
+ ptr = stuff_fir(ptr, skb->data[i]);
+
+ /* FCS */
+ ptr = stuff_fir(ptr, fcs & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 8) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 16) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 24) & 0xff);
+
+ /* EOFs */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Total length, minus the header */
+ wraplen = (ptr - buf) - STIR_IRDA_HEADER;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
+
+static unsigned wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
+{
+ __u16 wraplen;
+
+ wraplen = async_wrap_skb(skb, buf + STIR_IRDA_HEADER,
+ STIR_FIFO_SIZE - STIR_IRDA_HEADER);
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
+
+/*
+ * Frame is fully formed in the rx_buff, so check the CRC,
+ * pass it up to IrLAP, and set up for the next receive.
+ */
+static void fir_eof(struct stir_cb *stir)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int len = rx_buff->len - 4;
+ struct sk_buff *skb, *nskb;
+ __u32 fcs;
+
+ if (unlikely(len <= 0)) {
+ pr_debug("%s: short frame len %d\n",
+ stir->netdev->name, len);
+
+ ++stir->netdev->stats.rx_errors;
+ ++stir->netdev->stats.rx_length_errors;
+ return;
+ }
+
+ fcs = ~(crc32_le(~0, rx_buff->data, len));
+ if (fcs != get_unaligned_le32(rx_buff->data + len)) {
+ pr_debug("crc error calc 0x%x len %d\n", fcs, len);
+ stir->netdev->stats.rx_errors++;
+ stir->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ /* if frame is short then just copy it */
+ if (len < IRDA_RX_COPY_THRESHOLD) {
+ nskb = dev_alloc_skb(len + 1);
+ if (unlikely(!nskb)) {
+ ++stir->netdev->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = nskb;
+ skb_copy_to_linear_data(nskb, rx_buff->data, len);
+ } else {
+ nskb = dev_alloc_skb(rx_buff->truesize);
+ if (unlikely(!nskb)) {
+ ++stir->netdev->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = rx_buff->skb;
+ rx_buff->skb = nskb;
+ rx_buff->head = nskb->data;
+ }
+
+ skb_put(skb, len);
+
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = stir->netdev;
+
+ netif_rx(skb);
+
+ stir->netdev->stats.rx_packets++;
+ stir->netdev->stats.rx_bytes += len;
+
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+}
+
+/* Unwrap FIR stuffed data and bump it to IrLAP */
+static void stir_fir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ __u8 byte = bytes[i];
+
+ switch(rx_buff->state) {
+ case OUTSIDE_FRAME:
+ /* ignore garbage till start of frame */
+ if (unlikely(byte != FIR_EOF))
+ continue;
+ /* Now receiving frame */
+ rx_buff->state = BEGIN_FRAME;
+
+ /* Time to initialize receive buffer */
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+ continue;
+
+ case LINK_ESCAPE:
+ if (byte == FIR_EOF) {
+ pr_debug("%s: got EOF after escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ }
+ rx_buff->state = INSIDE_FRAME;
+ byte ^= IRDA_TRANS;
+ break;
+
+ case BEGIN_FRAME:
+ /* ignore multiple BOF/EOF */
+ if (byte == FIR_EOF)
+ continue;
+ rx_buff->state = INSIDE_FRAME;
+ rx_buff->in_frame = TRUE;
+
+ /* fall through */
+ case INSIDE_FRAME:
+ switch(byte) {
+ case FIR_CE:
+ rx_buff->state = LINK_ESCAPE;
+ continue;
+ case FIR_XBOF:
+ /* 0x7f is not used in this framing */
+ pr_debug("%s: got XBOF without escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ case FIR_EOF:
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ fir_eof(stir);
+ continue;
+ }
+ break;
+ }
+
+ /* add byte to rx buffer */
+ if (unlikely(rx_buff->len >= rx_buff->truesize)) {
+ pr_debug("%s: fir frame exceeds %d\n",
+ stir->netdev->name, rx_buff->truesize);
+ ++stir->netdev->stats.rx_over_errors;
+ goto error_recovery;
+ }
+
+ rx_buff->data[rx_buff->len++] = byte;
+ continue;
+
+ frame_error:
+ ++stir->netdev->stats.rx_frame_errors;
+
+ error_recovery:
+ ++stir->netdev->stats.rx_errors;
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ }
+}
+
+/* Unwrap SIR stuffed data and bump it up to IrLAP */
+static void stir_sir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ async_unwrap_char(stir->netdev, &stir->netdev->stats,
+ &stir->rx_buff, bytes[i]);
+}
+
+static inline void unwrap_chars(struct stir_cb *stir,
+ const __u8 *bytes, int length)
+{
+ if (isfir(stir->speed))
+ stir_fir_chars(stir, bytes, length);
+ else
+ stir_sir_chars(stir, bytes, length);
+}
+
+/* Mode parameters for each speed */
+static const struct {
+ unsigned speed;
+ __u8 pdclk;
+} stir_modes[] = {
+ { 2400, PDCLK_2400 },
+ { 9600, PDCLK_9600 },
+ { 19200, PDCLK_19200 },
+ { 38400, PDCLK_38400 },
+ { 57600, PDCLK_57600 },
+ { 115200, PDCLK_115200 },
+ { 4000000, PDCLK_4000000 },
+};
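+/*
+ * For instance, a change_speed() call for 115200 baud (below) picks the
+ * PDCLK_115200 entry from this table and ends up writing 0x09 to REG_PDCLK
+ * and MODE_SIR | MODE_NRESET | MODE_FASTRX to REG_MODE.
+ */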
+
+
+/*
+ * Setup chip for speed.
+ * Called at startup to initialize the chip
+ * and on speed changes.
+ *
+ * Note: writing multiple registers at once doesn't appear to work
+ */
+static int change_speed(struct stir_cb *stir, unsigned speed)
+{
+ int i, err;
+ __u8 mode;
+
+ for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) {
+ if (speed == stir_modes[i].speed)
+ goto found;
+ }
+
+ dev_warn(&stir->netdev->dev, "invalid speed %d\n", speed);
+ return -EINVAL;
+
+ found:
+ pr_debug("speed change from %d to %d\n", stir->speed, speed);
+
+ /* Reset modulator */
+ err = write_reg(stir, REG_CTRL1, CTRL1_SRESET);
+ if (err)
+ goto out;
+
+ /* Undocumented magic to tweak the DPLL */
+ err = write_reg(stir, REG_DPLL, 0x15);
+ if (err)
+ goto out;
+
+ /* Set clock */
+ err = write_reg(stir, REG_PDCLK, stir_modes[i].pdclk);
+ if (err)
+ goto out;
+
+ mode = MODE_NRESET | MODE_FASTRX;
+ if (isfir(speed))
+ mode |= MODE_FIR | MODE_FFRSTEN;
+ else
+ mode |= MODE_SIR;
+
+ if (speed == 2400)
+ mode |= MODE_2400;
+
+ err = write_reg(stir, REG_MODE, mode);
+ if (err)
+ goto out;
+
+ /* This resets TEMIC style transceiver if any. */
+ err = write_reg(stir, REG_CTRL1,
+ CTRL1_SDMODE | (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ err = write_reg(stir, REG_CTRL1, (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ /* Reset sensitivity */
+ err = write_reg(stir, REG_CTRL2, (rx_sensitivity & 7) << 5);
+ out:
+ stir->speed = speed;
+ return err;
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t stir_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ skb = xchg(&stir->tx_pending, skb);
+ wake_up_process(stir->thread);
+
+ /* this should never happen unless stop/wakeup problem */
+ if (unlikely(skb)) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Wait for the transmit FIFO to have space for next data
+ *
+ * If space < 0 then wait till FIFO completely drains.
+ * FYI: can take up to 13 seconds at 2400baud.
+ */
+static int fifo_txwait(struct stir_cb *stir, int space)
+{
+ int err;
+ unsigned long count, status;
+ unsigned long prev_count = 0x1fff;
+
+ /* Read FIFO status and count */
+ for (;; prev_count = count) {
+ err = read_reg(stir, REG_FIFOCTL, stir->fifo_status,
+ FIFO_REGS_SIZE);
+ if (unlikely(err != FIFO_REGS_SIZE)) {
+ dev_warn(&stir->netdev->dev,
+ "FIFO register read error: %d\n", err);
+
+ return err;
+ }
+
+ status = stir->fifo_status[0];
+ count = (unsigned)(stir->fifo_status[2] & 0x1f) << 8
+ | stir->fifo_status[1];
+
+ pr_debug("fifo status 0x%lx count %lu\n", status, count);
+
+ /* is fifo receiving already, or empty */
+ if (!(status & FIFOCTL_DIR) ||
+ (status & FIFOCTL_EMPTY))
+ return 0;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* shutting down? */
+ if (!netif_running(stir->netdev) ||
+ !netif_device_present(stir->netdev))
+ return -ESHUTDOWN;
+
+ /* only waiting for some space */
+ if (space >= 0 && STIR_FIFO_SIZE - 4 > space + count)
+ return 0;
+
+ /* queue confused */
+ if (prev_count < count)
+ break;
+
+ /* estimate transfer time for remaining chars */
+ msleep((count * 8000) / stir->speed);
+ }
+
+ err = write_reg(stir, REG_FIFOCTL, FIFOCTL_CLR);
+ if (err)
+ return err;
+ err = write_reg(stir, REG_FIFOCTL, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+
+/* Wait for turnaround delay before starting transmit. */
+static void turnaround_delay(const struct stir_cb *stir, long us)
+{
+ long ticks;
+
+ if (us <= 0)
+ return;
+
+ us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
+ if (us < 10)
+ return;
+
+ ticks = us / (1000000 / HZ);
+ if (ticks > 0)
+ schedule_timeout_interruptible(1 + ticks);
+ else
+ udelay(us);
+}
+
+/*
+ * Start receiver by submitting a request to the receive pipe.
+ * If nothing is available it will return after rx_interval.
+ */
+static int receive_start(struct stir_cb *stir)
+{
+ /* reset state */
+ stir->receiving = 1;
+
+ stir->rx_buff.in_frame = FALSE;
+ stir->rx_buff.state = OUTSIDE_FRAME;
+
+ stir->rx_urb->status = 0;
+ return usb_submit_urb(stir->rx_urb, GFP_KERNEL);
+}
+
+/* Stop all pending receive Urb's */
+static void receive_stop(struct stir_cb *stir)
+{
+ stir->receiving = 0;
+ usb_kill_urb(stir->rx_urb);
+
+ if (stir->rx_buff.in_frame)
+ stir->netdev->stats.collisions++;
+}
+/*
+ * Wrap data in socket buffer and send it.
+ */
+static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
+{
+ unsigned wraplen;
+ int first_frame = 0;
+
+ /* if receiving, need to turnaround */
+ if (stir->receiving) {
+ receive_stop(stir);
+ turnaround_delay(stir, irda_get_mtt(skb));
+ first_frame = 1;
+ }
+
+ if (isfir(stir->speed))
+ wraplen = wrap_fir_skb(skb, stir->io_buf);
+ else
+ wraplen = wrap_sir_skb(skb, stir->io_buf);
+
+ /* check for space available in fifo */
+ if (!first_frame)
+ fifo_txwait(stir, wraplen);
+
+ stir->netdev->stats.tx_packets++;
+ stir->netdev->stats.tx_bytes += skb->len;
+ netif_trans_update(stir->netdev);
+ pr_debug("send %d (%d)\n", skb->len, wraplen);
+
+ if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
+ stir->io_buf, wraplen,
+ NULL, TRANSMIT_TIMEOUT))
+ stir->netdev->stats.tx_errors++;
+}
+
+/*
+ * Transmit state machine thread
+ */
+static int stir_transmit_thread(void *arg)
+{
+ struct stir_cb *stir = arg;
+ struct net_device *dev = stir->netdev;
+ struct sk_buff *skb;
+
+ while (!kthread_should_stop()) {
+#ifdef CONFIG_PM
+ /* if suspending, then power off and wait */
+ if (unlikely(freezing(current))) {
+ if (stir->receiving)
+ receive_stop(stir);
+ else
+ fifo_txwait(stir, -1);
+
+ write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
+
+ try_to_freeze();
+
+ if (change_speed(stir, stir->speed))
+ break;
+ }
+#endif
+
+ /* if something to send? */
+ skb = xchg(&stir->tx_pending, NULL);
+ if (skb) {
+ unsigned new_speed = irda_get_next_speed(skb);
+ netif_wake_queue(dev);
+
+ if (skb->len > 0)
+ stir_send(stir, skb);
+ dev_kfree_skb(skb);
+
+ if ((new_speed != -1) && (stir->speed != new_speed)) {
+ if (fifo_txwait(stir, -1) ||
+ change_speed(stir, new_speed))
+ break;
+ }
+ continue;
+ }
+
+ /* nothing to send? start receiving */
+ if (!stir->receiving &&
+ irda_device_txqueue_empty(dev)) {
+ /* Wait otherwise chip gets confused. */
+ if (fifo_txwait(stir, -1))
+ break;
+
+ if (unlikely(receive_start(stir))) {
+ if (net_ratelimit())
+ dev_info(&dev->dev,
+ "%s: receive usb submit failed\n",
+ stir->netdev->name);
+ stir->receiving = 0;
+ msleep(10);
+ continue;
+ }
+ }
+
+ /* sleep if nothing to send */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ }
+ return 0;
+}
+
+
+/*
+ * USB bulk receive completion callback.
+ * Wakes up every ms (usb round trip) with wrapped
+ * data.
+ */
+static void stir_rcv_irq(struct urb *urb)
+{
+ struct stir_cb *stir = urb->context;
+ int err;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(stir->netdev))
+ return;
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0)
+ return;
+
+ if (urb->actual_length > 0) {
+ pr_debug("receive %d\n", urb->actual_length);
+ unwrap_chars(stir, urb->transfer_buffer,
+ urb->actual_length);
+
+ stir->rx_time = ktime_get();
+ }
+
+ /* kernel thread is stopping receiver don't resubmit */
+ if (!stir->receiving)
+ return;
+
+ /* resubmit existing urb */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* in case of error, the kernel thread will restart us */
+ if (err) {
+ dev_warn(&stir->netdev->dev, "usb receive submit error: %d\n",
+ err);
+ stir->receiving = 0;
+ wake_up_process(stir->thread);
+ }
+}
+
+/*
+ * Function stir_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int stir_net_open(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+ int err;
+ char hwname[16];
+
+ err = usb_clear_halt(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1));
+ if (err)
+ goto err_out1;
+ err = usb_clear_halt(stir->usbdev, usb_rcvbulkpipe(stir->usbdev, 2));
+ if (err)
+ goto err_out1;
+
+ err = change_speed(stir, 9600);
+ if (err)
+ goto err_out1;
+
+ err = -ENOMEM;
+
+ /* Initialize for SIR/FIR to copy data directly into skb. */
+ stir->receiving = 0;
+ stir->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ stir->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!stir->rx_buff.skb)
+ goto err_out1;
+
+ skb_reserve(stir->rx_buff.skb, 1);
+ stir->rx_buff.head = stir->rx_buff.skb->data;
+ stir->rx_time = ktime_get();
+
+ stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!stir->rx_urb)
+ goto err_out2;
+
+ stir->io_buf = kmalloc(STIR_FIFO_SIZE, GFP_KERNEL);
+ if (!stir->io_buf)
+ goto err_out3;
+
+ usb_fill_bulk_urb(stir->rx_urb, stir->usbdev,
+ usb_rcvbulkpipe(stir->usbdev, 2),
+ stir->io_buf, STIR_FIFO_SIZE,
+ stir_rcv_irq, stir);
+
+ stir->fifo_status = kmalloc(FIFO_REGS_SIZE, GFP_KERNEL);
+ if (!stir->fifo_status)
+ goto err_out4;
+
+ /*
+ * Now that everything should be initialized properly,
+ * open a new IrLAP layer instance to take care of us...
+ * Note: this will immediately send a speed change...
+ */
+ sprintf(hwname, "usb#%d", stir->usbdev->devnum);
+ stir->irlap = irlap_open(netdev, &stir->qos, hwname);
+ if (!stir->irlap) {
+ dev_err(&stir->usbdev->dev, "irlap_open failed\n");
+ goto err_out5;
+ }
+
+ /** Start kernel thread for transmit. */
+ stir->thread = kthread_run(stir_transmit_thread, stir,
+ "%s", stir->netdev->name);
+ if (IS_ERR(stir->thread)) {
+ err = PTR_ERR(stir->thread);
+ dev_err(&stir->usbdev->dev, "unable to start kernel thread\n");
+ goto err_out6;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+ err_out6:
+ irlap_close(stir->irlap);
+ err_out5:
+ kfree(stir->fifo_status);
+ err_out4:
+ kfree(stir->io_buf);
+ err_out3:
+ usb_free_urb(stir->rx_urb);
+ err_out2:
+ kfree_skb(stir->rx_buff.skb);
+ err_out1:
+ return err;
+}
+
+/*
+ * Function stir_net_close (stir)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int stir_net_close(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Kill transmit thread */
+ kthread_stop(stir->thread);
+ kfree(stir->fifo_status);
+
+ /* Mop up receive urb's */
+ usb_kill_urb(stir->rx_urb);
+
+ kfree(stir->io_buf);
+ usb_free_urb(stir->rx_urb);
+ kfree_skb(stir->rx_buff.skb);
+
+ /* Stop and remove instance of IrLAP */
+ if (stir->irlap)
+ irlap_close(stir->irlap);
+
+ stir->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct stir_cb *stir = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(stir->netdev))
+ ret = change_speed(stir, irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(stir->netdev))
+ irda_device_set_media_busy(stir->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = stir->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops stir_netdev_ops = {
+ .ndo_open = stir_net_open,
+ .ndo_stop = stir_net_close,
+ .ndo_start_xmit = stir_hard_xmit,
+ .ndo_do_ioctl = stir_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ * Note : it might be worth protecting this function by a global
+ * spinlock... Or not, because maybe USB already deals with that...
+ */
+static int stir_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct stir_cb *stir = NULL;
+ struct net_device *net;
+ int ret = -ENOMEM;
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*stir));
+ if(!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ stir = netdev_priv(net);
+ stir->netdev = net;
+ stir->usbdev = dev;
+
+ ret = usb_reset_configuration(dev);
+ if (ret != 0) {
+ dev_err(&intf->dev, "usb reset configuration failed\n");
+ goto err_out2;
+ }
+
+ printk(KERN_INFO "SigmaTel STIr4200 IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&stir->qos);
+
+ /* That's the Rx capability. */
+ stir->qos.baud_rate.bits &= IR_2400 | IR_9600 | IR_19200 |
+ IR_38400 | IR_57600 | IR_115200 |
+ (IR_4000000 << 8);
+ stir->qos.min_turn_time.bits &= qos_mtt_bits;
+ irda_qos_bits_to_value(&stir->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &stir_netdev_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto err_out2;
+
+ dev_info(&intf->dev, "IrDA: Registered SigmaTel device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, stir);
+
+ return 0;
+
+err_out2:
+ free_netdev(net);
+err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void stir_disconnect(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ if (!stir)
+ return;
+
+ unregister_netdev(stir->netdev);
+ free_netdev(stir->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int stir_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_detach(stir->netdev);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int stir_resume(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_attach(stir->netdev);
+
+ /* receiver restarted when send thread wakes up */
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "stir4200",
+ .probe = stir_probe,
+ .disconnect = stir_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = stir_suspend,
+ .resume = stir_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
diff --git a/drivers/staging/irda/drivers/tekram-sir.c b/drivers/staging/irda/drivers/tekram-sir.c
new file mode 100644
index 000000000000..9dcf0c103b9d
--- /dev/null
+++ b/drivers/staging/irda/drivers/tekram-sir.c
@@ -0,0 +1,225 @@
+/*********************************************************************
+ *
+ * Filename: tekram.c
+ * Version: 1.3
+ * Description: Implementation of the Tekram IrMate IR-210B dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:38 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int tekram_delay = 150; /* default is 150 ms */
+module_param(tekram_delay, int, 0);
+MODULE_PARM_DESC(tekram_delay, "tekram dongle write complete delay");
+
+static int tekram_open(struct sir_dev *);
+static int tekram_close(struct sir_dev *);
+static int tekram_change_speed(struct sir_dev *, unsigned);
+static int tekram_reset(struct sir_dev *);
+
+#define TEKRAM_115200 0x00
+#define TEKRAM_57600 0x01
+#define TEKRAM_38400 0x02
+#define TEKRAM_19200 0x03
+#define TEKRAM_9600 0x04
+
+#define TEKRAM_PW 0x10 /* Pulse select bit */
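+/*
+ * The control byte written to the dongle combines the pulse-select bit with
+ * a speed code: the speed-change path below sends e.g. TEKRAM_PW |
+ * TEKRAM_9600 (0x14) for 9600 baud, and plain TEKRAM_115200 for 115200 baud.
+ */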
+
+static struct dongle_driver tekram = {
+ .owner = THIS_MODULE,
+ .driver_name = "Tekram IR-210B",
+ .type = IRDA_TEKRAM_DONGLE,
+ .open = tekram_open,
+ .close = tekram_close,
+ .reset = tekram_reset,
+ .set_speed = tekram_change_speed,
+};
+
+static int __init tekram_sir_init(void)
+{
+ if (tekram_delay < 1 || tekram_delay > 500)
+ tekram_delay = 200;
+ pr_debug("%s - using %d ms delay\n",
+ tekram.driver_name, tekram_delay);
+ return irda_register_dongle(&tekram);
+}
+
+static void __exit tekram_sir_cleanup(void)
+{
+ irda_unregister_dongle(&tekram);
+}
+
+static int tekram_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int tekram_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function tekram_change_speed (dev, state, speed)
+ *
+ * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. clear DTR
+ * 2. set RTS, and wait at least 7 us
+ * 3. send Control Byte to the IR-210 through TXD to set new baud rate
+ * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
+ * it takes about 100 msec)
+ *
+ * [oops, why 100 msec? sending 1 byte (10 bits) takes 1.05 msec
+ * - is this perhaps to compensate for delays in the tty layer?]
+ *
+ * 5. clear RTS (return to NORMAL Operation)
+ * 6. wait at least 50 us, new setting (baud rate, etc) takes effect here
+ * after
+ */
+
+#define TEKRAM_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TEKRAM_PW|TEKRAM_9600;
+ break;
+ case 19200:
+ byte = TEKRAM_PW|TEKRAM_19200;
+ break;
+ case 38400:
+ byte = TEKRAM_PW|TEKRAM_38400;
+ break;
+ case 57600:
+ byte = TEKRAM_PW|TEKRAM_57600;
+ break;
+ case 115200:
+ byte = TEKRAM_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TEKRAM_STATE_WAIT_SPEED;
+ delay = tekram_delay;
+ break;
+
+ case TEKRAM_STATE_WAIT_SPEED:
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(50);
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function tekram_reset (driver)
+ *
+ * This function resets the tekram dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * Algorithm:
+ * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
+ * 1. clear RTS
+ * 2. set DTR, and wait at least 1 ms
+ * 3. clear DTR to SPACE state, wait at least 50 us for further
+ * operation
+ */
+
+static int tekram_reset(struct sir_dev *dev)
+{
+ /* Clear DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Should sleep 1 ms */
+ msleep(1);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 50 us */
+ udelay(75);
+
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
+
+module_init(tekram_sir_init);
+module_exit(tekram_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/toim3232-sir.c b/drivers/staging/irda/drivers/toim3232-sir.c
new file mode 100644
index 000000000000..b977d6d33e74
--- /dev/null
+++ b/drivers/staging/irda/drivers/toim3232-sir.c
@@ -0,0 +1,358 @@
+/*********************************************************************
+ *
+ * Filename: toim3232-sir.c
+ * Version: 1.0
+ * Description: Implementation of dongles based on the Vishay/Temic
+ * TOIM3232 SIR Endec chipset. Currently only the
+ * IRWave IR320ST-2 is tested, although it should work
+ * with any TOIM3232 or TOIM4232 chipset based RS232
+ * dongle with minimal modification.
+ * Based heavily on the Tekram driver (tekram.c),
+ * with thanks to Dag Brattli and Martin Diehl.
+ * Status: Experimental.
+ * Author: David Basden <davidb-irda@rcpt.to>
+ * Created at: Thu Feb 09 23:47:32 2006
+ *
+ * Copyright (c) 2006 David Basden.
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * This driver has currently only been tested on the IRWave IR320ST-2
+ *
+ * PROTOCOL:
+ *
+ * The protocol for talking to the TOIM3232 is quite easy, and is
+ * designed to interface with RS232 with only level convertors. The
+ * BR/~D line on the chip is brought high to signal 'command mode',
+ * where a command byte is sent to select the baudrate of the RS232
+ * interface and the pulse length of the IRDA output. When BR/~D
+ * is brought low, the dongle then changes to the selected baudrate,
+ * and the RS232 interface is used for data until BR/~D is brought
+ * high again. The initial speed for the TOIMx232 after RESET is
+ * 9600 baud. The baudrate for command-mode is the last selected
+ * baud-rate, or 9600 after a RESET.
+ *
+ * The dongle I have (below) adds some extra hardware on the front end,
+ * but this is mostly directed towards parasitic power from the RS232
+ * line rather than changing very much about how to communicate with
+ * the TOIM3232.
+ *
+ * The protocol to talk to the TOIM4232 chipset seems to be almost
+ * identical to the TOIM3232 (and the 4232 datasheet is more detailed)
+ * so this code will probably work on that as well, although I haven't
+ * tested it on that hardware.
+ *
+ * Target dongle variations that might be common:
+ *
+ * DTR and RTS function:
+ * The data sheet for the 4232 has a sample implementation that hooks the
+ * DTR and RTS lines to the RESET and BaudRate/~Data lines of the
+ * chip (through line-converters). Given both DTR and RTS would have to
+ * be held low in normal operation, and the TOIMx232 requires +5V to
+ * signal ground, most dongle designers would almost certainly choose
+ * an implementation that kept at least one of DTR or RTS high in
+ * normal operation to provide power to the dongle, but will likely
+ * vary between designs.
+ *
+ * User specified command bits:
+ * There are two user-controllable output lines from the TOIMx232 that
+ * can be set low or high by setting the appropriate bits in the
+ * high-nibble of the command byte (when setting speed and pulse length).
+ * These might be used to switch on and off added hardware or extra
+ * dongle features.
+ *
+ *
+ * Target hardware: IRWave IR320ST-2
+ *
+ * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic
+ * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver.
+ * It uses a hex inverter and some discrete components to buffer and
+ * line convert the RS232 down to 5V.
+ *
+ * The dongle is powered through a voltage regulator, fed by a large
+ * capacitor. To switch the dongle on, DTR is brought high to charge
+ * the capacitor and drive the voltage regulator. DTR isn't associated
+ * with any control lines on the TOIM3232. Parasitic power is also taken
+ * from the RTS, TD and RD lines when brought high, but through resistors.
+ * When DTR is low, the circuit might lose power even with RTS high.
+ *
+ * RTS is inverted and attached to the BR/~D input pin. When RTS
+ * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode.
+ * When RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command
+ * mode'.
+ *
+ * For some unknown reason, the RESET line isn't actually connected
+ * to anything. This means to reset the dongle to get it to a known
+ * state (9600 baud) you must drop DTR and RTS low, wait for the power
+ * capacitor to discharge, and then bring DTR (and RTS for data mode)
+ * high again, and wait for the capacitor to charge, the power supply
+ * to stabilise, and the oscillator clock to stabilise.
+ *
+ * Fortunately, if the current baudrate is known, the chipset can
+ * easily change speed by entering command mode without having to
+ * reset the dongle first.
+ *
+ * Major Components:
+ *
+ * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings
+ * to IRDA pulse timings
+ * - 3.6864MHz crystal to drive TOIM3232 clock oscillator
+ * - DM74LS04M Inverting Hex line buffer for RS232 input buffering
+ * and level conversion
+ * - PJ2951AC 150mA voltage regulator
+ * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int toim3232delay = 150; /* default is 150 ms */
+module_param(toim3232delay, int, 0);
+MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
+
+static int toim3232_open(struct sir_dev *);
+static int toim3232_close(struct sir_dev *);
+static int toim3232_change_speed(struct sir_dev *, unsigned);
+static int toim3232_reset(struct sir_dev *);
+
+#define TOIM3232_115200 0x00
+#define TOIM3232_57600 0x01
+#define TOIM3232_38400 0x02
+#define TOIM3232_19200 0x03
+#define TOIM3232_9600 0x06
+#define TOIM3232_2400 0x0A
+
+#define TOIM3232_PW 0x10 /* Pulse select bit */
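+/*
+ * As with the Tekram dongle, the command byte combines the pulse-select bit
+ * with a baud-rate code, e.g. TOIM3232_PW | TOIM3232_9600 (0x16) for 9600
+ * baud in toim3232_change_speed() below.
+ */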
+
+static struct dongle_driver toim3232 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Vishay TOIM3232",
+ .type = IRDA_TOIM3232_DONGLE,
+ .open = toim3232_open,
+ .close = toim3232_close,
+ .reset = toim3232_reset,
+ .set_speed = toim3232_change_speed,
+};
+
+static int __init toim3232_sir_init(void)
+{
+ if (toim3232delay < 1 || toim3232delay > 500)
+ toim3232delay = 200;
+ pr_debug("%s - using %d ms delay\n",
+ toim3232.driver_name, toim3232delay);
+ return irda_register_dongle(&toim3232);
+}
+
+static void __exit toim3232_sir_cleanup(void)
+{
+ irda_unregister_dongle(&toim3232);
+}
+
+static int toim3232_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Pull the lines high to start with.
+ *
+ * For the IR320ST-2, we need to charge the main supply capacitor to
+ * switch the device on. We keep DTR high throughout to do this.
+ * When RTS, TD and RD are high, they will also trickle-charge the
+ * cap. RTS is high for data transmission, and low for baud rate select.
+ * -- DGB
+ */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+	/* The TOIM3232 supports many speeds between 1200bps and 115200bps.
+ * We really only care about those supported by the IRDA spec, but
+ * 38400 seems to be implemented in many places */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* From the tekram driver. Not sure what a reasonable value is -- DGB */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int toim3232_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function toim3232_change_speed (dev, state, speed)
+ *
+ * Set the speed for the TOIM3232 based dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. keep DTR high but clear RTS to bring into baud programming mode
+ * 2. wait at least 7us to enter programming mode
+ * 3. send control word to set baud rate and timing
+ * 4. wait at least 1us
+ * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver)
+ * 6. should take effect immediately (although probably worth waiting)
+ */
+
+#define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ /* Figure out what we are going to send as a control byte */
+ switch (speed) {
+ case 2400:
+ byte = TOIM3232_PW|TOIM3232_2400;
+ break;
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TOIM3232_PW|TOIM3232_9600;
+ break;
+ case 19200:
+ byte = TOIM3232_PW|TOIM3232_19200;
+ break;
+ case 38400:
+ byte = TOIM3232_PW|TOIM3232_38400;
+ break;
+ case 57600:
+ byte = TOIM3232_PW|TOIM3232_57600;
+ break;
+ case 115200:
+ byte = TOIM3232_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS: Go into baud programming mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TOIM3232_STATE_WAIT_SPEED;
+ delay = toim3232delay;
+ break;
+
+ case TOIM3232_STATE_WAIT_SPEED:
+		/* Have transmitted control byte. Wait for 'at least 1us'. */
+ udelay(14);
+
+ /* Set DTR, Set RTS: Go into normal data mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait (TODO: check this is needed) */
+ udelay(50);
+ break;
+
+ default:
+ printk(KERN_ERR "%s - undefined state %d\n", __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function toim3232_reset (driver)
+ *
+ * This function resets the toim3232 dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * What we should do is:
+ * 0. Pull RESET high
+ * 1. Wait for at least 7us
+ * 2. Pull RESET low
+ * 3. Wait for at least 7us
+ * 4. Pull BR/~D high
+ * 5. Wait for at least 7us
+ * 6. Send control byte to set baud rate
+ * 7. Wait at least 1us after stop bit
+ * 8. Pull BR/~D low
+ * 9. Should then be in data mode
+ *
+ * Because the IR320ST-2 doesn't have the RESET line connected for some reason,
+ * we'll have to do something else.
+ *
+ * The default speed after a RESET is 9600, so let's try just bringing it up in
+ * data mode after switching it off, waiting for the supply capacitor to
+ * discharge, and then switching it back on. This isn't actually pulling RESET
+ * high, but it seems to have the same effect.
+ *
+ * This behaviour will probably work on dongles that have the RESET line connected,
+ * but if not, add a flag for the IR320ST-2, and implement the above-listed proper
+ * behaviour.
+ *
+ * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we
+ * need to pull RTS low
+ */
+
+static int toim3232_reset(struct sir_dev *dev)
+{
+ /* Switch off both DTR and RTS to switch off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Should sleep a while. This might be evil doing it this way.*/
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(50));
+
+ /* Set DTR, Set RTS (data mode) */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10 ms for power to stabilize again */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+
+ /* Speed should now be 9600 */
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>");
+MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */
+
+module_init(toim3232_sir_init);
+module_exit(toim3232_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/via-ircc.c b/drivers/staging/irda/drivers/via-ircc.c
new file mode 100644
index 000000000000..ca4442a9d631
--- /dev/null
+++ b/drivers/staging/irda/drivers/via-ircc.c
@@ -0,0 +1,1593 @@
+/********************************************************************
+ Filename: via-ircc.c
+ Version: 1.0
+ Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ Author: VIA Technologies,inc
+ Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, see <http://www.gnu.org/licenses/>.
+
+F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
+F02 Oct/28/02: Add SB device ID for 3147 and 3177.
+ Comment :
+ jul/09/2002 : only implement two kind of dongle currently.
+ Oct/02/2002 : work on VT8231 and VT8233 .
+ Aug/06/2003 : change driver format to pci driver .
+
+2004-02-16: <sda@bdit.de>
+- Removed unneeded 'legacy' pci stuff.
+- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
+- On speed change from core, don't send SIR frame with new speed.
+ Use current speed and change speeds later.
+- Make module-param dongle_id actually work.
+- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
+ Tested with home-grown PCB on EPIA boards.
+- Code cleanup.
+
+ ********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "via-ircc.h"
+
+#define VIA_MODULE_NAME "via-ircc"
+#define CHIP_IO_EXTENT 0x40
+
+static char *driver_name = VIA_MODULE_NAME;
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id = 0; /* default: probe */
+
+/* We can't guess the type of connected dongle, user *must* supply it. */
+module_param(dongle_id, int, 0);
+
+/* Some prototypes */
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
+ unsigned int id);
+static int via_ircc_dma_receive(struct via_ircc_cb *self);
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase);
+static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static void via_hw_init(struct via_ircc_cb *self);
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
+static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
+static int via_ircc_is_receiving(struct via_ircc_cb *self);
+static int via_ircc_read_dongle_id(int iobase);
+
+static int via_ircc_net_open(struct net_device *dev);
+static int via_ircc_net_close(struct net_device *dev);
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id);
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
+static void hwreset(struct via_ircc_cb *self);
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
+static int upload_rxdata(struct via_ircc_cb *self, int iobase);
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
+static void via_remove_one(struct pci_dev *pdev);
+
+/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
+static void iodelay(int udelay)
+{
+ u8 data;
+ int i;
+
+ for (i = 0; i < udelay; i++) {
+ data = inb(0x80);
+ }
+}
+
+static const struct pci_device_id via_pci_tbl[] = {
+ { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
+ { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
+ { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
+ { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
+ { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci,via_pci_tbl);
+
+
+static struct pci_driver via_driver = {
+ .name = VIA_MODULE_NAME,
+ .id_table = via_pci_tbl,
+ .probe = via_init_one,
+ .remove = via_remove_one,
+};
+
+
+/*
+ * Function via_ircc_init ()
+ *
+ * Initialize chip. Just find out chip type and resource.
+ */
+static int __init via_ircc_init(void)
+{
+ int rc;
+
+ rc = pci_register_driver(&via_driver);
+ if (rc < 0) {
+ pr_debug("%s(): error rc = %d, returning -ENODEV...\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ int rc;
+ u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
+ u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
+ chipio_t info;
+
+ pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
+
+ rc = pci_enable_device (pcidev);
+ if (rc) {
+ pr_debug("%s(): error rc = %d\n", __func__, rc);
+ return -ENODEV;
+ }
+
+ // South Bridge exist
+ if ( ReadLPCReg(0x20) != 0x3C )
+ Chipset=0x3096;
+ else
+ Chipset=0x3076;
+
+ if (Chipset==0x3076) {
+ pr_debug("%s(): Chipset = 3076\n", __func__);
+
+ WriteLPCReg(7,0x0c );
+ temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
+ if((temp&0x01)==1) { // BIOS close or no FIR
+ WriteLPCReg(0x1d, 0x82 );
+ WriteLPCReg(0x23,0x18);
+ temp=ReadLPCReg(0xF0);
+ if((temp&0x01)==0) {
+ temp=(ReadLPCReg(0x74)&0x03); //DMA
+ FirDRQ0=temp + 4;
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2;
+ FirDRQ1=temp + 4;
+ } else {
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
+ FirDRQ0=temp + 4;
+ FirDRQ1=FirDRQ0;
+ }
+ FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
+ FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
+ FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ pci_read_config_byte(pcidev,0x40,&bTmp);
+ pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
+ pci_read_config_byte(pcidev,0x42,&bTmp);
+ pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
+ pci_write_config_byte(pcidev,0x5a,0xc0);
+ WriteLPCReg(0x28, 0x70 );
+ rc = via_ircc_open(pcidev, &info, 0x3076);
+ } else
+ rc = -ENODEV; //IR not turn on
+ } else { //Not VT1211
+ pr_debug("%s(): Chipset = 3096\n", __func__);
+
+		pci_read_config_byte(pcidev,0x67,&bTmp); //check whether the BIOS has enabled FIR
+		if((bTmp&0x01)==1) { // BIOS has enabled FIR
+ //Enable Double DMA clock
+ pci_read_config_byte(pcidev,0x42,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
+ pci_read_config_byte(pcidev,0x40,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
+ pci_read_config_byte(pcidev,0x44,&oldPCI_44);
+ pci_write_config_byte(pcidev,0x44,0x4e);
+ //---------- read configuration from Function0 of south bridge
+ if((bTmp&0x02)==0) {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4;
+ pci_read_config_byte(pcidev,0x44,&bTmp1);
+ FirDRQ1 = (bTmp1 & 0xc0) >> 6;
+ } else {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
+ FirDRQ1=0;
+ }
+ pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
+ FirIRQ = bTmp1 & 0x0f;
+
+ pci_read_config_byte(pcidev,0x69,&bTmp);
+			FirIOBase = bTmp << 8; //high byte
+ pci_read_config_byte(pcidev,0x68,&bTmp);
+ FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
+ //-------------------------
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
+ } else
+			rc = -ENODEV; //IR not turned on
+ }//Not VT1211
+
+ pr_debug("%s(): End - rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static void __exit via_ircc_cleanup(void)
+{
+ /* Cleanup all instances of the driver */
+ pci_unregister_driver (&via_driver);
+}
+
+static const struct net_device_ops via_ircc_sir_ops = {
+ .ndo_start_xmit = via_ircc_hard_xmit_sir,
+ .ndo_open = via_ircc_net_open,
+ .ndo_stop = via_ircc_net_close,
+ .ndo_do_ioctl = via_ircc_net_ioctl,
+};
+static const struct net_device_ops via_ircc_fir_ops = {
+ .ndo_start_xmit = via_ircc_hard_xmit_fir,
+ .ndo_open = via_ircc_net_open,
+ .ndo_stop = via_ircc_net_close,
+ .ndo_do_ioctl = via_ircc_net_ioctl,
+};
+
+/*
+ * Function via_ircc_open(pdev, iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
+{
+ struct net_device *dev;
+ struct via_ircc_cb *self;
+ int err;
+
+ /* Allocate new instance of the driver */
+ dev = alloc_irdadev(sizeof(struct via_ircc_cb));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ pci_set_drvdata(pdev, self);
+
+ /* Initialize Resource */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.dma2 = info->dma2;
+ self->io.fifo_size = 32;
+ self->chip_id = id;
+ self->st_fifo.len = 0;
+ self->RxDataReady = 0;
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* Check if user has supplied the dongle id or not */
+ if (!dongle_id)
+ dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
+ self->io.dongle_id = dongle_id;
+
+	/* The only value we must override is the baud rate */
+ /* Maximum speeds and capabilities are dongle-dependent. */
+ switch( self->io.dongle_id ){
+ case 0x0d:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
+ IR_576000 | IR_1152000 | (IR_4000000 << 8);
+ break;
+ default:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ break;
+ }
+
+ /* Following was used for testing:
+ *
+ * self->qos.baud_rate.bits = IR_9600;
+ *
+	 * It is no good, as it prohibits (error-prone) speed changes.
+ */
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384 + 2048;
+ self->tx_buff.truesize = 14384 + 2048;
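+	/*
+	 * Worked example of the formula above, assuming a data_size of
+	 * 2048 bytes and window_size = MAX_RX_WINDOW = 7:
+	 * (2048 + 6) * 7 + 6 = 14384, which is where the constant above
+	 * comes from; the extra 2048 bytes appear to be headroom for one
+	 * more maximum-sized frame.
+	 */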
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &via_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out4;
+
+ net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
+ dev->name);
+
+ /* Initialise the hardware..
+ */
+ self->io.speed = 9600;
+ via_hw_init(self);
+ return 0;
+ err_out4:
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ free_netdev(dev);
+ return err;
+}
+
+/*
+ * Function via_remove_one(pdev)
+ *
+ * Close driver instance
+ *
+ */
+static void via_remove_one(struct pci_dev *pdev)
+{
+ struct via_ircc_cb *self = pci_get_drvdata(pdev);
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ ResetChip(iobase, 5); //hardware reset.
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+ if (self->tx_buff.head)
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ if (self->rx_buff.head)
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ pci_disable_device(pdev);
+}
+
+/*
+ * Function via_hw_init(self)
+ *
+ *    Initialize and configure the hardware (no return value).
+ *
+ * Formerly via_ircc_setup
+ */
+static void via_hw_init(struct via_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ // FIFO Init
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+ EnTXFIFOReadyInt(iobase, OFF);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ if (ReadLPCReg(0x20) == 0x3c)
+ WriteLPCReg(0xF0, 0); // for VT1211
+ /* Int Init */
+ EnRXSpecInt(iobase, ON);
+
+ /* The following is basically hwreset */
+ /* If this is the case, why not just call hwreset() ? Jean II */
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+
+ self->io.speed = 9600;
+ self->st_fifo.len = 0;
+
+ via_ircc_change_dongle_speed(iobase, self->io.speed,
+ self->io.dongle_id);
+
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+}
+
+/*
+ * Function via_ircc_read_dongle_id (iobase)
+ *
+ */
+static int via_ircc_read_dongle_id(int iobase)
+{
+ net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
+ return 9; /* Default to IBM */
+}
+
+/*
+ * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ *    Change the speed of the attached dongle.
+ *    Only two types of dongle are currently implemented.
+ */
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id)
+{
+ u8 mode = 0;
+
+ /* speed is unused, as we use IsSIROn()/IsMIROn() */
+ speed = speed;
+
+ pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
+ __func__, speed, iobase, dongle_id);
+
+ switch (dongle_id) {
+
+ /* Note: The dongle_id's listed here are derived from
+ * nsc-ircc.c */
+
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ UseOneRX(iobase, ON); // use one RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if (IsSIROn(iobase)) { //sir
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ udelay(1000);
+ SlowIRRXLowActive(iobase, OFF);
+ } else {
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ }
+ }
+ }
+ break;
+
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF); // invert RX pin
+
+ EnRX2(iobase, ON);
+ EnGPIOtoRX2(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+ }
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ // TX On
+ WriteTX(iobase, ON);
+ udelay(20);
+ // Mode select OFF
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // TX Off
+ WriteTX(iobase, OFF);
+ }
+ }
+ break;
+
+ case 0x0d:
+ UseOneRX(iobase, OFF); // use two RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+ SlowIRRXLowActive(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //sir to rx2
+ } else { // fir mir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //fir to rx
+ }
+ break;
+
+ case 0x11: /* Temic TFDS4500 */
+
+ pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
+ __func__);
+
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, ON); // invert RX pin
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if( IsSIROn(iobase) ){ //sir
+
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+
+ } else{
+ pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
+ __func__);
+ }
+ break;
+
+ case 0x0ff: /* Vishay */
+ if (IsSIROn(iobase))
+ mode = 0;
+ else if (IsMIROn(iobase))
+ mode = 1;
+ else if (IsFIROn(iobase))
+ mode = 2;
+ else if (IsVFIROn(iobase))
+ mode = 5; //VFIR-16
+ SI_SetMode(iobase, mode);
+ break;
+
+ default:
+ net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
+ __func__, dongle_id);
+ }
+}
+
+/*
+ * Function via_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev = self->netdev;
+ u16 iobase;
+ u8 value = 0, bTmp;
+
+ iobase = self->io.fir_base;
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+ pr_debug("%s: change_speed to %d bps.\n", __func__, speed);
+
+ WriteReg(iobase, I_ST_CT_0, 0x0);
+
+	/* Controller mode selection */
+ switch (speed) {
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ value = (115200/speed)-1;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 576000:
+ /* FIXME: this can't be right, as it's the same as 115200,
+ * and 576000 is MIR, not SIR. */
+ value = 0;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 1152000:
+ value = 0;
+ SetMIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ case 4000000:
+ value = 0;
+ SetFIR(iobase, ON);
+ SetPulseWidth(iobase, 0);
+ SetSendPreambleCount(iobase, 14);
+ CRC16(iobase, OFF);
+ EnTXCRC(iobase, ON);
+ break;
+ case 16000000:
+ value = 0;
+ SetVFIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ default:
+ value = 0;
+ break;
+ }
+
+ /* Set baudrate to 0x19[2..7] */
+ bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ bTmp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, bTmp);
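+	/*
+	 * For reference, the SIR divisor written above follows
+	 * value = (115200 / speed) - 1: 9600 baud gives 11, 19200 gives 5,
+	 * 38400 gives 2, 57600 gives 1 and 115200 gives 0, matching the
+	 * table that SetBaudRate() in via-ircc.h spells out explicitly.
+	 */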
+
+ /* Some dongles may need to be informed about speed changes. */
+ via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO size to 64 */
+ SetFIFO(iobase, 64);
+
+ /* Enable IR */
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ // EnTXFIFOHalfLevelInt(iobase,ON);
+
+ /* Enable some interrupts so we can receive frames */
+ //EnAllInt(iobase,ON);
+
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->netdev_ops = &via_ircc_fir_ops;
+ via_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->netdev_ops = &via_ircc_sir_ops;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function via_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ u16 iobase;
+ __u32 speed;
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len =
+ async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+ /* Send this frame with old speed */
+ SetBaudRate(iobase, self->io.speed);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
+
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ SetSendByte(iobase, self->tx_buff.len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ u16 iobase;
+ __u32 speed;
+ unsigned long flags;
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ if (self->st_fifo.len)
+ return NETDEV_TX_OK;
+ if (self->chip_id == 0x3076)
+ iodelay(1500);
+ else
+ udelay(1500);
+ netif_stop_queue(dev);
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+
+ self->tx_fifo.tail += skb->len;
+ dev->stats.tx_bytes += skb->len;
+ skb_copy_from_linear_data(skb,
+ self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+//F01 if (self->tx_fifo.len == 1) {
+ via_ircc_dma_xmit(self, iobase);
+//F01 }
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return NETDEV_TX_OK;
+
+}
+
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
+{
+ EnTXDMA(iobase, OFF);
+ self->io.direction = IO_XMIT;
+ EnPhys(iobase, ON);
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
+ pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+ __func__, self->tx_fifo.ptr,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ self->tx_fifo.len);
+
+ SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+ return 0;
+
+}
+
+/*
+ * Function via_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler.
+ *
+ */
+static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
+{
+ int iobase;
+ u8 Tx_status;
+
+ iobase = self->io.fir_base;
+ /* Disable DMA */
+// DisableDmaChannel(self->io.dma);
+ /* Check for underrun! */
+ /* Clear bit, by writing 1 into it */
+ Tx_status = GetTXStatus(iobase);
+ if (Tx_status & 0x08) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+ hwreset(self);
+ /* how to clear underrun? */
+ } else {
+ self->netdev->stats.tx_packets++;
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ }
+ /* Check if we need to change the speed */
+ if (self->new_speed) {
+ via_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ if (IsFIROn(iobase)) {
+ if (self->tx_fifo.len) {
+ self->tx_fifo.len--;
+ self->tx_fifo.ptr++;
+ }
+ }
+ pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+ __func__,
+ self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+/* F01_S
+ // Any frames to be sent back-to-back?
+ if (self->tx_fifo.len) {
+ // Not finished yet!
+ via_ircc_dma_xmit(self, iobase);
+ ret = FALSE;
+ } else {
+F01_E*/
+ // Reset Tx FIFO info
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+//F01 }
+
+ // Make sure we have room for more frames
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
+ // Not busy transmitting anymore
+ // Tell the network layer, that we can accept more frames
+ netif_wake_queue(self->netdev);
+//F01 }
+ return TRUE;
+}
+
+/*
+ * Function via_ircc_dma_receive (self)
+ *
+ * Set up the hardware configuration to receive a frame.
+ *
+ */
+static int via_ircc_dma_receive(struct via_ircc_cb *self)
+{
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ self->RxDataReady = 0;
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ EnPhys(iobase, ON);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, ON);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, ON);
+ irda_setup_dma(self->io.dma2, self->rx_buff_dma,
+ self->rx_buff.truesize, DMA_RX_MODE);
+ TXStart(iobase, OFF);
+ RXStart(iobase, ON);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_dma_receive_complete (self)
+ *
+ * The controller has finished receiving frames;
+ * this routine is called by the ISR.
+ *
+ */
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len, i;
+ u8 status = 0;
+
+ iobase = self->io.fir_base;
+ st_fifo = &self->st_fifo;
+
+ if (self->io.speed < 4000000) { //Speed below FIR
+ len = GetRecvByte(iobase, self);
+ skb = dev_alloc_skb(len + 1);
+ if (skb == NULL)
+ return FALSE;
+ // Make sure IP header gets aligned
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 2);
+ if (self->chip_id == 0x3076) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] = self->rx_buff.data[i * 2];
+ } else {
+ if (self->chip_id == 0x3096) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] =
+ self->rx_buff.data[i];
+ }
+ }
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ return TRUE;
+	} else {	//FIR mode
+ len = GetRecvByte(iobase, self);
+ if (len == 0)
+			return TRUE; //interrupt only, data may have been moved by RxT
+ if (((len - 4) < 2) || ((len - 4) > 2048)) {
+ pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
+ __func__, len, RxCurCount(iobase, self),
+ self->RxLastCount);
+ hwreset(self);
+ return FALSE;
+ }
+ pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+ __func__,
+ st_fifo->len, len - 4, RxCurCount(iobase, self));
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ self->RxDataReady = 0;
+
+		// Up to MAX_RX_WINDOW packets may have been received by
+		// receive_complete before the timer IRQ arrives
+/* F01_S
+ if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
+ RXStart(iobase,ON);
+ SetTimer(iobase,4);
+ }
+ else {
+F01_E */
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+//F01_S
+ // Put this entry back in fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if frame size, data ptr, or skb ptr are wrong, then get next
+ * entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL) ||
+ (self->rx_buff.data == NULL) || (len < 6)) {
+ self->netdev->stats.rx_dropped++;
+ kfree_skb(skb);
+ return TRUE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
+ pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
+ len - 4, self->rx_buff.data);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+
+//F01_E
+ } //FIR
+ return TRUE;
+
+}
+
+/*
+ * If a frame has been received but no interrupt was raised, use this routine to upload it.
+ */
+static int upload_rxdata(struct via_ircc_cb *self, int iobase)
+{
+ struct sk_buff *skb;
+ int len;
+ struct st_fifo *st_fifo;
+ st_fifo = &self->st_fifo;
+
+ len = GetRecvByte(iobase, self);
+
+ pr_debug("%s(): len=%x\n", __func__, len);
+
+ if ((len - 4) < 2) {
+ self->netdev->stats.rx_dropped++;
+ return FALSE;
+ }
+
+ skb = dev_alloc_skb(len + 1);
+ if (skb == NULL) {
+ self->netdev->stats.rx_dropped++;
+ return FALSE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4 + 1);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
+ RXStart(iobase, ON);
+ } else {
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ }
+ return TRUE;
+}
+
+/*
+ * Implements back-to-back receive; use this routine to upload data.
+ */
+
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len;
+ u8 status;
+
+ st_fifo = &self->st_fifo;
+
+ if (CkRxRecv(iobase, self)) {
+		// still receiving: return without uploading the frame
+ self->RetryCount = 0;
+ SetTimer(iobase, 20);
+ self->RxDataReady++;
+ return FALSE;
+ } else
+ self->RetryCount++;
+
+ if ((self->RetryCount >= 1) ||
+ ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
+ (st_fifo->len >= (MAX_RX_WINDOW))) {
+ while (st_fifo->len > 0) { //upload frame
+ // Put this entry back in fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if frame size, data ptr, or skb ptr are wrong,
+ * then get next entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL) ||
+ (self->rx_buff.data == NULL) || (len < 6)) {
+ self->netdev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
+
+ pr_debug("%s(): len=%x.head=%x\n", __func__,
+ len - 4, st_fifo->head);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ } //while
+ self->RetryCount = 0;
+
+ pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+ __func__, GetHostStatus(iobase), GetRXStatus(iobase));
+
+ /*
+		 * If a frame finished receiving while we were in this routine,
+		 * upload it now.
+ */
+ if ((GetRXStatus(iobase) & 0x10) &&
+ (RxCurCount(iobase, self) != self->RxLastCount)) {
+ upload_rxdata(self, iobase);
+ if (irda_device_txqueue_empty(self->netdev))
+ via_ircc_dma_receive(self);
+ }
+ } // timer detect complete
+ else
+ SetTimer(iobase, 4);
+ return TRUE;
+
+}
+
+
+
+/*
+ * Function via_ircc_interrupt (irq, dev_id)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct via_ircc_cb *self = netdev_priv(dev);
+ int iobase;
+ u8 iHostIntType, iRxIntType, iTxIntType;
+
+ iobase = self->io.fir_base;
+ spin_lock(&self->lock);
+ iHostIntType = GetHostStatus(iobase);
+
+ pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n",
+ __func__, iHostIntType,
+ (iHostIntType & 0x40) ? "Timer" : "",
+ (iHostIntType & 0x20) ? "Tx" : "",
+ (iHostIntType & 0x10) ? "Rx" : "",
+ (iHostIntType & 0x0e) >> 1);
+
+ if ((iHostIntType & 0x40) != 0) { //Timer Event
+ self->EventFlag.TimeOut++;
+ ClearTimerInt(iobase, 1);
+ if (self->io.direction == IO_XMIT) {
+ via_ircc_dma_xmit(self, iobase);
+ }
+ if (self->io.direction == IO_RECV) {
+ /*
+			 * The frame-ready condition has been held too long; reset.
+ */
+ if (self->RxDataReady > 30) {
+ hwreset(self);
+ if (irda_device_txqueue_empty(self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else { // call this to upload frame.
+ RxTimerHandler(self, iobase);
+ }
+ } //RECV
+ } //Timer Event
+ if ((iHostIntType & 0x20) != 0) { //Tx Event
+ iTxIntType = GetTXStatus(iobase);
+
+ pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n",
+ __func__, iTxIntType,
+ (iTxIntType & 0x08) ? "FIFO underr." : "",
+ (iTxIntType & 0x04) ? "EOM" : "",
+ (iTxIntType & 0x02) ? "FIFO ready" : "",
+ (iTxIntType & 0x01) ? "Early EOM" : "");
+
+ if (iTxIntType & 0x4) {
+ self->EventFlag.EOMessage++; // read and will auto clean
+ if (via_ircc_dma_xmit_complete(self)) {
+ if (irda_device_txqueue_empty
+ (self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else {
+ self->EventFlag.Unknown++;
+ }
+ } //EOP
+ } //Tx Event
+ //----------------------------------------
+ if ((iHostIntType & 0x10) != 0) { //Rx Event
+ /* Check if DMA has finished */
+ iRxIntType = GetRXStatus(iobase);
+
+ pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
+ __func__, iRxIntType,
+ (iRxIntType & 0x80) ? "PHY err." : "",
+ (iRxIntType & 0x40) ? "CRC err" : "",
+ (iRxIntType & 0x20) ? "FIFO overr." : "",
+ (iRxIntType & 0x10) ? "EOF" : "",
+ (iRxIntType & 0x08) ? "RxData" : "",
+ (iRxIntType & 0x02) ? "RxMaxLen" : "",
+ (iRxIntType & 0x01) ? "SIR bad" : "");
+ if (!iRxIntType)
+ pr_debug("%s(): RxIRQ =0\n", __func__);
+
+ if (iRxIntType & 0x10) {
+ if (via_ircc_dma_receive_complete(self, iobase)) {
+//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
+ via_ircc_dma_receive(self);
+ }
+ } // No ERR
+ else { //ERR
+ pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+ __func__, iRxIntType, iHostIntType,
+ RxCurCount(iobase, self), self->RxLastCount);
+
+ if (iRxIntType & 0x20) { //FIFO OverRun ERR
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ } else { //PHY,CRC ERR
+
+ if (iRxIntType != 0x08)
+ hwreset(self); //F01
+ }
+ via_ircc_dma_receive(self);
+ } //ERR
+
+ } //Rx Event
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(iHostIntType);
+}
+
+static void hwreset(struct via_ircc_cb *self)
+{
+ int iobase;
+ iobase = self->io.fir_base;
+
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ /* Restore speed. */
+ via_ircc_change_speed(self, self->io.speed);
+
+ self->st_fifo.len = 0;
+}
+
+/*
+ * Function via_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int via_ircc_is_receiving(struct via_ircc_cb *self)
+{
+ int status = FALSE;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ iobase = self->io.fir_base;
+ if (CkRxRecv(iobase, self))
+ status = TRUE;
+
+ pr_debug("%s(): status=%x....\n", __func__, status);
+
+ return status;
+}
+
+
+/*
+ * Function via_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int via_ircc_net_open(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ dev->stats.rx_packets = 0;
+ IRDA_ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+ if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+ if (self->io.dma2 != self->io.dma) {
+ if (request_dma(self->io.dma2, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
+ driver_name, self->io.dma2);
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+ return -EAGAIN;
+ }
+ }
+
+
+ /* turn on interrupts */
+ EnAllInt(iobase, ON);
+ EnInternalLoop(iobase, OFF);
+ EnExternalLoop(iobase, OFF);
+
+ /* */
+ via_ircc_dma_receive(self);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ sprintf(hwname, "VIA @ 0x%x", iobase);
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ self->RxLastCount = 0;
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int via_ircc_net_close(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ iobase = self->io.fir_base;
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ DisableDmaChannel(self->io.dma);
+
+ /* Disable interrupts */
+ EnAllInt(iobase, OFF);
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+ if (self->io.dma2 != self->io.dma)
+ free_dma(self->io.dma2);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return -1;);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
+ cmd);
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->lock, flags);
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ via_ircc_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = via_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+MODULE_AUTHOR("VIA Technologies,inc");
+MODULE_DESCRIPTION("VIA IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+module_init(via_ircc_init);
+module_exit(via_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/via-ircc.h b/drivers/staging/irda/drivers/via-ircc.h
new file mode 100644
index 000000000000..ac1525573398
--- /dev/null
+++ b/drivers/staging/irda/drivers/via-ircc.h
@@ -0,0 +1,846 @@
+/*********************************************************************
+ *
+ * Filename: via-ircc.h
+ * Version: 1.0
+ * Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ * Author: VIA Technologies, inc
+ * Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, see <http://www.gnu.org/licenses/>.
+
+ * Comment:
+ * jul/08/2002 : Rx buffer length should use Rx ring ptr.
+ * Oct/28/2002 : Add SB id for 3147 and 3177.
+ * jul/09/2002 : only two kinds of dongle are implemented currently.
+ * Oct/02/2002 : works on VT8231 and VT8233.
+ * Aug/06/2003 : changed driver format to a PCI driver.
+ ********************************************************************/
+#ifndef via_IRCC_H
+#define via_IRCC_H
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW + 2];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
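+
+/*
+ * The st_fifo above is used as a small ring of received-frame
+ * descriptors: the receive paths write at 'tail' and consume at 'head',
+ * and both indices wrap back to 0 once they pass MAX_RX_WINDOW (see
+ * via_ircc_dma_receive_complete() and RxTimerHandler() in via-ircc.c).
+ */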
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW + 2]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+
+struct eventflag // for keeping track of Interrupt Events
+{
+ //--------tx part
+ unsigned char TxFIFOUnderRun;
+ unsigned char EOMessage;
+ unsigned char TxFIFOReady;
+ unsigned char EarlyEOM;
+ //--------rx part
+ unsigned char PHYErr;
+ unsigned char CRCErr;
+ unsigned char RxFIFOOverRun;
+ unsigned char EOPacket;
+ unsigned char RxAvail;
+ unsigned char TooLargePacket;
+ unsigned char SIRBad;
+ //--------unknown
+ unsigned char Unknown;
+ //----------
+ unsigned char TimeOut;
+ unsigned char RxDMATC;
+ unsigned char TxDMATC;
+};
+
+/* Private data for each instance */
+struct via_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+ struct irlap_cb *irlap; /* The link layer we are binded to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 flags; /* Interface flags */
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ struct eventflag EventFlag;
+ unsigned int chip_id; /* to remember chip id */
+ unsigned int RetryCount;
+ unsigned int RxDataReady;
+ unsigned int RxLastCount;
+};
+
+
+//---------I=Infrared, H=Host, M=Misc, T=Tx, R=Rx, ST=Status,
+// CF=Config, CT=Control, L=Low, H=High, C=Count
+#define I_CF_L_0 0x10
+#define I_CF_H_0 0x11
+#define I_SIR_BOF 0x12
+#define I_SIR_EOF 0x13
+#define I_ST_CT_0 0x15
+#define I_ST_L_1 0x16
+#define I_ST_H_1 0x17
+#define I_CF_L_1 0x18
+#define I_CF_H_1 0x19
+#define I_CF_L_2 0x1a
+#define I_CF_H_2 0x1b
+#define I_CF_3 0x1e
+#define H_CT 0x20
+#define H_ST 0x21
+#define M_CT 0x22
+#define TX_CT_1 0x23
+#define TX_CT_2 0x24
+#define TX_ST 0x25
+#define RX_CT 0x26
+#define RX_ST 0x27
+#define RESET 0x28
+#define P_ADDR 0x29
+#define RX_C_L 0x2a
+#define RX_C_H 0x2b
+#define RX_P_L 0x2c
+#define RX_P_H 0x2d
+#define TX_C_L 0x2e
+#define TX_C_H 0x2f
+#define TIMER 0x32
+#define I_CF_4 0x33
+#define I_T_C_L 0x34
+#define I_T_C_H 0x35
+#define VERSION 0x3f
+//-------------------------------
+#define StartAddr 0x10 // the first register address
+#define EndAddr 0x3f // the last register address
+#define GetBit(val,bit) val = (unsigned char) ((val>>bit) & 0x1)
+ // Returns the bit
+#define SetBit(val,bit) val= (unsigned char ) (val | (0x1 << bit))
+ // Sets bit to 1
+#define ResetBit(val,bit) val= (unsigned char ) (val & ~(0x1 << bit))
+ // Sets bit to 0
+
+#define OFF 0
+#define ON 1
+#define DMA_TX_MODE 0x08
+#define DMA_RX_MODE 0x04
+
+#define DMA1 0
+#define DMA2 0xc0
+#define MASK1 DMA1+0x0a
+#define MASK2 DMA2+0x14
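+
+/*
+ * MASK1/MASK2 are the single-mask registers of the two legacy 8237 DMA
+ * controllers (I/O ports 0x0a and 0xd4); DisableDmaChannel() below
+ * writes 0x04 plus the controller-local channel number to mask off
+ * (disable) the given channel.
+ */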
+
+#define Clk_bit 0x40
+#define Tx_bit 0x01
+#define Rd_Valid 0x08
+#define RxBit 0x08
+
+static void DisableDmaChannel(unsigned int channel)
+{
+ switch (channel) { // 8 Bit DMA channels DMAC1
+ case 0:
+ outb(4, MASK1); //mask channel 0
+ break;
+ case 1:
+ outb(5, MASK1); //Mask channel 1
+ break;
+ case 2:
+ outb(6, MASK1); //Mask channel 2
+ break;
+ case 3:
+ outb(7, MASK1); //Mask channel 3
+ break;
+ case 5:
+ outb(5, MASK2); //Mask channel 5
+ break;
+ case 6:
+ outb(6, MASK2); //Mask channel 6
+ break;
+ case 7:
+ outb(7, MASK2); //Mask channel 7
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned char ReadLPCReg(int iRegNum)
+{
+ unsigned char iVal;
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ iVal = inb(0x2f);
+ outb(0xaa, 0x2e);
+
+ return iVal;
+}
+
+static void WriteLPCReg(int iRegNum, unsigned char iVal)
+{
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ outb(iVal, 0x2f);
+ outb(0xAA, 0x2e);
+}
+
+static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
+{
+ return (__u8) inb(BaseAddr + iRegNum);
+}
+
+static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
+{
+ outb(iVal, BaseAddr + iRegNum);
+}
+
+static int WriteRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos, unsigned char value)
+{
+ __u8 Rtemp, Wtemp;
+
+ if (BitPos > 7) {
+ return -1;
+ }
+ if ((RegNum < StartAddr) || (RegNum > EndAddr))
+ return -1;
+ Rtemp = ReadReg(BaseAddr, RegNum);
+ if (value == 0)
+ Wtemp = ResetBit(Rtemp, BitPos);
+ else {
+ if (value == 1)
+ Wtemp = SetBit(Rtemp, BitPos);
+ else
+ return -1;
+ }
+ WriteReg(BaseAddr, RegNum, Wtemp);
+ return 0;
+}
+
+static __u8 CheckRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos)
+{
+ __u8 temp;
+
+ if (BitPos > 7)
+ return 0xff;
+ if ((RegNum < StartAddr) || (RegNum > EndAddr)) {
+// printf("what is the register %x!\n",RegNum);
+ }
+ temp = ReadReg(BaseAddr, RegNum);
+ return GetBit(temp, BitPos);
+}
+
+static void SetMaxRxPacketSize(__u16 iobase, __u16 size)
+{
+ __u16 low, high;
+ if ((size & 0xe000) == 0) {
+ low = size & 0x00ff;
+ high = (size & 0x1f00) >> 8;
+ WriteReg(iobase, I_CF_L_2, low);
+ WriteReg(iobase, I_CF_H_2, high);
+
+ }
+
+}
+
+//for both Rx and Tx
+
+static void SetFIFO(__u16 iobase, __u16 value)
+{
+ switch (value) {
+ case 128:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 1);
+ break;
+ case 64:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ case 32:
+ WriteRegBit(iobase, 0x11, 0, 1);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ default:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ }
+
+}
+
+#define CRC16(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,7,val) //0 for 32 CRC
+/*
+#define SetVFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,5,val)
+#define SetFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,6,val)
+#define SetMIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,5,val)
+#define SetSIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,4,val)
+*/
+#define SIRFilter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,3,val)
+#define Filter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,2,val)
+#define InvertTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,1,val)
+#define InvertRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,0,val)
+//****************************I_CF_H_0
+#define EnableTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,4,val)
+#define EnableRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,3,val)
+#define EnableDMA(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,2,val)
+#define SIRRecvAny(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,1,val)
+#define DiableTrans(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,0,val)
+//***************************I_SIR_BOF,I_SIR_EOF
+#define SetSIRBOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_BOF,val)
+#define SetSIREOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_EOF,val)
+#define GetSIRBOF(BaseAddr) ReadReg(BaseAddr,I_SIR_BOF)
+#define GetSIREOF(BaseAddr) ReadReg(BaseAddr,I_SIR_EOF)
+//*******************I_ST_CT_0
+#define EnPhys(BaseAddr,val) WriteRegBit(BaseAddr,I_ST_CT_0,7,val)
+#define IsModeError(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,6) //RO
+#define IsVFIROn(BaseAddr) CheckRegBit(BaseAddr,0x14,0) //RO for VT1211 only
+#define IsFIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,5) //RO
+#define IsMIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,4) //RO
+#define IsSIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,3) //RO
+#define IsEnableTX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,2) //RO
+#define IsEnableRX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,1) //RO
+#define Is16CRC(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,0) //RO
+//***************************I_CF_3
+#define DisableAdjacentPulseWidth(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,5,val) //1 disable
+#define DisablePulseWidthAdjust(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,4,val) //1 disable
+#define UseOneRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,1,val) //0 use two RX
+#define SlowIRRXLowActive(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,0,val) //0 show RX high=1 in SIR
+//***************************H_CT
+#define EnAllInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,7,val)
+#define TXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,6,val)
+#define RXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,5,val)
+#define ClearRXInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,4,val) // 1 clear
+//*****************H_ST
+#define IsRXInt(BaseAddr) CheckRegBit(BaseAddr,H_ST,4)
+#define GetIntIndentify(BaseAddr) ((ReadReg(BaseAddr,H_ST)&0xf1) >>1)
+#define IsHostBusy(BaseAddr) CheckRegBit(BaseAddr,H_ST,0)
+#define GetHostStatus(BaseAddr) ReadReg(BaseAddr,H_ST) //RO
+//**************************M_CT
+#define EnTXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,7,val)
+#define EnRXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,6,val)
+#define SwapDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,5,val)
+#define EnInternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,4,val)
+#define EnExternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,3,val)
+//**************************TX_CT_1
+#define EnTXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,4,val) //half empty int (1 half)
+#define EnTXFIFOUnderrunEOMInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,5,val)
+#define EnTXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,6,val) //interrupt when the threshold is reached (set by bit 4)
+//**************************TX_CT_2
+#define ForceUnderrun(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,7,val) // force an underrun int
+#define EnTXCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,6,val) //1 for FIR,MIR...0 (not SIR)
+#define ForceBADCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,5,val) //force a bad CRC
+#define SendSIP(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,4,val) //send an indication pulse to prevent SIR disturbance
+#define ClearEnTX(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,3,val) // opposite to EnTX
+//*****************TX_ST
+#define GetTXStatus(BaseAddr) ReadReg(BaseAddr,TX_ST) //RO
+//**************************RX_CT
+#define EnRXSpecInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,0,val)
+#define EnRXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,1,val) //enable interrupt when the threshold is reached (set by bit 7)
+#define EnRXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,7,val) //enable int when (1) half full...or (0) just not full
+//*****************RX_ST
+#define GetRXStatus(BaseAddr) ReadReg(BaseAddr,RX_ST) //RO
+//***********************P_ADDR
+#define SetPacketAddr(BaseAddr,addr) WriteReg(BaseAddr,P_ADDR,addr)
+//***********************I_CF_4
+#define EnGPIOtoRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,7,val)
+#define EnTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,1,val)
+#define ClearTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,0,val)
+//***********************I_T_C_L
+#define WriteGIO(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,7,val)
+#define ReadGIO(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,7)
+#define ReadRX(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,3) //RO
+#define WriteTX(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,0,val)
+//***********************I_T_C_H
+#define EnRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_H,7,val)
+#define ReadRX2(BaseAddr) CheckRegBit(BaseAddr,I_T_C_H,7)
+//**********************Version
+#define GetFIRVersion(BaseAddr) ReadReg(BaseAddr,VERSION)
+
+
+static void SetTimer(__u16 iobase, __u8 count)
+{
+ EnTimerInt(iobase, OFF);
+ WriteReg(iobase, TIMER, count);
+ EnTimerInt(iobase, ON);
+}
+
+
+static void SetSendByte(__u16 iobase, __u32 count)
+{
+ __u32 low, high;
+
+ if ((count & 0xf000) == 0) {
+ low = count & 0x00ff;
+ high = (count & 0x0f00) >> 8;
+ WriteReg(iobase, TX_C_L, low);
+ WriteReg(iobase, TX_C_H, high);
+ }
+}
+
+static void ResetChip(__u16 iobase, __u8 type)
+{
+ __u8 value;
+
+ value = (type + 2) << 4;
+ WriteReg(iobase, RESET, type);
+}
+
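+/*
+ * CkRxRecv(): sample the RX counter twice, 10 us apart; if the value
+ * changed in between, bytes are still arriving, so report that a
+ * reception is in progress.
+ */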
+static int CkRxRecv(__u16 iobase, struct via_ircc_cb *self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0, wTmp_new = 0;
+
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ udelay(10);
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp_new = (wTmp1 << 8) | low;
+ if (wTmp_new != wTmp)
+ return 1;
+ else
+ return 0;
+
+}
+
+static __u16 RxCurCount(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ return wTmp;
+}
+
+/* This routine may only be used in receive_complete,
+ * since it updates the last count.
+ */
+
+static __u16 GetRecvByte(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp, wTmp1, ret;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+
+
+ if (wTmp >= self->RxLastCount)
+ ret = wTmp - self->RxLastCount;
+ else
+ ret = (0x8000 - self->RxLastCount) + wTmp;
+ self->RxLastCount = wTmp;
+
+/* RX_P is more actually the RX_C
+ low=ReadReg(iobase,RX_C_L);
+ high=ReadReg(iobase,RX_C_H);
+
+ if(!(high&0xe000)) {
+ temp=(high<<8)+low;
+ return temp;
+ }
+ else return 0;
+*/
+ return ret;
+}
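+
+/*
+ * Example of the wrap-around handling in GetRecvByte(): if RxLastCount
+ * was 0x7ff0 and the counter has since wrapped around to 0x0010, the
+ * returned length is (0x8000 - 0x7ff0) + 0x0010 = 0x20 bytes.
+ */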
+
+static void Sdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x20; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+static void Tdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x50; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+
+static void ActClk(__u16 iobase, __u8 value)
+{
+ __u8 bTmp;
+ bTmp = ReadReg(iobase, 0x34);
+ if (value)
+ WriteReg(iobase, 0x34, bTmp | Clk_bit);
+ else
+ WriteReg(iobase, 0x34, bTmp & ~Clk_bit);
+}
+
+static void ClkTx(__u16 iobase, __u8 Clk, __u8 Tx)
+{
+ __u8 bTmp;
+
+ bTmp = ReadReg(iobase, 0x34);
+ if (Clk == 0)
+ bTmp &= ~Clk_bit;
+ else {
+ if (Clk == 1)
+ bTmp |= Clk_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+ Sdelay(1);
+ if (Tx == 0)
+ bTmp &= ~Tx_bit;
+ else {
+ if (Tx == 1)
+ bTmp |= Tx_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+}
+
+static void Wr_Byte(__u16 iobase, __u8 data)
+{
+ __u8 bData = data;
+// __u8 btmp;
+ int i;
+
+ ClkTx(iobase, 0, 1);
+
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+
+ for (i = 0; i < 8; i++) { //LDN
+
+ if ((bData >> i) & 0x01) {
+ ClkTx(iobase, 0, 1); //bit data = 1;
+ } else {
+			ClkTx(iobase, 0, 0);	//bit data = 0;
+ }
+ Tdelay(2);
+ Sdelay(1);
+ ActClk(iobase, 1); //clk hi
+ Tdelay(1);
+ }
+}
+
+static __u8 Rd_Indx(__u16 iobase, __u8 addr, __u8 index)
+{
+ __u8 data = 0, bTmp, data_bit;
+ int i;
+
+ bTmp = addr | (index << 1) | 0;
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ Wr_Byte(iobase, bTmp);
+ Sdelay(1);
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ for (i = 0; i < 10; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ ClkTx(iobase, 0, 1);
+ Tdelay(1);
+ bTmp = ReadReg(iobase, 0x34);
+ if (!(bTmp & Rd_Valid))
+ break;
+ }
+ if (!(bTmp & Rd_Valid)) {
+ for (i = 0; i < 8; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ bTmp = ReadReg(iobase, 0x34);
+ data_bit = 1 << i;
+ if (bTmp & RxBit)
+ data |= data_bit;
+ else
+ data &= ~data_bit;
+ Tdelay(2);
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ bTmp = ReadReg(iobase, 0x34);
+ }
+ for (i = 0; i < 1; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 3; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ return data;
+}
+
+static void Wr_Indx(__u16 iobase, __u8 addr, __u8 index, __u8 data)
+{
+ int i;
+ __u8 bTmp;
+
+ ClkTx(iobase, 0, 0);
+ udelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ bTmp = addr | (index << 1) | 1;
+ Wr_Byte(iobase, bTmp);
+ Wr_Byte(iobase, data);
+ for (i = 0; i < 2; i++) {
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void ResetDongle(__u16 iobase)
+{
+ int i;
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 30; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void SetSITmode(__u16 iobase)
+{
+
+ __u8 bTmp;
+
+ bTmp = ReadLPCReg(0x28);
+ WriteLPCReg(0x28, bTmp | 0x10); //select ITMOFF
+ bTmp = ReadReg(iobase, 0x35);
+ WriteReg(iobase, 0x35, bTmp | 0x40); // Driver ITMOFF
+ WriteReg(iobase, 0x28, bTmp | 0x80); // enable All interrupt
+}
+
+static void SI_SetMode(__u16 iobase, int mode)
+{
+ //__u32 dTmp;
+ __u8 bTmp;
+
+ WriteLPCReg(0x28, 0x70); // S/W Reset
+ SetSITmode(iobase);
+ ResetDongle(iobase);
+ udelay(10);
+ Wr_Indx(iobase, 0x40, 0x0, 0x17); //RX ,APEN enable,Normal power
+ Wr_Indx(iobase, 0x40, 0x1, mode); //Set Mode
+ Wr_Indx(iobase, 0x40, 0x2, 0xff); //Set power to FIR VFIR > 1m
+ bTmp = Rd_Indx(iobase, 0x40, 1);
+}
+
+static void InitCard(__u16 iobase)
+{
+ ResetChip(iobase, 5);
+ WriteReg(iobase, I_ST_CT_0, 0x00); // open CHIP on
+ SetSIRBOF(iobase, 0xc0); // hardware default value
+ SetSIREOF(iobase, 0xc1);
+}
+
+static void CommonInit(__u16 iobase)
+{
+// EnTXCRC(iobase,0);
+ SwapDMA(iobase, OFF);
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+// EnTXFIFOReadyInt(iobase,ON);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+// WriteLPCReg(0xF0,0); //(if VT1211 then do this)
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+ EnRXSpecInt(iobase, ON);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+ EnableDMA(iobase, ON);
+}
+
+static void SetBaudRate(__u16 iobase, __u32 rate)
+{
+ __u8 value = 11, temp;
+
+ if (IsSIROn(iobase)) {
+ switch (rate) {
+ case (__u32) (2400L):
+ value = 47;
+ break;
+ case (__u32) (9600L):
+ value = 11;
+ break;
+ case (__u32) (19200L):
+ value = 5;
+ break;
+ case (__u32) (38400L):
+ value = 2;
+ break;
+ case (__u32) (57600L):
+ value = 1;
+ break;
+ case (__u32) (115200L):
+ value = 0;
+ break;
+ default:
+ break;
+ }
+ } else if (IsMIROn(iobase)) {
+		value = 0;	// automatically fixed at 1.152 Mb/s
+ } else if (IsFIROn(iobase)) {
+		value = 0;	// automatically fixed at 4 Mb/s
+ }
+ temp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ temp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, temp);
+}
+
+static void SetPulseWidth(__u16 iobase, __u8 width)
+{
+ __u8 temp, temp1, temp2;
+
+ temp = (ReadReg(iobase, I_CF_L_1) & 0x1f);
+ temp1 = (ReadReg(iobase, I_CF_H_1) & 0xfc);
+ temp2 = (width & 0x07) << 5;
+ temp |= temp2;
+ temp2 = (width & 0x18) >> 3;
+ temp1 |= temp2;
+ WriteReg(iobase, I_CF_L_1, temp);
+ WriteReg(iobase, I_CF_H_1, temp1);
+}
+
+static void SetSendPreambleCount(__u16 iobase, __u8 count)
+{
+ __u8 temp;
+
+ temp = ReadReg(iobase, I_CF_L_1) & 0xe0;
+ temp |= count;
+ WriteReg(iobase, I_CF_L_1, temp);
+
+}
+
+static void SetVFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, val);
+}
+
+static void SetFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 6, val);
+}
+
+static void SetMIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 5, val);
+}
+
+static void SetSIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 4, val);
+}
+
+#endif /* via_IRCC_H */
diff --git a/drivers/staging/irda/drivers/vlsi_ir.c b/drivers/staging/irda/drivers/vlsi_ir.c
new file mode 100644
index 000000000000..6638784c082e
--- /dev/null
+++ b/drivers/staging/irda/drivers/vlsi_ir.c
@@ -0,0 +1,1872 @@
+/*********************************************************************
+ *
+ * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#define DRIVER_NAME "vlsi_ir"
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
+#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/********************************************************/
+
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#include "vlsi_ir.h"
+
+/********************************************************/
+
+static /* const */ char drivername[] = DRIVER_NAME;
+
+static const struct pci_device_id vlsi_irda_table[] = {
+ {
+ .class = PCI_CLASS_WIRELESS_IRDA << 8,
+ .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
+ .vendor = PCI_VENDOR_ID_VLSI,
+ .device = PCI_DEVICE_ID_VLSI_82C147,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
+
+/********************************************************/
+
+/* clksrc: which clock source to be used
+ * 0: auto - try PLL, fallback to 40MHz XCLK
+ * 1: on-chip 48MHz PLL
+ * 2: external 48MHz XCLK
+ * 3: external 40MHz XCLK (HP OB-800)
+ */
+
+static int clksrc = 0; /* default is 0(auto) */
+module_param(clksrc, int, 0);
+MODULE_PARM_DESC(clksrc, "clock input source selection");
+
+/* ringsize: size of the tx and rx descriptor rings
+ * independent for tx and rx
+ * specify as ringsize=tx[,rx]
+ * allowed values: 4, 8, 16, 32, 64
+ * Due to the IrDA 1.x max. allowed window size=7,
+ * there should be no gain when using rings larger than 8
+ */
+
+static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
+module_param_array(ringsize, int, NULL, 0);
+MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
+
+/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
+ * 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
+ * 1: nominal 3/16 bittime width
+ * note: IrDA-compliant peer devices should be happy regardless of
+ * which one is used. The primary goal is to save some power
+ * on the sender's side - at 9.6kbaud for example the short
+ * pulse width saves more than 90% of the transmitted IR power.
+ */
+
+static int sirpulse = 1; /* default is 3/16 bittime */
+module_param(sirpulse, int, 0);
+MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
+
+/* qos_mtt_bits: encoded min-turn-time value we require the peer device
+ * to use before transmitting to us. "Type 1" (per-station)
+ * bitfield according to IrLAP definition (section 6.6.8)
+ * Don't know which transceiver is used by my OB800 - the
+ * pretty common HP HDLS-1100 requires 1 msec - so let's use this.
+ */
+
+static int qos_mtt_bits = 0x07; /* default is 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
+
+/********************************************************/
+
+static void vlsi_reg_debug(unsigned iobase, const char *s)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s: ", s);
+ for (i = 0; i < 0x20; i++)
+ printk("%02x", (unsigned)inb((iobase+i)));
+ printk("\n");
+}
+
+static void vlsi_ring_debug(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+
+ printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
+ atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
+ printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
+ __func__, (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+/********************************************************/
+
+/* needed regardless of CONFIG_PROC_FS */
+static struct proc_dir_entry *vlsi_proc_root = NULL;
+
+#ifdef CONFIG_PROC_FS
+
+static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
+{
+ unsigned iobase = pci_resource_start(pdev, 0);
+ unsigned i;
+
+ seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
+ pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
+ seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
+ seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
+ pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
+ seq_printf(seq, "hw registers: ");
+ for (i = 0; i < 0x20; i++)
+ seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
+ seq_printf(seq, "\n");
+}
+
+static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ u8 byte;
+ u16 word;
+ s32 sec, usec;
+ unsigned iobase = ndev->base_addr;
+
+ seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
+ netif_device_present(ndev) ? "attached" : "detached",
+ netif_running(ndev) ? "running" : "not running",
+ netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
+ netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
+
+ if (!netif_running(ndev))
+ return;
+
+ seq_printf(seq, "\nhw-state:\n");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
+ seq_printf(seq, "IRMISC:%s%s%s uart%s",
+ (byte&IRMISC_IRRAIL) ? " irrail" : "",
+ (byte&IRMISC_IRPD) ? " irpd" : "",
+ (byte&IRMISC_UARTTST) ? " uarttest" : "",
+ (byte&IRMISC_UARTEN) ? "@" : " disabled\n");
+ if (byte&IRMISC_UARTEN) {
+ seq_printf(seq, "0x%s\n",
+ (byte&2) ? ((byte&1) ? "3e8" : "2e8")
+ : ((byte&1) ? "3f8" : "2f8"));
+ }
+ pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
+ seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
+ (byte&CLKCTL_PD_INV) ? "powered" : "down",
+ (byte&CLKCTL_LOCK) ? " locked" : "",
+ (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
+ (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
+ (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
+ seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
+
+ byte = inb(iobase+VLSI_PIO_IRINTR);
+ seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
+ (byte&IRINTR_ACTEN) ? " ACTEN" : "",
+ (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
+ (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
+ (byte&IRINTR_OE_EN) ? " OE_EN" : "",
+ (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
+ (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
+ (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
+ (byte&IRINTR_OE_INT) ? " OE_INT" : "");
+ word = inw(iobase+VLSI_PIO_RINGPTR);
+ seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
+ word = inw(iobase+VLSI_PIO_RINGBASE);
+ seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
+ ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
+ word = inw(iobase+VLSI_PIO_RINGSIZE);
+ seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
+ RINGSIZE_TO_TXSIZE(word));
+
+ word = inw(iobase+VLSI_PIO_IRCFG);
+ seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (word&IRCFG_LOOP) ? " LOOP" : "",
+ (word&IRCFG_ENTX) ? " ENTX" : "",
+ (word&IRCFG_ENRX) ? " ENRX" : "",
+ (word&IRCFG_MSTR) ? " MSTR" : "",
+ (word&IRCFG_RXANY) ? " RXANY" : "",
+ (word&IRCFG_CRC16) ? " CRC16" : "",
+ (word&IRCFG_FIR) ? " FIR" : "",
+ (word&IRCFG_MIR) ? " MIR" : "",
+ (word&IRCFG_SIR) ? " SIR" : "",
+ (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
+ (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
+ (word&IRCFG_TXPOL) ? " TXPOL" : "",
+ (word&IRCFG_RXPOL) ? " RXPOL" : "");
+ word = inw(iobase+VLSI_PIO_IRENABLE);
+ seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
+ (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
+ (word&IRENABLE_CFGER) ? " CFGERR" : "",
+ (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
+ (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
+ (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
+ (word&IRENABLE_ENTXST) ? " ENTXST" : "",
+ (word&IRENABLE_ENRXST) ? " ENRXST" : "",
+ (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
+ word = inw(iobase+VLSI_PIO_PHYCTL);
+ seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_NPHYCTL);
+ seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_MAXPKT);
+ seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
+ word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
+
+ seq_printf(seq, "\nsw-state:\n");
+ seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
+ (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
+ sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+ USEC_PER_SEC, &usec);
+ seq_printf(seq, "last rx: %u.%06u sec\n", sec, usec);
+
+ seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
+ ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
+ ndev->stats.rx_dropped);
+ seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
+ ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
+ ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
+ seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
+ ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
+ ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
+
+}
+
+static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i, j;
+ int h, t;
+
+ seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ h = atomic_read(&r->head) & r->mask;
+ t = atomic_read(&r->tail) & r->mask;
+ seq_printf(seq, "head = %d / tail = %d ", h, t);
+ if (h == t)
+ seq_printf(seq, "(empty)\n");
+ else {
+ if (((t+1)&r->mask) == h)
+ seq_printf(seq, "(full)\n");
+ else
+ seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
+ rd = &r->rd[h];
+ j = (unsigned) rd_get_count(rd);
+ seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
+ h, (unsigned)rd_get_status(rd), j);
+ if (j > 0) {
+ seq_printf(seq, " data: %*ph\n",
+ min_t(unsigned, j, 20), rd->buf);
+ }
+ }
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ seq_printf(seq, "> ring descr %u: ", i);
+ seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n",
+ (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+static int vlsi_seq_show(struct seq_file *seq, void *v)
+{
+ struct net_device *ndev = seq->private;
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned long flags;
+
+ seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
+ seq_printf(seq, "clksrc: %s\n",
+ (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
+ : ((clksrc==1)?"48MHz PLL":"autodetect"));
+ seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
+ ringsize[0], ringsize[1]);
+ seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
+ seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
+
+ spin_lock_irqsave(&idev->lock, flags);
+ if (idev->pdev != NULL) {
+ vlsi_proc_pdev(seq, idev->pdev);
+
+ if (idev->pdev->current_state == 0)
+ vlsi_proc_ndev(seq, ndev);
+ else
+ seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
+ idev->resume_ok);
+ if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
+ seq_printf(seq, "\n--------- RX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->rx_ring);
+ seq_printf(seq, "\n--------- TX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->tx_ring);
+ }
+ }
+ seq_printf(seq, "\n");
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return 0;
+}
+
+static int vlsi_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vlsi_seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations vlsi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = vlsi_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define VLSI_PROC_FOPS (&vlsi_proc_fops)
+
+#else /* CONFIG_PROC_FS */
+#define VLSI_PROC_FOPS NULL
+#endif
+
+/********************************************************/
+
+static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
+ unsigned size, unsigned len, int dir)
+{
+ struct vlsi_ring *r;
+ struct ring_descr *rd;
+ unsigned i, j;
+ dma_addr_t busaddr;
+
+ if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
+ return NULL;
+
+ r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
+ if (!r)
+ return NULL;
+ memset(r, 0, sizeof(*r));
+
+ r->pdev = pdev;
+ r->dir = dir;
+ r->len = len;
+ r->rd = (struct ring_descr *)(r+1);
+ r->mask = size - 1;
+ r->size = size;
+ atomic_set(&r->head, 0);
+ atomic_set(&r->tail, 0);
+
+ for (i = 0; i < size; i++) {
+ rd = r->rd + i;
+ memset(rd, 0, sizeof(*rd));
+ rd->hw = hwmap + i;
+ rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+ if (rd->buf)
+ busaddr = pci_map_single(pdev, rd->buf, len, dir);
+ if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
+ if (rd->buf) {
+ net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ __func__, rd->buf);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ for (j = 0; j < i; j++) {
+ rd = r->rd + j;
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ pci_unmap_single(pdev, busaddr, len, dir);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ kfree(r);
+ return NULL;
+ }
+ rd_set_addr_status(rd, busaddr, 0);
+ /* initially, the dma buffer is owned by the CPU */
+ rd->skb = NULL;
+ }
+ return r;
+}
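
For orientation, a minimal editorial sketch of the indexing convention such a power-of-two ring with mask = size - 1 implies (the toy_* names below are hypothetical; the driver's real helpers - ring_first(), ring_get(), ring_put(), ring_last() - live in vlsi_ir.h and are not shown in this hunk). The occupancy formulas mirror the head/tail arithmetic used by vlsi_proc_ring() above.

/* Hypothetical helpers - illustration only, not part of the patch. */
struct toy_ring {
	unsigned int head;	/* free-running index of the next entry to complete */
	unsigned int tail;	/* free-running index of the next entry to queue */
	unsigned int mask;	/* size - 1, with size a power of two */
};

static inline int toy_ring_empty(const struct toy_ring *r)
{
	return ((r->head ^ r->tail) & r->mask) == 0;
}

static inline int toy_ring_full(const struct toy_ring *r)
{
	return ((r->tail + 1) & r->mask) == (r->head & r->mask);
}

static inline unsigned int toy_ring_level(const struct toy_ring *r)
{
	return (r->tail - r->head) & r->mask;	/* occupied descriptor slots */
}
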
+
+static int vlsi_free_ring(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+ dma_addr_t busaddr;
+
+ for (i = 0; i < r->size; i++) {
+ rd = r->rd + i;
+ if (rd->skb)
+ dev_kfree_skb_any(rd->skb);
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
+ kfree(rd->buf);
+ }
+ kfree(r);
+ return 0;
+}
+
+static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
+{
+ char *ringarea;
+ struct ring_descr_hw *hwmap;
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
+ &idev->busaddr);
+ if (!ringarea)
+ goto out;
+
+ hwmap = (struct ring_descr_hw *)ringarea;
+ idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
+ XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (idev->rx_ring == NULL)
+ goto out_unmap;
+
+ hwmap += MAX_RING_DESCR;
+ idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
+ XFER_BUF_SIZE, PCI_DMA_TODEVICE);
+ if (idev->tx_ring == NULL)
+ goto out_free_rx;
+
+ idev->virtaddr = ringarea;
+ return 0;
+
+out_free_rx:
+ vlsi_free_ring(idev->rx_ring);
+out_unmap:
+ idev->rx_ring = idev->tx_ring = NULL;
+ pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
+ idev->busaddr = 0;
+out:
+ return -ENOMEM;
+}
+
+static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
+{
+ vlsi_free_ring(idev->rx_ring);
+ vlsi_free_ring(idev->tx_ring);
+ idev->rx_ring = idev->tx_ring = NULL;
+
+ if (idev->busaddr)
+ pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ return 0;
+}
+
+/********************************************************/
+
+static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int crclen, len = 0;
+ struct sk_buff *skb;
+ int ret = 0;
+ struct net_device *ndev = pci_get_drvdata(r->pdev);
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_RX_ERROR) {
+ if (status & RD_RX_OVER)
+ ret |= VLSI_RX_OVER;
+ if (status & RD_RX_LENGTH)
+ ret |= VLSI_RX_LENGTH;
+ if (status & RD_RX_PHYERR)
+ ret |= VLSI_RX_FRAME;
+ if (status & RD_RX_CRCERR)
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+
+ len = rd_get_count(rd);
+ crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
+ len -= crclen; /* remove trailing CRC */
+ if (len <= 0) {
+ pr_debug("%s: strange frame (len=%d)\n", __func__, len);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */
+
+ /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
+ * endian-adjustment there just in place will dirty a cache line
+ * which belongs to the map and thus we must be sure it will
+ * get flushed before giving the buffer back to hardware.
+ * vlsi_fill_rx() will do this anyway - but here we rely on it.
+ */
+ le16_to_cpus(rd->buf+len);
+ if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
+ pr_debug("%s: crc error\n", __func__);
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+ }
+
+ if (!rd->skb) {
+ net_warn_ratelimited("%s: rx packet lost\n", __func__);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ skb = rd->skb;
+ rd->skb = NULL;
+ skb->dev = ndev;
+ skb_put_data(skb, rd->buf, len);
+ skb_reset_mac_header(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+done:
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ /* buffer still owned by CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static void vlsi_fill_rx(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+
+ for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
+ if (rd_is_active(rd)) {
+ net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
+ __func__);
+ vlsi_ring_debug(r);
+ break;
+ }
+ if (!rd->skb) {
+ rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
+ if (rd->skb) {
+ skb_reserve(rd->skb,1);
+ rd->skb->protocol = htons(ETH_P_IRDA);
+ }
+ else
+ break; /* probably not worth logging? */
+ }
+ /* give dma buffer back to busmaster */
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ rd_activate(rd);
+ }
+}
+
+static void vlsi_rx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ ndev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ ndev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ ndev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ ndev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ ndev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += ret;
+ }
+ }
+
+ idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
+
+ vlsi_fill_rx(r);
+
+ if (ring_first(r) == NULL) {
+ /* we are in big trouble, if this should ever happen */
+ net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
+ vlsi_ring_debug(r);
+ }
+ else
+ outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
+{
+ struct net_device *ndev = pci_get_drvdata(idev->pdev);
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ if (rd_get_count(rd)) {
+ pr_debug("%s - dropping rx packet\n", __func__);
+ ret = -VLSI_RX_DROP;
+ }
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ }
+ else
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ ndev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ ndev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ ndev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ ndev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ ndev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += ret;
+ }
+ }
+}
+
+/********************************************************/
+
+static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int len;
+ int ret;
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_TX_UNDRN)
+ ret = VLSI_TX_FIFO;
+ else
+ ret = 0;
+ rd_set_status(rd, 0);
+
+ if (rd->skb) {
+ len = rd->skb->len;
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ else /* tx-skb already freed? - should never happen */
+ len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
+
+ rd_set_count(rd, 0);
+ /* dma buffer still owned by the CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
+{
+ u16 nphyctl;
+ u16 config;
+ unsigned mode;
+ int ret;
+ int baudrate;
+ int fifocnt;
+
+ baudrate = idev->new_baud;
+ pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
+ if (baudrate == 4000000) {
+ mode = IFF_FIR;
+ config = IRCFG_FIR;
+ nphyctl = PHYCTL_FIR;
+ }
+ else if (baudrate == 1152000) {
+ mode = IFF_MIR;
+ config = IRCFG_MIR | IRCFG_CRC16;
+ nphyctl = PHYCTL_MIR(clksrc==3);
+ }
+ else {
+ mode = IFF_SIR;
+ config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
+ switch(baudrate) {
+ default:
+ net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
+ __func__, baudrate);
+ baudrate = 9600;
+ /* fallthru */
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
+ break;
+ }
+ }
+ config |= IRCFG_MSTR | IRCFG_ENRX;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ }
+
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(config, iobase+VLSI_PIO_IRCFG);
+ outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
+ wmb();
+ outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
+ mb();
+
+ udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
+
+ /* read back settings for validation */
+
+ config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
+
+ if (mode == IFF_FIR)
+ config ^= IRENABLE_FIR_ON;
+ else if (mode == IFF_MIR)
+ config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
+ else
+ config ^= IRENABLE_SIR_ON;
+
+ if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
+ net_warn_ratelimited("%s: failed to set %s mode!\n",
+ __func__,
+ mode == IFF_SIR ? "SIR" :
+ mode == IFF_MIR ? "MIR" : "FIR");
+ ret = -1;
+ }
+ else {
+ if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
+ net_warn_ratelimited("%s: failed to apply baudrate %d\n",
+ __func__, baudrate);
+ ret = -1;
+ }
+ else {
+ idev->mode = mode;
+ idev->baud = baudrate;
+ idev->new_baud = 0;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ vlsi_reg_debug(iobase,__func__);
+
+ return ret;
+}
+
+static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned long flags;
+ unsigned iobase = ndev->base_addr;
+ u8 status;
+ u16 config;
+ int mtt, diff;
+ int len, speed;
+ char *msg = NULL;
+
+ speed = irda_get_next_speed(skb);
+ spin_lock_irqsave(&idev->lock, flags);
+ if (speed != -1 && speed != idev->baud) {
+ netif_stop_queue(ndev);
+ idev->new_baud = speed;
+ status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
+ }
+ else
+ status = 0;
+
+ if (skb->len == 0) {
+ /* handle zero packets - should be speed change */
+ if (status == 0) {
+ msg = "bogus zero-length packet";
+ goto drop_unlock;
+ }
+
+ /* due to the completely asynch tx operation we might have
+ * IrLAP racing with the hardware here, f.e. if the controller
+ * is just sending the last packet with current speed while
+ * the LAP is already switching the speed using synchronous
+ * len=0 packet. Immediate execution would lead to hw lockup
+ * requiring a powercycle to reset. Good candidate to trigger
+ * this is the final UA:RSP packet after receiving a DISC:CMD
+ * when getting the LAP down.
+ * Note that we are not protected by the queue_stop approach
+ * because the final UA:RSP arrives _without_ request to apply
+ * new-speed-after-this-packet - hence the driver doesn't know
+ * this was the last packet and doesn't stop the queue. So the
+ * forced switch to default speed from LAP gets through as fast
+ * as only some 10 usec later while the UA:RSP is still processed
+ * by the hardware and we would get screwed.
+ */
+
+ if (ring_first(idev->tx_ring) == NULL) {
+ /* no race - tx-ring already empty */
+ vlsi_set_baud(idev, iobase);
+ netif_wake_queue(ndev);
+ }
+ else {
+ /* keep the speed change pending like it would
+ * for any len>0 packet. tx completion interrupt
+ * will apply it when the tx ring becomes empty.
+ */
+ }
+ spin_unlock_irqrestore(&idev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* sanity checks - simply drop the packet */
+
+ rd = ring_last(r);
+ if (!rd) {
+ msg = "ring full, but queue wasn't stopped";
+ goto drop_unlock;
+ }
+
+ if (rd_is_active(rd)) {
+ msg = "entry still owned by hw";
+ goto drop_unlock;
+ }
+
+ if (!rd->buf) {
+ msg = "tx ring entry without pci buffer";
+ goto drop_unlock;
+ }
+
+ if (rd->skb) {
+ msg = "ring entry with old skb still attached";
+ goto drop_unlock;
+ }
+
+ /* no need for serialization or interrupt disable during mtt */
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ if ((mtt = irda_get_mtt(skb)) > 0) {
+ diff = ktime_us_delta(ktime_get(), idev->last_rx);
+ if (mtt > diff)
+ udelay(mtt - diff);
+ /* must not sleep here - called under netif_tx_lock! */
+ }
+
+ /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
+ * after subsequent tx-completion
+ */
+
+ if (idev->mode == IFF_SIR) {
+ status |= RD_TX_DISCRC; /* no hw-crc creation */
+ len = async_wrap_skb(skb, rd->buf, r->len);
+
+ /* Some rare worst case situation in SIR mode might lead to
+ * potential buffer overflow. The wrapper detects this, returns
+ * with a shortened frame (without FCS/EOF) but doesn't provide
+ * any error indication about the invalid packet which we are
+ * going to transmit.
+ * Therefore we log if the buffer got filled to the point, where the
+ * wrapper would abort, i.e. when there are less than 5 bytes left to
+ * allow appending the FCS/EOF.
+ */
+
+ if (len >= r->len-5)
+ net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
+ __func__);
+ }
+ else {
+ /* hw deals with MIR/FIR mode wrapping */
+ status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
+ len = skb->len;
+ if (len > r->len) {
+ msg = "frame exceeds tx buffer length";
+ goto drop;
+ }
+ else
+ skb_copy_from_linear_data(skb, rd->buf, len);
+ }
+
+ rd->skb = skb; /* remember skb for tx-complete stats */
+
+ rd_set_count(rd, len);
+ rd_set_status(rd, status); /* not yet active! */
+
+ /* give dma buffer back to busmaster-hw (flush caches to make
+ * CPU-driven changes visible from the pci bus).
+ */
+
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+
+/* Switching to TX mode here races with the controller
+ * which may stop TX at any time when fetching an inactive descriptor
+ * or one with CLR_ENTX set. So we switch on TX only, if TX was not running
+ * _after_ the new descriptor was activated on the ring. This ensures
+ * we will either find TX already stopped or we can be sure, there
+ * will be a TX-complete interrupt even if the chip stopped doing
+ * TX just after we found it still running. The ISR will then find
+ * the non-empty ring and restart TX processing. The enclosing
+ * spinlock provides the correct serialization to prevent race with isr.
+ */
+
+ spin_lock_irqsave(&idev->lock,flags);
+
+ rd_activate(rd);
+
+ if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
+ }
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ mb();
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ wmb();
+ outw(0, iobase+VLSI_PIO_PROMPT);
+ }
+
+ if (ring_put(r) == NULL) {
+ netif_stop_queue(ndev);
+ pr_debug("%s: tx ring full - queue stopped\n", __func__);
+ }
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return NETDEV_TX_OK;
+
+drop_unlock:
+ spin_unlock_irqrestore(&idev->lock, flags);
+drop:
+ net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_errors++;
+ ndev->stats.tx_dropped++;
+ /* Don't even think about returning NET_XMIT_DROP (=1) here!
+ * In fact any retval!=0 causes the packet scheduler to requeue the
+ * packet for later retry of transmission - which isn't exactly
+ * what we want after we've just called dev_kfree_skb_any ;-)
+ */
+ return NETDEV_TX_OK;
+}
+
+static void vlsi_tx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned iobase;
+ int ret;
+ u16 config;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ ndev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ ndev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ret;
+ }
+ }
+
+ iobase = ndev->base_addr;
+
+ if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
+ vlsi_set_baud(idev, iobase);
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ if (rd == NULL) /* tx ring empty: re-enable rx */
+ outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
+
+ else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
+ }
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ }
+
+ outw(0, iobase+VLSI_PIO_PROMPT);
+
+ if (netif_queue_stopped(ndev) && !idev->new_baud) {
+ netif_wake_queue(ndev);
+ pr_debug("%s: queue awoken\n", __func__);
+ }
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
+{
+ struct net_device *ndev = pci_get_drvdata(idev->pdev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ pr_debug("%s - dropping tx packet\n", __func__);
+ ret = -VLSI_TX_DROP;
+ }
+ else
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ ndev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ ndev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ret;
+ }
+ }
+
+}
+
+/********************************************************/
+
+static int vlsi_start_clock(struct pci_dev *pdev)
+{
+ u8 clkctl, lock;
+ int i, count;
+
+ if (clksrc < 2) { /* auto or PLL: try PLL */
+ clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* procedure to detect PLL lock synchronisation:
+ * after 0.5 msec initial delay we expect to find 3 PLL lock
+ * indications within 10 msec for successful PLL detection.
+ */
+ udelay(500);
+ count = 0;
+ for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
+ if (lock&CLKCTL_LOCK) {
+ if (++count >= 3)
+ break;
+ }
+ udelay(50);
+ }
+ if (count < 3) {
+ if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
+ net_err_ratelimited("%s: no PLL or failed to lock!\n",
+ __func__);
+ clkctl = CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ return -1;
+ }
+ else /* was: clksrc=0(auto) */
+ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
+
+ pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
+ __func__, clksrc);
+ }
+ else
+ clksrc = 1; /* got successful PLL lock */
+ }
+
+ if (clksrc != 1) {
+ /* we get here if either no PLL was detected in auto-mode or
+ an external clock source was explicitly specified */
+
+ clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
+ if (clksrc == 3)
+ clkctl |= CLKCTL_XCKSEL;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* no way to test for working XCLK */
+ }
+ else
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+
+ /* ok, now going to connect the chip with the clock source */
+
+ clkctl &= ~CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ return 0;
+}
+
+static void vlsi_stop_clock(struct pci_dev *pdev)
+{
+ u8 clkctl;
+
+ /* disconnect chip from clock source */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+ clkctl |= CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* disable all clock sources */
+ clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+}
+
+/********************************************************/
+
+/* writing all-zero to the VLSI PCI IO register area seems to prevent
+ * some occasional situations where the hardware fails (the symptoms
+ * are stalled tx/rx state machines, i.e. everything looks ok for
+ * receive or transmit but the hw makes no progress or is unable to
+ * access the bus memory locations).
+ * Best place to call this is immediately after/before the internal clock
+ * gets started/stopped.
+ */
+
+static inline void vlsi_clear_regs(unsigned iobase)
+{
+ unsigned i;
+ const unsigned chip_io_extent = 32;
+
+ for (i = 0; i < chip_io_extent; i += sizeof(u16))
+ outw(0, iobase + i);
+}
+
+static int vlsi_init_chip(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned iobase;
+ u16 ptr;
+
+ /* start the clock and clean the registers */
+
+ if (vlsi_start_clock(pdev)) {
+ net_err_ratelimited("%s: no valid clock source\n", __func__);
+ return -1;
+ }
+ iobase = ndev->base_addr;
+ vlsi_clear_regs(iobase);
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
+
+ outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
+
+ /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
+
+ outw(0, iobase+VLSI_PIO_IRCFG);
+ wmb();
+
+ outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
+
+ outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
+
+ outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
+ iobase+VLSI_PIO_RINGSIZE);
+
+ ptr = inw(iobase+VLSI_PIO_RINGPTR);
+ atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+ atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
+
+ vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
+ wmb();
+
+ /* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
+ * basically every received pulse fires an ACTIVITY-INT
+ * leading to >>1000 INTs per second instead of a few tens
+ */
+
+ outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
+
+ return 0;
+}
+
+static int vlsi_start_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ u8 byte;
+
+ /* we don't use the legacy UART, disable its address decoding */
+
+ pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
+ byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
+ pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
+
+ /* enable PCI busmaster access to our 16MB page */
+
+ pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
+ pci_set_master(pdev);
+
+ if (vlsi_init_chip(pdev) < 0) {
+ pci_disable_device(pdev);
+ return -1;
+ }
+
+ vlsi_fill_rx(idev->rx_ring);
+
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
+
+ outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
+
+ return 0;
+}
+
+static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idev->lock,flags);
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
+
+ /* disable and w/c irqs */
+ outb(0, iobase+VLSI_PIO_IRINTR);
+ wmb();
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ vlsi_unarm_tx(idev);
+ vlsi_unarm_rx(idev);
+
+ vlsi_clear_regs(iobase);
+ vlsi_stop_clock(pdev);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static void vlsi_tx_timeout(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+
+ vlsi_reg_debug(ndev->base_addr, __func__);
+ vlsi_ring_debug(idev->tx_ring);
+
+ if (netif_running(ndev))
+ netif_stop_queue(ndev);
+
+ vlsi_stop_hw(idev);
+
+ /* now simply restart the whole thing */
+
+ if (!idev->new_baud)
+ idev->new_baud = idev->baud; /* keep current baudrate */
+
+ if (vlsi_start_hw(idev))
+ net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
+ __func__, pci_name(idev->pdev), ndev->name);
+ else
+ netif_start_queue(ndev);
+}
+
+static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ unsigned long flags;
+ u16 fifocnt;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&idev->lock, flags);
+ idev->new_baud = irq->ifr_baudrate;
+ /* when called from userland there might be a minor race window here
+ * if the stack tries to change speed concurrently - which would be
+ * pretty strange anyway with the userland having full control...
+ */
+ vlsi_set_baud(idev, ndev->base_addr);
+ spin_unlock_irqrestore(&idev->lock, flags);
+ break;
+ case SIOCSMEDIABUSY:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(ndev, TRUE);
+ break;
+ case SIOCGRECEIVING:
+ /* the best we can do: check whether there are any bytes in rx fifo.
+ * The trustable window (in case some data arrives just afterwards)
+ * may be as short as 1usec or so at 4Mbps.
+ */
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
+ break;
+ default:
+ net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
+ __func__, cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/********************************************************/
+
+static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *ndev = dev_instance;
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned iobase;
+ u8 irintr;
+ int boguscount = 5;
+ unsigned long flags;
+ int handled = 0;
+
+ iobase = ndev->base_addr;
+ spin_lock_irqsave(&idev->lock,flags);
+ do {
+ irintr = inb(iobase+VLSI_PIO_IRINTR);
+ mb();
+ outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
+
+ if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
+ break;
+
+ handled = 1;
+
+ if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
+ break; /* nothing todo if only activity */
+
+ if (irintr&IRINTR_RPKTINT)
+ vlsi_rx_interrupt(ndev);
+
+ if (irintr&IRINTR_TPKTINT)
+ vlsi_tx_interrupt(ndev);
+
+ } while (--boguscount > 0);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ if (boguscount <= 0)
+ net_info_ratelimited("%s: too much work in interrupt!\n",
+ __func__);
+ return IRQ_RETVAL(handled);
+}
+
+/********************************************************/
+
+static int vlsi_open(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ int err = -EAGAIN;
+ char hwname[32];
+
+ if (pci_request_regions(idev->pdev, drivername)) {
+ net_warn_ratelimited("%s: io resource busy\n", __func__);
+ goto errout;
+ }
+ ndev->base_addr = pci_resource_start(idev->pdev,0);
+ ndev->irq = idev->pdev->irq;
+
+ /* on some rare occasions the chip apparently comes up with
+ * IRQs pending. We'd better w/c any pending IRQs and disable them all
+ */
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+ if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
+ drivername, ndev)) {
+ net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
+ __func__, ndev->irq);
+ goto errout_io;
+ }
+
+ if ((err = vlsi_create_hwif(idev)) != 0)
+ goto errout_irq;
+
+ sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
+ idev->irlap = irlap_open(ndev,&idev->qos,hwname);
+ if (!idev->irlap)
+ goto errout_free_ring;
+
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
+
+ idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
+
+ if ((err = vlsi_start_hw(idev)) != 0)
+ goto errout_close_irlap;
+
+ netif_start_queue(ndev);
+
+ net_info_ratelimited("%s: device %s operational\n",
+ __func__, ndev->name);
+
+ return 0;
+
+errout_close_irlap:
+ irlap_close(idev->irlap);
+errout_free_ring:
+ vlsi_destroy_hwif(idev);
+errout_irq:
+ free_irq(ndev->irq,ndev);
+errout_io:
+ pci_release_regions(idev->pdev);
+errout:
+ return err;
+}
+
+static int vlsi_close(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ if (idev->irlap)
+ irlap_close(idev->irlap);
+ idev->irlap = NULL;
+
+ vlsi_stop_hw(idev);
+
+ vlsi_destroy_hwif(idev);
+
+ free_irq(ndev->irq,ndev);
+
+ pci_release_regions(idev->pdev);
+
+ net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
+
+ return 0;
+}
+
+static const struct net_device_ops vlsi_netdev_ops = {
+ .ndo_open = vlsi_open,
+ .ndo_stop = vlsi_close,
+ .ndo_start_xmit = vlsi_hard_start_xmit,
+ .ndo_do_ioctl = vlsi_ioctl,
+ .ndo_tx_timeout = vlsi_tx_timeout,
+};
+
+static int vlsi_irda_init(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct pci_dev *pdev = idev->pdev;
+
+ ndev->irq = pdev->irq;
+ ndev->base_addr = pci_resource_start(pdev,0);
+
+ /* PCI busmastering
+ * see the include file for details on why we need these 2 masks, in this order!
+ */
+
+ if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
+ pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
+ net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
+ __func__);
+ return -1;
+ }
+
+ irda_init_max_qos_capabilies(&idev->qos);
+
+ /* the VLSI82C147 does not support 576000! */
+
+ idev->qos.baud_rate.bits = IR_2400 | IR_9600
+ | IR_19200 | IR_38400 | IR_57600 | IR_115200
+ | IR_1152000 | (IR_4000000 << 8);
+
+ idev->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&idev->qos);
+
+ /* currently no public media definitions for IrDA */
+
+ ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
+ ndev->if_port = IF_PORT_UNKNOWN;
+
+ ndev->netdev_ops = &vlsi_netdev_ops;
+ ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static int
+vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ vlsi_irda_dev_t *idev;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ else
+ pdev->current_state = 0; /* hw must be running now */
+
+ net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
+ drivername, pci_name(pdev));
+
+ if ( !pci_resource_start(pdev,0) ||
+ !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
+ net_err_ratelimited("%s: bar 0 invalid\n", __func__);
+ goto out_disable;
+ }
+
+ ndev = alloc_irdadev(sizeof(*idev));
+ if (ndev==NULL) {
+ net_err_ratelimited("%s: Unable to allocate device memory.\n",
+ __func__);
+ goto out_disable;
+ }
+
+ idev = netdev_priv(ndev);
+
+ spin_lock_init(&idev->lock);
+ mutex_init(&idev->mtx);
+ mutex_lock(&idev->mtx);
+ idev->pdev = pdev;
+
+ if (vlsi_irda_init(ndev) < 0)
+ goto out_freedev;
+
+ if (register_netdev(ndev) < 0) {
+ net_err_ratelimited("%s: register_netdev failed\n", __func__);
+ goto out_freedev;
+ }
+
+ if (vlsi_proc_root != NULL) {
+ struct proc_dir_entry *ent;
+
+ ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
+ vlsi_proc_root, VLSI_PROC_FOPS, ndev);
+ if (!ent) {
+ net_warn_ratelimited("%s: failed to create proc entry\n",
+ __func__);
+ } else {
+ proc_set_size(ent, 0);
+ }
+ idev->proc_entry = ent;
+ }
+ net_info_ratelimited("%s: registered device %s\n",
+ drivername, ndev->name);
+
+ pci_set_drvdata(pdev, ndev);
+ mutex_unlock(&idev->mtx);
+
+ return 0;
+
+out_freedev:
+ mutex_unlock(&idev->mtx);
+ free_netdev(ndev);
+out_disable:
+ pci_disable_device(pdev);
+out:
+ return -ENODEV;
+}
+
+static void vlsi_irda_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s: lost netdevice?\n", drivername);
+ return;
+ }
+
+ unregister_netdev(ndev);
+
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (idev->proc_entry) {
+ remove_proc_entry(ndev->name, vlsi_proc_root);
+ idev->proc_entry = NULL;
+ }
+ mutex_unlock(&idev->mtx);
+
+ free_netdev(ndev);
+
+ net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
+}
+
+#ifdef CONFIG_PM
+
+/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs.
+ * Some of the Linux PCI-PM code however depends on this, for example in
+ * pci_set_power_state(). So we have to take care to perform the required
+ * operations on our own (particularly reflecting the pdev->current_state),
+ * otherwise we might get cheated by pci-pm.
+ */
+
+
+static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (pdev->current_state != 0) { /* already suspended */
+ if (state.event > pdev->current_state) { /* simply go deeper */
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pdev->current_state = state.event;
+ }
+ else
+ net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
+ __func__, pci_name(pdev),
+ pdev->current_state, state.event);
+ mutex_unlock(&idev->mtx);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ vlsi_stop_hw(idev);
+ pci_save_state(pdev);
+ if (!idev->new_baud)
+ /* remember speed settings to restore on resume */
+ idev->new_baud = idev->baud;
+ }
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pdev->current_state = state.event;
+ idev->resume_ok = 1;
+ mutex_unlock(&idev->mtx);
+ return 0;
+}
+
+static int vlsi_irda_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (pdev->current_state == 0) {
+ mutex_unlock(&idev->mtx);
+ net_warn_ratelimited("%s - %s: already resumed\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pdev->current_state = PM_EVENT_ON;
+
+ if (!idev->resume_ok) {
+ /* should be obsolete now - but used to happen due to:
+ * - pci layer initially setting pdev->current_state = 4 (unknown)
+ * - pci layer did not walk the save_state-tree (might be APM problem)
+ * so we could not refuse to suspend from undefined state
+ * - vlsi_irda_suspend detected invalid state and refused to save
+ * configuration for resume - but was too late to stop suspending
+ * - vlsi_irda_resume got screwed when trying to resume from garbage
+ *
+ * now we explicitly set pdev->current_state = 0 after enabling the
+ * device and independently resume_ok should catch any garbage config.
+ */
+ net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
+ mutex_unlock(&idev->mtx);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ pci_restore_state(pdev);
+ vlsi_start_hw(idev);
+ netif_device_attach(ndev);
+ }
+ idev->resume_ok = 0;
+ mutex_unlock(&idev->mtx);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/*********************************************************/
+
+static struct pci_driver vlsi_irda_driver = {
+ .name = drivername,
+ .id_table = vlsi_irda_table,
+ .probe = vlsi_irda_probe,
+ .remove = vlsi_irda_remove,
+#ifdef CONFIG_PM
+ .suspend = vlsi_irda_suspend,
+ .resume = vlsi_irda_resume,
+#endif
+};
+
+#define PROC_DIR ("driver/" DRIVER_NAME)
+
+static int __init vlsi_mod_init(void)
+{
+ int i, ret;
+
+ if (clksrc < 0 || clksrc > 3) {
+ net_err_ratelimited("%s: invalid clksrc=%d\n",
+ drivername, clksrc);
+ return -1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ switch(ringsize[i]) {
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
+ drivername,
+ i ? "rx" : "tx",
+ ringsize[i]);
+ ringsize[i] = 8;
+ break;
+ }
+ }
+
+ sirpulse = !!sirpulse;
+
+ /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
+ * Failure to create the procfs entry is handled like running
+ * without procfs - it's not required for the driver to work.
+ */
+ vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
+
+ ret = pci_register_driver(&vlsi_irda_driver);
+
+ if (ret && vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+ return ret;
+
+}
+
+static void __exit vlsi_mod_exit(void)
+{
+ pci_unregister_driver(&vlsi_irda_driver);
+ if (vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+}
+
+module_init(vlsi_mod_init);
+module_exit(vlsi_mod_exit);
diff --git a/drivers/staging/irda/drivers/vlsi_ir.h b/drivers/staging/irda/drivers/vlsi_ir.h
new file mode 100644
index 000000000000..f9db2ce4c5c6
--- /dev/null
+++ b/drivers/staging/irda/drivers/vlsi_ir.h
@@ -0,0 +1,757 @@
+
+/*********************************************************************
+ *
+ * vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Version: 0.5
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_VLSI_FIR_H
+#define IRDA_VLSI_FIR_H
+
+/* ================================================================
+ * compatibility stuff
+ */
+
+/* definitions not present in pci_ids.h */
+
+#ifndef PCI_CLASS_WIRELESS_IRDA
+#define PCI_CLASS_WIRELESS_IRDA 0x0d00
+#endif
+
+#ifndef PCI_CLASS_SUBCLASS_MASK
+#define PCI_CLASS_SUBCLASS_MASK 0xffff
+#endif
+
+/* ================================================================ */
+
+/* non-standard PCI registers */
+
+enum vlsi_pci_regs {
+ VLSI_PCI_CLKCTL = 0x40, /* chip clock input control */
+ VLSI_PCI_MSTRPAGE = 0x41, /* addr [31:24] for all busmaster cycles */
+ VLSI_PCI_IRMISC = 0x42 /* mainly legacy UART related */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_CLKCTL: Clock Control Register (u8, rw) */
+
+/* Three possible clock sources: either on-chip 48MHz PLL or
+ * external clock applied to EXTCLK pin. External clock may
+ * be either 48MHz or 40MHz, which is indicated by XCKSEL.
+ * CLKSTP controls whether the selected clock source gets
+ * connected to the IrDA block.
+ *
+ * On my HP OB-800 the BIOS sets external 40MHz clock as source
+ * when IrDA is enabled, and I've never detected any PLL lock success.
+ * Apparently the 14.3...MHz OSC input required for the PLL to work
+ * is not connected and the 40MHz EXTCLK is provided externally.
+ * At least this is what makes the driver work for me.
+ */
+
+enum vlsi_pci_clkctl {
+
+ /* PLL control */
+
+ CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
+ * i.e. PLL is powered, if PD_INV set */
+ CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
+
+ /* clock source selection */
+
+ CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
+ CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
+
+ /* IrDA block control */
+
+ CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
+ CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
+ * is detected, PD_INV gets set(?) and CLKSTP cleared */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_MSTRPAGE: Master Page Register (u8, rw) and busmastering stuff */
+
+#define DMA_MASK_USED_BY_HW 0xffffffff
+#define DMA_MASK_MSTRPAGE 0x00ffffff
+#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
+
+ /* PCI busmastering is somewhat special for this guy - in short:
+ *
+ * We select to operate using fixed MSTRPAGE=0, use ISA DMA
+ * address restrictions to make the PCI BM api aware of this,
+ * but ensure the hardware is dealing with real 32bit access.
+ *
+ * In detail:
+ * The chip executes normal 32bit busmaster cycles, i.e.
+ * drives all 32 address lines. These addresses however are
+ * composed of [0:23] taken from various busaddr-pointers
+ * and [24:31] taken from the MSTRPAGE register in the VLSI82C147
+ * config space. Therefore _all_ busmastering must be
+ * targeted to/from one single 16MB (busaddr-) superpage!
+ * The point is to make sure all the allocations for memory
+ * locations with busmaster access (ring descriptors, buffers)
+ * are indeed bus-mappable to the same 16MB range (for x86 this
+ * means they must reside in the same 16MB physical memory address
+ * range). The only constraint we have which supports the "several
+ * objects mappable to a common 16MB range" paradigm is the old ISA
+ * DMA restriction to the first 16MB of the physical address range.
+ * Hence the approach here is to enable PCI busmaster support using
+ * the correct 32bit dma-mask used by the chip. Afterwards the device's
+ * dma-mask gets restricted to 24bit, which must be honoured somehow by
+ * all allocations for memory areas to be exposed to the chip ...
+ *
+ * Note:
+ * Don't be surprised to get "Setting latency timer..." messages every
+ * time when PCI busmastering is enabled for the chip.
+ * The chip has its PCI latency timer RO fixed at 0 - which is not a
+ * problem here, because it is never requesting _burst_ transactions.
+ */
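
As a rough illustration of the note above (an assumption-labeled sketch, not code from this patch): every bus address the chip drives is the fixed MSTRPAGE page in bits [31:24] combined with a 24-bit pointer taken from a descriptor or the ring base register - the same composition vlsi_proc_ndev() uses when printing the RINGBASE read-back.

/* Hypothetical helper, for illustration only: compose the 32-bit bus
 * address from the fixed MSTRPAGE page (0 here) and a 24-bit pointer. */
static inline unsigned int vlsi_bus_address_sketch(unsigned int ptr24)
{
	return ((unsigned int)MSTRPAGE_VALUE << 24) | (ptr24 & DMA_MASK_MSTRPAGE);
}
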
+
+/* ------------------------------------------ */
+
+/* VLSI_PCIIRMISC: IR Miscellaneous Register (u8, rw) */
+
+/* legacy UART emulation - not used by this driver - would require:
+ * (see below for some register-value definitions)
+ *
+ * - IRMISC_UARTEN must be set to enable UART address decoding
+ * - IRMISC_UARTSEL configured
+ * - IRCFG_MASTER must be cleared
+ * - IRCFG_SIR must be set
+ * - IRENABLE_PHYANDCLOCK must be asserted 0->1 (and hence IRENABLE_SIR_ON)
+ */
+
+enum vlsi_pci_irmisc {
+
+ /* IR transceiver control */
+
+ IRMISC_IRRAIL = 0x40, /* (ro?) IR rail power indication (and control?)
+ * 0=3.3V / 1=5V. Probably set during power-on?
+ * unclear - not touched by driver */
+ IRMISC_IRPD = 0x08, /* transceiver power down, if set */
+
+ /* legacy UART control */
+
+ IRMISC_UARTTST = 0x80, /* UART test mode - "always write 0" */
+ IRMISC_UARTEN = 0x04, /* enable UART address decoding */
+
+ /* bits [1:0] IRMISC_UARTSEL to select legacy UART address */
+
+ IRMISC_UARTSEL_3f8 = 0x00,
+ IRMISC_UARTSEL_2f8 = 0x01,
+ IRMISC_UARTSEL_3e8 = 0x02,
+ IRMISC_UARTSEL_2e8 = 0x03
+};
+
+/* ================================================================ */
+
+/* registers mapped to 32 byte PCI IO space */
+
+/* note: better to access all registers at the indicated u8/u16 size,
+ * although some of them contain only 1 byte of information.
+ * Some of them (particularly PROMPT and IRCFG) ignore
+ * accesses using the wrong addressing mode!
+ */
+
+enum vlsi_pio_regs {
+ VLSI_PIO_IRINTR = 0x00, /* interrupt enable/request (u8, rw) */
+ VLSI_PIO_RINGPTR = 0x02, /* rx/tx ring pointer (u16, ro) */
+ VLSI_PIO_RINGBASE = 0x04, /* [23:10] of ring address (u16, rw) */
+ VLSI_PIO_RINGSIZE = 0x06, /* rx/tx ring size (u16, rw) */
+ VLSI_PIO_PROMPT = 0x08, /* triggers ring processing (u16, wo) */
+ /* 0x0a-0x0f: reserved / duplicated UART regs */
+ VLSI_PIO_IRCFG = 0x10, /* configuration select (u16, rw) */
+ VLSI_PIO_SIRFLAG = 0x12, /* BOF/EOF for filtered SIR (u16, ro) */
+ VLSI_PIO_IRENABLE = 0x14, /* enable and status register (u16, rw/ro) */
+ VLSI_PIO_PHYCTL = 0x16, /* physical layer current status (u16, ro) */
+ VLSI_PIO_NPHYCTL = 0x18, /* next physical layer select (u16, rw) */
+ VLSI_PIO_MAXPKT = 0x1a, /* [11:0] max len for packet receive (u16, rw) */
+ VLSI_PIO_RCVBCNT = 0x1c /* current receive-FIFO byte count (u16, ro) */
+ /* 0x1e-0x1f: reserved / duplicated UART regs */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRINTR: Interrupt Register (u8, rw) */
+
+/* enable-bits:
+ * 1 = enable / 0 = disable
+ * interrupt condition bits:
+ * set according to corresponding interrupt source
+ * (regardless of the state of the enable bits)
+ * enable bit status indicates whether interrupt gets raised
+ * write-to-clear
+ * note: RPKTINT and TPKTINT behave differently in legacy UART mode (which we don't use :-)
+ */
+
+enum vlsi_pio_irintr {
+ IRINTR_ACTEN = 0x80, /* activity interrupt enable */
+ IRINTR_ACTIVITY = 0x40, /* activity monitor (traffic detected) */
+ IRINTR_RPKTEN = 0x20, /* receive packet interrupt enable*/
+ IRINTR_RPKTINT = 0x10, /* rx-packet transferred from fifo to memory finished */
+ IRINTR_TPKTEN = 0x08, /* transmit packet interrupt enable */
+ IRINTR_TPKTINT = 0x04, /* last bit of tx-packet+crc shifted to ir-pulser */
+ IRINTR_OE_EN = 0x02, /* UART rx fifo overrun error interrupt enable */
+ IRINTR_OE_INT = 0x01 /* UART rx fifo overrun error (read LSR to clear) */
+};
+
+/* we use this mask to check whether the (shared PCI) interrupt is ours */
+
+#define IRINTR_INT_MASK (IRINTR_ACTIVITY|IRINTR_RPKTINT|IRINTR_TPKTINT)
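+
+/* illustrative sketch of how a (shared) interrupt handler might use this
+ * mask - assumed names only, the real handler lives in vlsi_ir.c:
+ *
+ *	irintr = inb(iobase + VLSI_PIO_IRINTR);
+ *	if (!(irintr & IRINTR_INT_MASK))
+ *		return IRQ_NONE;			(not our interrupt)
+ *	outb(irintr, iobase + VLSI_PIO_IRINTR);		(ack: write-to-clear)
+ */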
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGPTR: Ring Pointer Read-Back Register (u16, ro) */
+
+/* _both_ ring pointers are indices relative to the _entire_ rx,tx-ring!
+ * i.e. the referenced descriptor is located
+ * at RINGBASE + PTR * sizeof(descr) for rx and tx
+ * therefore, the tx-pointer has offset MAX_RING_DESCR
+ */
+
+#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
+
+#define RINGPTR_RX_MASK (MAX_RING_DESCR-1)
+#define RINGPTR_TX_MASK ((MAX_RING_DESCR-1)<<8)
+
+#define RINGPTR_GET_RX(p) ((p)&RINGPTR_RX_MASK)
+#define RINGPTR_GET_TX(p) (((p)&RINGPTR_TX_MASK)>>8)
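+
+/* example (illustrative): a RINGPTR readback of 0x2305 means the hw is at
+ * rx descriptor 5 and tx descriptor 35 (0x23) of the respective rings:
+ *	RINGPTR_GET_RX(0x2305) == 5
+ *	RINGPTR_GET_TX(0x2305) == 0x23
+ */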
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGBASE: Ring Pointer Base Address Register (u16, ro) */
+
+/* Contains [23:10] part of the ring base (bus-) address
+ * which must be 1k-aligned. [31:24] is taken from
+ * VLSI_PCI_MSTRPAGE above.
+ * The controller initiates non-burst PCI BM cycles to
+ * fetch and update the descriptors in the ring.
+ * Once fetched, the descriptor remains cached onchip
+ * until it gets closed and updated due to the ring
+ * processing state machine.
+ * The entire ring area is split in rx and tx areas with each
+ * area consisting of 64 descriptors of 8 bytes each.
+ * The rx(tx) ring is located at ringbase+0 (ringbase+64*8).
+ */
+
+#define BUS_TO_RINGBASE(p) (((p)>>10)&0x3fff)
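+
+/* example (illustrative): a 1k-aligned ring area at busaddr 0x00123400
+ * inside MSTRPAGE 0 would be programmed as
+ *	BUS_TO_RINGBASE(0x00123400) == 0x048d
+ */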
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGSIZE: Ring Size Register (u16, rw) */
+
+/* bit mask to indicate the ring size to be used for rx and tx.
+ * possible values encoded bits
+ * 4 0000
+ * 8 0001
+ * 16 0011
+ * 32 0111
+ * 64 1111
+ * located at [15:12] for tx and [11:8] for rx ([7:0] unused)
+ *
+ * note: probably a good idea to have IRCFG_MSTR cleared when writing
+ * this so the state machines are stopped and the RINGPTR is reset!
+ */
+
+#define SIZE_TO_BITS(num) ((((num)-1)>>2)&0x0f)
+#define TX_RX_TO_RINGSIZE(tx,rx) ((SIZE_TO_BITS(tx)<<12)|(SIZE_TO_BITS(rx)<<8))
+#define RINGSIZE_TO_RXSIZE(rs) ((((rs)&0x0f00)>>6)+4)
+#define RINGSIZE_TO_TXSIZE(rs) ((((rs)&0xf000)>>10)+4)
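+
+/* example (illustrative): a 64-descriptor tx ring and a 16-descriptor
+ * rx ring would be configured as
+ *	TX_RX_TO_RINGSIZE(64, 16) == 0xf300
+ * and read back as
+ *	RINGSIZE_TO_TXSIZE(0xf300) == 64, RINGSIZE_TO_RXSIZE(0xf300) == 16
+ */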
+
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PROMPT: Ring Prompting Register (u16, write-to-start) */
+
+/* writing any value kicks the ring processing state machines
+ * for both tx, rx rings as follows:
+ * - active rings (currently owning an active descriptor)
+ * ignore the prompt and continue
+ * - idle rings fetch the next descr from the ring and start
+ * their processing
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRCFG: IR Config Register (u16, rw) */
+
+/* notes:
+ * - not more than one SIR/MIR/FIR bit must be set at any time
+ * - SIR, MIR, FIR and CRC16 select the configuration which will
+ * be applied on next 0->1 transition of IRENABLE_PHYANDCLOCK (see below).
+ * - besides allowing the PCI interface to execute busmaster cycles
+ * and therefore the ring SM to operate, the MSTR bit has side-effects:
+ * when MSTR is cleared, the RINGPTR's get reset and the legacy UART mode
+ * (in contrast to busmaster access mode) gets enabled.
+ * - clearing ENRX or setting ENTX while data is received may stall the
+ * receive fifo until ENRX is re-enabled _and_ another packet arrives
+ * - SIRFILT means the chip performs the required unwrapping of hardware
+ * headers (XBOF's, BOF/EOF) and un-escaping in the _receive_ direction.
+ * Only the resulting IrLAP payload is copied to the receive buffers -
+ * but with the 16bit FCS still included. The question remains whether it
+ * was already checked or whether we should do it before passing the packet to IrLAP.
+ */
+
+enum vlsi_pio_ircfg {
+ IRCFG_LOOP = 0x4000, /* enable loopback test mode */
+ IRCFG_ENTX = 0x1000, /* transmit enable */
+ IRCFG_ENRX = 0x0800, /* receive enable */
+ IRCFG_MSTR = 0x0400, /* master enable */
+ IRCFG_RXANY = 0x0200, /* receive any packet */
+ IRCFG_CRC16 = 0x0080, /* 16bit (not 32bit) CRC select for MIR/FIR */
+ IRCFG_FIR = 0x0040, /* FIR 4PPM encoding mode enable */
+ IRCFG_MIR = 0x0020, /* MIR HDLC encoding mode enable */
+ IRCFG_SIR = 0x0010, /* SIR encoding mode enable */
+ IRCFG_SIRFILT = 0x0008, /* enable SIR decode filter (receiver unwrapping) */
+ IRCFG_SIRTEST = 0x0004, /* allow SIR decode filter when not in SIR mode */
+ IRCFG_TXPOL = 0x0002, /* invert tx polarity when set */
+ IRCFG_RXPOL = 0x0001 /* invert rx polarity when set */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_SIRFLAG: SIR Flag Register (u16, ro) */
+
+/* register contains hardcoded BOF=0xc0 at [7:0] and EOF=0xc1 at [15:8]
+ * which is used for unwrapping received frames in SIR decode-filter mode
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRENABLE: IR Enable Register (u16, rw/ro) */
+
+/* notes:
+ * - IREN acts as gate for latching the configured IR mode information
+ * from IRCFG and IRPHYCTL when IREN=reset and applying them when
+ * IREN gets set afterwards.
+ * - ENTXST reflects IRCFG_ENTX
+ * - ENRXST = IRCFG_ENRX && (!IRCFG_ENTX || IRCFG_LOOP)
+ */
+
+enum vlsi_pio_irenable {
+ IRENABLE_PHYANDCLOCK = 0x8000, /* enable IR phy and gate the mode config (rw) */
+ IRENABLE_CFGER = 0x4000, /* mode configuration error (ro) */
+ IRENABLE_FIR_ON = 0x2000, /* FIR on status (ro) */
+ IRENABLE_MIR_ON = 0x1000, /* MIR on status (ro) */
+ IRENABLE_SIR_ON = 0x0800, /* SIR on status (ro) */
+ IRENABLE_ENTXST = 0x0400, /* transmit enable status (ro) */
+ IRENABLE_ENRXST = 0x0200, /* Receive enable status (ro) */
+ IRENABLE_CRC16_ON = 0x0100 /* 16bit (not 32bit) CRC enabled status (ro) */
+};
+
+#define IRENABLE_MASK 0xff00 /* Read mask */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
+
+/* read-back of the currently applied physical layer status.
+ * applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_PHYANDCLOCK
+ * contents identical to VLSI_PIO_NPHYCTL (see below)
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
+
+/* latched during IRENABLE_PHYANDCLOCK=0 and applied at 0-1 transition
+ *
+ * consists of BAUD[15:10], PLSWID[9:5] and PREAMB[4:0] bits defined as follows:
+ *
+ * SIR-mode: BAUD = (115.2kHz / baudrate) - 1
+ * PLSWID = (pulsetime * freq / (BAUD+1)) - 1
+ * where pulsetime is the requested IrPHY pulse width
+ * and freq is 8(16)MHz for 40(48)MHz primary input clock
+ * PREAMB: don't care for SIR
+ *
+ * The nominal SIR pulse width is 3/16 bit time so we have PLSWID=12
+ * fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
+ * IrPHY also allows shorter pulses down to the nominal pulse duration
+ * at 115.2kbaud (minus some tolerance) which is 1.41 usec.
+ * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
+ * we get the minimum acceptable PLSWID values according to the VLSI
+ * specification, which provides 1.5 usec pulse width for all speeds (except
+ * for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
+ * reduces the transceiver power drain on the battery. At 9.6kbaud, for
+ * example, this amounts to more than 90% battery power saving!
+ *
+ * MIR-mode: BAUD = 0
+ * PLSWID = 9(10) for 40(48) MHz input clock
+ * to get nominal MIR pulse width
+ * PREAMB = 1
+ *
+ * FIR-mode: BAUD = 0
+ * PLSWID: don't care
+ * PREAMB = 15
+ */
+
+#define PHYCTL_BAUD_SHIFT 10
+#define PHYCTL_BAUD_MASK 0xfc00
+#define PHYCTL_PLSWID_SHIFT 5
+#define PHYCTL_PLSWID_MASK 0x03e0
+#define PHYCTL_PREAMB_SHIFT 0
+#define PHYCTL_PREAMB_MASK 0x001f
+
+#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
+#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
+#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
+
+#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
+ | (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
+ | (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
+
+#define BAUD_BITS(br) ((115200/(br))-1)
+
+static inline unsigned
+calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
+{
+ unsigned tmp;
+
+ if (widthselect) /* nominal 3/16 pulse width */
+ return (clockselect) ? 12 : 24;
+
+ tmp = ((clockselect) ? 12 : 24) / (BAUD_BITS(baudrate)+1);
+
+ /* intermediate result of integer division needed here */
+
+ return (tmp>0) ? (tmp-1) : 0;
+}
+
+#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
+#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
+#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
+
+/* quite ugly, I know. But implementing these calculations here avoids
+ * having magic numbers in the code and allows some playing with pulsewidths
+ * without risk to violate the standards.
+ * FWIW, here is the table for reference:
+ *
+ * baudrate BAUD min-PLSWID nom-PLSWID PREAMB
+ * 2400 47 0(0) 12(24) 0
+ * 9600 11 0(0) 12(24) 0
+ * 19200 5 1(2) 12(24) 0
+ * 38400 2 3(6) 12(24) 0
+ * 57600 1 5(10) 12(24) 0
+ * 115200 0 11(22) 12(24) 0
+ * MIR 0 - 9(10) 1
+ * FIR 0 - 0 15
+ *
+ * note: x(y) means x-value for 40MHz / y-value for 48MHz primary input clock
+ */
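+
+/* usage example (illustrative): 115.2kbaud SIR with the short (minimum)
+ * pulse width on the 40MHz clock (widthselect=0, clockselect=1) gives
+ *	PHYCTL_SIR(115200, 0, 1) == BWP_TO_PHYCTL(0, 11, 0) == 0x0160
+ * matching the BAUD=0 / min-PLSWID=11 row in the table above.
+ */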
+
+/* ------------------------------------------ */
+
+
+/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
+
+/* maximum acceptable length for received packets */
+
+/* hw imposed limitation - register uses only [11:0] */
+#define MAX_PACKET_LENGTH 0x0fff
+
+/* IrLAP I-field (apparently not defined elsewhere) */
+#define IRDA_MTU 2048
+
+/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
+#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
+
+/* the buffers we use to exchange frames with the hardware need to be
+ * larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
+ * appended and, in SIR mode, a lot of frame wrapping bytes. The worst
+ * case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
+ * needing to be escaped to provide transparency. Furthermore, the peer
+ * might ask for quite a number of additional XBOFs:
+ * up to 115+48 XBOFS 163
+ * regular BOF 1
+ * A-field 1
+ * C-field 1
+ * I-field, IRDA_MTU, all escaped 4096
+ * FCS (16 bit at SIR, escaped) 4
+ * EOF 1
+ * AFAICS nothing in IrLAP guarantees the A/C fields not to need escaping
+ * (e.g. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there), so in the
+ * worst case both of them get escaped as well and we end up with
+ * 163 + 1 + 2 + 2 + 4096 + 4 + 1 = 4269 bytes total frame size.
+ * However, the VLSI uses only 12 bits for all buffer length values,
+ * which limits the maximum usable buffer size to <= 4095.
+ * Note this is not a limitation in the receive case because we use
+ * the SIR filtering mode where the hw unwraps the frame and only the
+ * bare packet+fcs is stored into the buffer - in contrast to the SIR
+ * tx case where we have to pass frame-wrapped packets to the hw.
+ * If this would ever become an issue in real life, the only workaround
+ * I see would be using the legacy UART emulation in SIR mode.
+ */
+
+#define XFER_BUF_SIZE MAX_PACKET_LENGTH
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
+
+/* receive packet counter gets incremented on every non-filtered
+ * byte which was put in the receive fifo and reset for each
+ * new packet. Used to decide whether we are just in the middle
+ * of receiving
+ */
+
+/* better apply the [11:0] mask when reading, as some docs say the
+ * reserved [15:12] would return 1 when reading - which is wrong AFAICS
+ */
+#define RCVBCNT_MASK 0x0fff
+
+/******************************************************************/
+
+/* descriptors for rx/tx ring
+ *
+ * accessed by hardware - don't change!
+ *
+ * the descriptor is owned by hardware, when the ACTIVE status bit
+ * is set and nothing (besides reading status to test the bit)
+ * shall be done. The bit gets cleared by hw, when the descriptor
+ * gets closed. Premature reaping of descriptors owned by the chip
+ * can be achieved by disabling IRCFG_MSTR
+ *
+ * Attention: Writing addr overwrites status!
+ *
+ * ### FIXME: depends on endianness (but there ain't no non-i586 ob800 ;-)
+ */
+
+struct ring_descr_hw {
+ volatile __le16 rd_count; /* tx/rx count [11:0] */
+ __le16 reserved;
+ union {
+ __le32 addr; /* [23:0] of the buffer's busaddress */
+ struct {
+ u8 addr_res[3];
+ volatile u8 status; /* descriptor status */
+ } __packed rd_s;
+ } __packed rd_u;
+} __packed;
+
+#define rd_addr rd_u.addr
+#define rd_status rd_u.rd_s.status
+
+/* ring descriptor status bits */
+
+#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
+
+/* TX ring descriptor status */
+
+#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
+#define RD_TX_BADCRC 0x20 /* force a bad CRC */
+#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
+#define RD_TX_FRCEUND 0x08 /* force underrun */
+#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
+#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
+
+/* RX ring descriptor status */
+
+#define RD_RX_PHYERR 0x40 /* physical encoding error */
+#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
+#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
+#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
+#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
+
+#define RD_RX_ERROR 0x7c /* any error in received frame */
+
+/* the memory required to hold the 2 descriptor rings */
+#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
+
+/******************************************************************/
+
+/* sw-ring descriptors consist of a bus-mapped transfer buffer with
+ * associated skb and a pointer to the hw entry descriptor
+ */
+
+struct ring_descr {
+ struct ring_descr_hw *hw;
+ struct sk_buff *skb;
+ void *buf;
+};
+
+/* wrappers for operations on hw-exposed ring descriptors
+ * access to the hw-part of the descriptors must use these.
+ */
+
+static inline int rd_is_active(struct ring_descr *rd)
+{
+ return (rd->hw->rd_status & RD_ACTIVE) != 0;
+}
+
+static inline void rd_activate(struct ring_descr *rd)
+{
+ rd->hw->rd_status |= RD_ACTIVE;
+}
+
+static inline void rd_set_status(struct ring_descr *rd, u8 s)
+{
+ rd->hw->rd_status = s; /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
+{
+ /* order is important for two reasons:
+ * - overlayed: writing addr overwrites status
+ * - we want to write status last so we have valid address in
+ * case status has RD_ACTIVE set
+ */
+
+ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
+ net_err_ratelimited("%s: pci busaddr inconsistency!\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
+ * to status - just in case MSTRPAGE_VALUE!=0
+ */
+ rd->hw->rd_addr = cpu_to_le32(a);
+ wmb();
+ rd_set_status(rd, s); /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_count(struct ring_descr *rd, u16 c)
+{
+ rd->hw->rd_count = cpu_to_le16(c);
+}
+
+static inline u8 rd_get_status(struct ring_descr *rd)
+{
+ return rd->hw->rd_status;
+}
+
+static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
+{
+ dma_addr_t a;
+
+ a = le32_to_cpu(rd->hw->rd_addr);
+ return (a & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
+}
+
+static inline u16 rd_get_count(struct ring_descr *rd)
+{
+ return le16_to_cpu(rd->hw->rd_count);
+}
+
+/******************************************************************/
+
+/* sw descriptor rings for rx, tx:
+ *
+ * operations follow producer-consumer paradigm, with the hw
+ * in the middle doing the processing.
+ * ring size must be power of two.
+ *
+ * producer advances r->tail after inserting for processing
+ * consumer advances r->head after removing processed rd
+ * ring is empty if head==tail / full if (tail+1)==head
+ */
+
+struct vlsi_ring {
+ struct pci_dev *pdev;
+ int dir;
+ unsigned len;
+ unsigned size;
+ unsigned mask;
+ atomic_t head, tail;
+ struct ring_descr *rd;
+};
+
+/* ring processing helpers */
+
+static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+{
+ int t;
+
+ t = atomic_read(&r->tail) & r->mask;
+ return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
+}
+
+static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+{
+ atomic_inc(&r->tail);
+ return ring_last(r);
+}
+
+static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+{
+ int h;
+
+ h = atomic_read(&r->head) & r->mask;
+ return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+}
+
+static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+{
+ atomic_inc(&r->head);
+ return ring_first(r);
+}
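+
+/* illustrative producer-side sketch, assuming descriptor setup as done in
+ * vlsi_ir.c (not a complete tx path):
+ *
+ *	rd = ring_last(r);		(NULL if the ring is full)
+ *	if (rd) {
+ *		rd_set_count(rd, len);
+ *		rd_set_addr_status(rd, busaddr, RD_ACTIVE);	(hw owns it now)
+ *		ring_put(r);		(advance the producer tail)
+ *	}
+ */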
+
+/******************************************************************/
+
+/* our private compound VLSI-PCI-IRDA device information */
+
+typedef struct vlsi_irda_dev {
+ struct pci_dev *pdev;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ unsigned mode;
+ int baud, new_baud;
+
+ dma_addr_t busaddr;
+ void *virtaddr;
+ struct vlsi_ring *tx_ring, *rx_ring;
+
+ ktime_t last_rx;
+
+ spinlock_t lock;
+ struct mutex mtx;
+
+ u8 resume_ok;
+ struct proc_dir_entry *proc_entry;
+
+} vlsi_irda_dev_t;
+
+/********************************************************/
+
+/* the remapped error flags we use for returning from frame
+ * post-processing in vlsi_process_tx/rx() after it was completed
+ * by the hardware. These functions either return the >=0 number
+ * of transferred bytes in case of success or the negative (-)
+ * of the or'ed error flags.
+ */
+
+#define VLSI_TX_DROP 0x0001
+#define VLSI_TX_FIFO 0x0002
+
+#define VLSI_RX_DROP 0x0100
+#define VLSI_RX_OVER 0x0200
+#define VLSI_RX_LENGTH 0x0400
+#define VLSI_RX_FRAME 0x0800
+#define VLSI_RX_CRC 0x1000
+
+/********************************************************/
+
+#endif /* IRDA_VLSI_FIR_H */
+
diff --git a/drivers/staging/irda/drivers/w83977af.h b/drivers/staging/irda/drivers/w83977af.h
new file mode 100644
index 000000000000..04476c2e9121
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af.h
@@ -0,0 +1,53 @@
+#ifndef W83977AF_H
+#define W83977AF_H
+
+#define W977_EFIO_BASE 0x370
+#define W977_EFIO2_BASE 0x3f0
+#define W977_DEVICE_IR 0x06
+
+
+/*
+ * Enter extended function mode
+ */
+static inline void w977_efm_enter(unsigned int efio)
+{
+ outb(0x87, efio);
+ outb(0x87, efio);
+}
+
+/*
+ * Select a device to configure
+ */
+
+static inline void w977_select_device(__u8 devnum, unsigned int efio)
+{
+ outb(0x07, efio);
+ outb(devnum, efio+1);
+}
+
+/*
+ * Write a byte to a register
+ */
+static inline void w977_write_reg(__u8 reg, __u8 value, unsigned int efio)
+{
+ outb(reg, efio);
+ outb(value, efio+1);
+}
+
+/*
+ * read a byte from a register
+ */
+static inline __u8 w977_read_reg(__u8 reg, unsigned int efio)
+{
+ outb(reg, efio);
+ return inb(efio+1);
+}
+
+/*
+ * Exit extended function mode
+ */
+static inline void w977_efm_exit(unsigned int efio)
+{
+ outb(0xAA, efio);
+}
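+
+/*
+ * Typical configuration sequence (illustrative only - see w83977af_ir.c
+ * for the real probe/close paths):
+ *
+ *	w977_efm_enter(efio);
+ *	w977_select_device(W977_DEVICE_IR, efio);
+ *	w977_write_reg(0x30, 0x01, efio);	(activate the selected device)
+ *	w977_efm_exit(efio);
+ */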
+#endif
diff --git a/drivers/staging/irda/drivers/w83977af_ir.c b/drivers/staging/irda/drivers/w83977af_ir.c
new file mode 100644
index 000000000000..282b6c9ae05b
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af_ir.c
@@ -0,0 +1,1285 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.c
+ * Version: 1.0
+ * Description: FIR driver for the Winbond W83977AF Super I/O chip
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Wed Nov 4 11:46:16 1998
+ * Modified at: Fri Jan 28 12:10:59 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999 Rebel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
+ * warranty for any of this software. This material is provided "AS-IS"
+ * and at no charge.
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in pc87108.c since the implementations are quite
+ * similar.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the bank select register (SSR) on entry and restore
+ * it on exit. It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb(iobase + SSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb(bank, iobase + SSR);
+ *
+ ********************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include "w83977af.h"
+#include "w83977af_ir.h"
+
+#define CONFIG_USE_W977_PNP /* Currently needed */
+#define PIO_MAX_SPEED 115200
+
+static char *driver_name = "w83977af_ir";
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+#define CHIP_IO_EXTENT 8
+
+static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
+#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
+static unsigned int irq[] = { 6, 0, 0, 0 };
+#else
+static unsigned int irq[] = { 11, 0, 0, 0 };
+#endif
+static unsigned int dma[] = { 1, 0, 0, 0 };
+static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
+static unsigned int efio = W977_EFIO_BASE;
+
+static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
+
+/* Some prototypes */
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma);
+static int w83977af_close(struct w83977af_ir *self);
+static int w83977af_probe(int iobase, int irq, int dma);
+static int w83977af_dma_receive(struct w83977af_ir *self);
+static int w83977af_dma_receive_complete(struct w83977af_ir *self);
+static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
+static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
+static int w83977af_is_receiving(struct w83977af_ir *self);
+
+static int w83977af_net_open(struct net_device *dev);
+static int w83977af_net_close(struct net_device *dev);
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/*
+ * Function w83977af_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init w83977af_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
+ if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/*
+ * Function w83977af_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit w83977af_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ w83977af_close(dev_self[i]);
+ }
+}
+
+static const struct net_device_ops w83977_netdev_ops = {
+ .ndo_open = w83977af_net_open,
+ .ndo_stop = w83977af_net_close,
+ .ndo_start_xmit = w83977af_hard_xmit,
+ .ndo_do_ioctl = w83977af_net_ioctl,
+};
+
+/*
+ * Function w83977af_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma)
+{
+ struct net_device *dev;
+ struct w83977af_ir *self;
+ int err;
+
+ /* Lock the port that we need */
+ if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
+ pr_debug("%s: can't get iobase of 0x%03x\n",
+ __func__, iobase);
+ return -ENODEV;
+ }
+
+ if (w83977af_probe(iobase, irq, dma) == -1) {
+ err = -1;
+ goto err_out;
+ }
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct w83977af_ir));
+ if (!dev) {
+ pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ self = netdev_priv(dev);
+ spin_lock_init(&self->lock);
+
+ /* Initialize IO */
+ self->io.fir_base = iobase;
+ self->io.irq = irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = dma;
+ self->io.fifo_size = 32;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* The only value we must override is the baudrate */
+
+ /* FIXME: The HP HSDL-1100 does not support 1152000! */
+ self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
+ IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
+
+ /* The HP HSDL-1100 needs 1 ms according to the specs */
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 4000;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (!self->rx_buff.head) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (!self->tx_buff.head) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+ self->netdev = dev;
+
+ dev->netdev_ops = &w83977_netdev_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ net_err_ratelimited("%s: register_netdev() failed!\n",
+ __func__);
+ goto err_out3;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+
+ return 0;
+err_out3:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+err_out2:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+err_out1:
+ free_netdev(dev);
+err_out:
+ release_region(iobase, CHIP_IO_EXTENT);
+ return err;
+}
+
+/*
+ * Function w83977af_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int w83977af_close(struct w83977af_ir *self)
+{
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+#ifdef CONFIG_USE_W977_PNP
+ /* enter PnP configuration mode */
+ w977_efm_enter(efio);
+
+ w977_select_device(W977_DEVICE_IR, efio);
+
+ /* Deactivate device */
+ w977_write_reg(0x30, 0x00, efio);
+
+ w977_efm_exit(efio);
+#endif /* CONFIG_USE_W977_PNP */
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s: Releasing Region %03x\n", __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static int w83977af_probe(int iobase, int irq, int dma)
+{
+ int version;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+#ifdef CONFIG_USE_W977_PNP
+ /* Enter PnP configuration mode */
+ w977_efm_enter(efbase[i]);
+
+ w977_select_device(W977_DEVICE_IR, efbase[i]);
+
+ /* Configure PnP port, IRQ, and DMA channel */
+ w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
+ w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
+
+ w977_write_reg(0x70, irq, efbase[i]);
+#ifdef CONFIG_ARCH_NETWINDER
+ /* Netwinder uses 1 higher than Linux */
+ w977_write_reg(0x74, dma + 1, efbase[i]);
+#else
+ w977_write_reg(0x74, dma, efbase[i]);
+#endif /* CONFIG_ARCH_NETWINDER */
+ w977_write_reg(0x75, 0x04, efbase[i]);/* Disable Tx DMA */
+
+ /* Set append hardware CRC, enable IR bank selection */
+ w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);
+
+ /* Activate device */
+ w977_write_reg(0x30, 0x01, efbase[i]);
+
+ w977_efm_exit(efbase[i]);
+#endif /* CONFIG_USE_W977_PNP */
+ /* Disable Advanced mode */
+ switch_bank(iobase, SET2);
+ outb(0x00, iobase + 2);
+
+ /* Turn on UART (global) interrupts */
+ switch_bank(iobase, SET0);
+ outb(HCR_EN_IRQ, iobase + HCR);
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, SET2);
+ outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
+
+ /* Set default IR-mode */
+ switch_bank(iobase, SET0);
+ outb(HCR_SIR, iobase + HCR);
+
+ /* Read the Advanced IR ID */
+ switch_bank(iobase, SET3);
+ version = inb(iobase + AUID);
+
+ /* Should be 0x1? */
+ if (0x10 == (version & 0xf0)) {
+ efio = efbase[i];
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
+ UFR_EN_FIFO, iobase + UFR);
+
+ /* Receiver frame length */
+ switch_bank(iobase, SET4);
+ outb(2048 & 0xff, iobase + 6);
+ outb((2048 >> 8) & 0x1f, iobase + 7);
+
+ /*
+ * Init HP HSDL-1100 transceiver.
+ *
+ * Set IRX_MSL since we have two receive paths, IRRX
+ * and IRRXH. Clear IRSL0D since we want IRSL0 to
+ * be an input pin used for IRRXH
+ *
+ * IRRX pin 37 connected to receiver
+ * IRTX pin 38 connected to transmitter
+ * FIRRX pin 39 connected to receiver (IRSL0)
+ * CIRRX pin 40 connected to pin 37
+ */
+ switch_bank(iobase, SET7);
+ outb(0x40, iobase + 7);
+
+ net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
+ version);
+
+ return 0;
+ } else {
+ /* Try next extended function register address */
+ pr_debug("%s: Wrong chip version\n", __func__);
+ }
+ }
+ return -1;
+}
+
+static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
+{
+ int ir_mode = HCR_SIR;
+ int iobase;
+ __u8 set;
+
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase + ICR);
+
+ /* Select Set 2 */
+ switch_bank(iobase, SET2);
+ outb(0x00, iobase + ABHL);
+
+ switch (speed) {
+ case 9600: outb(0x0c, iobase + ABLL); break;
+ case 19200: outb(0x06, iobase + ABLL); break;
+ case 38400: outb(0x03, iobase + ABLL); break;
+ case 57600: outb(0x02, iobase + ABLL); break;
+ case 115200: outb(0x01, iobase + ABLL); break;
+ case 576000:
+ ir_mode = HCR_MIR_576;
+ pr_debug("%s: handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ ir_mode = HCR_MIR_1152;
+ pr_debug("%s: handling baud of 1152000\n", __func__);
+ break;
+ case 4000000:
+ ir_mode = HCR_FIR;
+ pr_debug("%s: handling baud of 4000000\n", __func__);
+ break;
+ default:
+ ir_mode = HCR_FIR;
+ pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
+ break;
+ }
+
+ /* Set speed mode */
+ switch_bank(iobase, SET0);
+ outb(ir_mode, iobase + HCR);
+
+ /* set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
+ /* set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(0x00, iobase + UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase + UFR);
+
+ netif_wake_queue(self->netdev);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, SET0);
+ if (speed > PIO_MAX_SPEED) {
+ outb(ICR_EFSFI, iobase + ICR);
+ w83977af_dma_receive(self);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
+
+ /* Restore SSR */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_hard_xmit (skb, dev)
+ *
+ * Sets up a DMA transfer to send the current frame.
+ *
+ */
+static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ __s32 speed;
+ int iobase;
+ __u8 set;
+ int mtt;
+
+ self = netdev_priv(dev);
+
+ iobase = self->io.fir_base;
+
+ pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);
+
+ /* Lock transmit buffer */
+ netif_stop_queue(dev);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ w83977af_change_speed(self, speed);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ self->new_speed = speed;
+ }
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Decide if we should use PIO or DMA transfer */
+ if (self->io.speed > PIO_MAX_SPEED) {
+ self->tx_buff.data = self->tx_buff.head;
+ skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
+ self->tx_buff.len = skb->len;
+
+ mtt = irda_get_mtt(skb);
+ pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
+ udelay(mtt);
+
+ /* Enable DMA interrupt */
+ switch_bank(iobase, SET0);
+ outb(ICR_EDMAI, iobase + ICR);
+ w83977af_dma_write(self, iobase);
+ } else {
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, SET0);
+ outb(ICR_ETXTHI, iobase + ICR);
+ }
+ dev_kfree_skb(skb);
+
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function w83977af_dma_write (self, iobase)
+ *
+ * Send frame using DMA
+ *
+ */
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
+{
+ __u8 set;
+
+ pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, SET2);
+ outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_MODE_WRITE);
+ self->io.direction = IO_XMIT;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
+
+ /* Restore set register */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_pio_write (iobase, buf, len, fifo_size)
+ *
+ * Fill the chip's transmit FIFO with as much of the frame as it will take
+ *
+ */
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 set;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+
+ switch_bank(iobase, SET0);
+ if (!(inb_p(iobase + USR) & USR_TSRE)) {
+ pr_debug("%s: warning, FIFO not empty yet!\n", __func__);
+
+ fifo_size -= 17;
+ pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase + TBR);
+ }
+
+ pr_debug("%s: fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(set, iobase + SSR);
+
+ return actual;
+}
+
+/*
+ * Function w83977af_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished, so do the necessary things
+ *
+ *
+ */
+static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+
+ pr_debug("%s: %ld\n", __func__, jiffies);
+
+ IRDA_ASSERT(self, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Check for underrun! */
+ if (inb(iobase + AUDR) & AUDR_UNDR) {
+ pr_debug("%s: Transmit underrun!\n", __func__);
+
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 to it */
+ outb(AUDR_UNDR, iobase + AUDR);
+ } else {
+ self->netdev->stats.tx_packets++;
+ }
+
+ if (self->new_speed) {
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Unlock tx_buff and request another frame */
+ /* Tell the network layer, that we want more frames */
+ netif_wake_queue(self->netdev);
+
+ /* Restore set */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int w83977af_dma_receive(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+#ifdef CONFIG_ARCH_NETWINDER
+ unsigned long flags;
+ __u8 hcr;
+#endif
+ IRDA_ASSERT(self, return -1;);
+
+ pr_debug("%s\n", __func__);
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, SET2);
+ outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
+ iobase + ADCR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+#ifdef CONFIG_ARCH_NETWINDER
+ spin_lock_irqsave(&self->lock, flags);
+
+ disable_dma(self->io.dma);
+ clear_dma_ff(self->io.dma);
+ set_dma_mode(self->io.dma, DMA_MODE_READ);
+ set_dma_addr(self->io.dma, self->rx_buff_dma);
+ set_dma_count(self->io.dma, self->rx_buff.truesize);
+#else
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_MODE_READ);
+#endif
+ /*
+ * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
+ * important that we don't reset the Tx FIFO since it might not
+ * be finished transmitting yet
+ */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
+ self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+#ifdef CONFIG_ARCH_NETWINDER
+ hcr = inb(iobase + HCR);
+ outb(hcr | HCR_EN_DMA, iobase + HCR);
+ enable_dma(self->io.dma);
+ spin_unlock_irqrestore(&self->lock, flags);
+#else
+ outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
+#endif
+ /* Restore set */
+ outb(set, iobase + SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_dma_receive_complete (self)
+ *
+ * Finished with receiving a frame
+ *
+ */
+static int w83977af_dma_receive_complete(struct w83977af_ir *self)
+{
+ struct sk_buff *skb;
+ struct st_fifo *st_fifo;
+ int len;
+ int iobase;
+ __u8 set;
+ __u8 status;
+
+ pr_debug("%s\n", __func__);
+
+ st_fifo = &self->st_fifo;
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ iobase = self->io.fir_base;
+
+ /* Read status FIFO */
+ switch_bank(iobase, SET5);
+ while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
+ st_fifo->entries[st_fifo->tail].status = status;
+
+ st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
+
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ while (st_fifo->len) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FS_FO_ERR_MSK) {
+ if (status & FS_FO_LST_FR) {
+ /* Add number of lost frames to stats */
+ self->netdev->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FS_FO_MX_LEX)
+ self->netdev->stats.rx_length_errors++;
+
+ if (status & FS_FO_PHY_ERR)
+ self->netdev->stats.rx_frame_errors++;
+
+ if (status & FS_FO_CRC_ERR)
+ self->netdev->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FS_FO_RX_OV)
+ self->netdev->stats.rx_fifo_errors++;
+
+ if (status & FS_FO_FSF_OV)
+ self->netdev->stats.rx_fifo_errors++;
+
+ } else {
+ /* Check if we have transferred all data to memory */
+ switch_bank(iobase, SET0);
+ if (inb(iobase + USR) & USR_RDR)
+ udelay(80); /* Should be enough!? */
+
+ skb = dev_alloc_skb(len + 1);
+ if (!skb) {
+ pr_info("%s: memory squeeze, dropping frame\n",
+ __func__);
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return FALSE;
+ }
+
+ /* Align to 20 bytes */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len - 2);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 2);
+ } else {
+ skb_put(skb, len - 4);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return TRUE;
+}
+
+/*
+ * Function w83977af_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void w83977af_pio_receive(struct w83977af_ir *self)
+{
+ __u8 byte = 0x00;
+ int iobase;
+
+ IRDA_ASSERT(self, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase + RBR);
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ byte);
+ } while (inb(iobase + USR) & USR_RDR); /* Data available */
+}
+
+/*
+ * Function w83977af_sir_interrupt (self, isr)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
+{
+ int actual;
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ pr_debug("%s: isr=%#x\n", __func__, isr);
+
+ iobase = self->io.fir_base;
+ /* Transmit FIFO low on data */
+ if (isr & ISR_TXTH_I) {
+ /* Write data left in transmit buffer */
+ actual = w83977af_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0) {
+ new_icr |= ICR_ETXTHI;
+ } else {
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET0);
+ outb(AUDR_SFEND, iobase + AUDR);
+ outb(set, iobase + SSR);
+
+ self->netdev->stats.tx_packets++;
+
+ /* Feed me more packets */
+ netif_wake_queue(self->netdev);
+ new_icr |= ICR_ETBREI;
+ }
+ }
+ /* Check if transmission has completed */
+ if (isr & ISR_TXEMP_I) {
+ /* Check if we need to change the speed? */
+ if (self->new_speed) {
+ pr_debug("%s: Changing speed!\n", __func__);
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ new_icr |= ICR_ERBRI;
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (isr & ISR_RXTH_I) {
+ w83977af_pio_receive(self);
+
+ /* Keep receiving */
+ new_icr |= ICR_ERBRI;
+ }
+ return new_icr;
+}
+
+/*
+ * Function w83977af_fir_interrupt (self, isr)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
+{
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ iobase = self->io.fir_base;
+ set = inb(iobase + SSR);
+
+ /* End of frame detected in FIFO */
+ if (isr & (ISR_FEND_I | ISR_FSF_I)) {
+ if (w83977af_dma_receive_complete(self)) {
+ /* Wait for next status FIFO interrupt */
+ new_icr |= ICR_EFSFI;
+ } else {
+ /* DMA not finished yet */
+
+ /* Set timer value, resolution 1 ms */
+ switch_bank(iobase, SET4);
+ outb(0x01, iobase + TMRL); /* 1 ms */
+ outb(0x00, iobase + TMRH);
+
+ /* Start timer */
+ outb(IR_MSL_EN_TMR, iobase + IR_MSL);
+
+ new_icr |= ICR_ETMRI;
+ }
+ }
+ /* Timer finished */
+ if (isr & ISR_TMR_I) {
+ /* Disable timer */
+ switch_bank(iobase, SET4);
+ outb(0, iobase + IR_MSL);
+
+ /* Clear timer event */
+ /* switch_bank(iobase, SET0); */
+/* outb(ASCR_CTE, iobase+ASCR); */
+
+ /* Check if this is a TX timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ w83977af_dma_write(self, iobase);
+
+ new_icr |= ICR_EDMAI;
+ } else {
+ /* Check if DMA has now finished */
+ w83977af_dma_receive_complete(self);
+
+ new_icr |= ICR_EFSFI;
+ }
+ }
+ /* Finished with DMA */
+ if (isr & ISR_DMA_I) {
+ w83977af_dma_xmit_complete(self);
+
+ /* Check if there are more frames to be transmitted */
+ /* if (irda_device_txqueue_empty(self)) { */
+
+ /* Prepare for receive
+ *
+ * ** Netwinder Tx DMA likes that we do this anyway **
+ */
+ w83977af_dma_receive(self);
+ new_icr = ICR_EFSFI;
+ /* } */
+ }
+
+ /* Restore set */
+ outb(set, iobase + SSR);
+
+ return new_icr;
+}
+
+/*
+ * Function w83977af_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct w83977af_ir *self;
+ __u8 set, icr, isr;
+ int iobase;
+
+ self = netdev_priv(dev);
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET0);
+
+ icr = inb(iobase + ICR);
+ isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */
+
+ outb(0, iobase + ICR); /* Disable interrupts */
+
+ if (isr) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > PIO_MAX_SPEED)
+ icr = w83977af_fir_interrupt(self, isr);
+ else
+ icr = w83977af_sir_interrupt(self, isr);
+ }
+
+ outb(icr, iobase + ICR); /* Restore (new) interrupts */
+ outb(set, iobase + SSR); /* Restore bank register */
+ return IRQ_RETVAL(isr);
+}
+
+/*
+ * Function w83977af_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int w83977af_is_receiving(struct w83977af_ir *self)
+{
+ int status = FALSE;
+ int iobase;
+ __u8 set;
+
+ IRDA_ASSERT(self, return FALSE;);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET2);
+ if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(set, iobase + SSR);
+ } else {
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
+
+ return status;
+}
+
+/*
+ * Function w83977af_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int w83977af_net_open(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ char hwname[32];
+ __u8 set;
+
+ IRDA_ASSERT(dev, return -1;);
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Enable some interrupts so we can receive frames again */
+ switch_bank(iobase, SET0);
+ if (self->io.speed > 115200) {
+ outb(ICR_EFSFI, iobase + ICR);
+ w83977af_dma_receive(self);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
+
+ /* Restore bank register */
+ outb(set, iobase + SSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int w83977af_net_close(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ __u8 set;
+
+ IRDA_ASSERT(dev, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase + ICR);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(set, iobase + SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct w83977af_ir *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return -1;);
+
+ pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ w83977af_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = w83977af_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
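+
+/* Example (illustrative) module load with explicit resources, e.g. when
+ * the defaults above do not match the Super I/O setup:
+ *
+ *	modprobe w83977af_ir io=0x180 irq=11
+ */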
+
+/*
+ * Function init_module (void)
+ *
+ *
+ *
+ */
+module_init(w83977af_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ *
+ *
+ */
+module_exit(w83977af_cleanup);
diff --git a/drivers/staging/irda/drivers/w83977af_ir.h b/drivers/staging/irda/drivers/w83977af_ir.h
new file mode 100644
index 000000000000..fefe9b11e200
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af_ir.h
@@ -0,0 +1,198 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Thu Nov 19 13:55:34 1998
+ * Modified at: Tue Jan 11 13:08:19 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef W83977AF_IR_H
+#define W83977AF_IR_H
+
+#include <asm/io.h>
+#include <linux/types.h>
+
+/* Flags for configuration register CRF0 */
+#define ENBNKSEL 0x01
+#define APEDCRC 0x02
+#define TXW4C 0x04
+#define RXW4C 0x08
+
+/* Bank 0 */
+#define RBR 0x00 /* Receiver buffer register */
+#define TBR 0x00 /* Transmitter buffer register */
+
+#define ICR 0x01 /* Interrupt configuration register */
+#define ICR_ERBRI 0x01 /* Receiver buffer register interrupt */
+#define ICR_ETBREI 0x02 /* Transmit buffer register empty interrupt */
+#define ICR_EUSRI 0x04 /* IR status interrupt */
+#define ICR_EHSRI 0x04
+#define ICR_ETXURI 0x04 /* Tx underrun */
+#define ICR_EDMAI 0x10 /* DMA interrupt */
+#define ICR_ETXTHI 0x20 /* Transmitter threshold interrupt */
+#define ICR_EFSFI 0x40 /* Frame status FIFO interrupt */
+#define ICR_ETMRI 0x80 /* Timer interrupt */
+
+#define UFR 0x02 /* FIFO control register */
+#define UFR_EN_FIFO 0x01 /* Enable FIFO's */
+#define UFR_RXF_RST 0x02 /* Reset Rx FIFO */
+#define UFR_TXF_RST 0x04 /* Reset Tx FIFO */
+#define UFR_RXTL 0x80 /* Rx FIFO threshold (set to 16) */
+#define UFR_TXTL 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define ISR 0x02 /* Interrupt status register */
+#define ISR_RXTH_I 0x01 /* Receive threshold interrupt */
+#define ISR_TXEMP_I 0x02 /* Transmitter empty interrupt */
+#define ISR_FEND_I 0x04
+#define ISR_DMA_I 0x10
+#define ISR_TXTH_I 0x20 /* Transmitter threshold interrupt */
+#define ISR_FSF_I 0x40
+#define ISR_TMR_I 0x80 /* Timer interrupt */
+
+#define UCR 0x03 /* Uart control register */
+#define UCR_DLS8 0x03 /* 8N1 */
+
+#define SSR 0x03 /* Sets select register */
+#define SET0 UCR_DLS8 /* Make sure we keep 8N1 */
+#define SET1 (0x80|UCR_DLS8) /* Make sure we keep 8N1 */
+#define SET2 0xE0
+#define SET3 0xE4
+#define SET4 0xE8
+#define SET5 0xEC
+#define SET6 0xF0
+#define SET7 0xF4
+
+#define HCR 0x04
+#define HCR_MODE_MASK ~(0xD0)
+#define HCR_SIR 0x60
+#define HCR_MIR_576 0x20
+#define HCR_MIR_1152 0x80
+#define HCR_FIR 0xA0
+#define HCR_EN_DMA 0x04
+#define HCR_EN_IRQ 0x08
+#define HCR_TX_WT 0x08
+
+#define USR 0x05 /* IR status register */
+#define USR_RDR 0x01 /* Receive data ready */
+#define USR_TSRE 0x40 /* Transmitter empty? */
+
+#define AUDR 0x07
+#define AUDR_SFEND 0x08 /* Set a frame end */
+#define AUDR_RXBSY 0x20 /* Rx busy */
+#define AUDR_UNDR 0x40 /* Transmitter underrun */
+
+/* Set 2 */
+#define ABLL 0x00 /* Advanced baud rate divisor latch (low byte) */
+#define ABHL 0x01 /* Advanced baud rate divisor latch (high byte) */
+
+#define ADCR1 0x02
+#define ADCR1_ADV_SL 0x01
+#define ADCR1_D_CHSW 0x08 /* the specs are wrong; it's bit 3, not 4 */
+#define ADCR1_DMA_F 0x02
+
+#define ADCR2 0x04
+#define ADCR2_TXFS32 0x01
+#define ADCR2_RXFS32 0x04
+
+#define RXFDTH 0x07
+
+/* Set 3 */
+#define AUID 0x00
+
+/* Set 4 */
+#define TMRL 0x00 /* Timer value register (low byte) */
+#define TMRH 0x01 /* Timer value register (high byte) */
+
+#define IR_MSL 0x02 /* Infrared mode select */
+#define IR_MSL_EN_TMR 0x01 /* Enable timer */
+
+#define TFRLL 0x04 /* Transmitter frame length (low byte) */
+#define TFRLH 0x05 /* Transmitter frame length (high byte) */
+#define RFRLL 0x06 /* Receiver frame length (low byte) */
+#define RFRLH 0x07 /* Receiver frame length (high byte) */
+
+/* Set 5 */
+
+#define FS_FO 0x05 /* Frame status FIFO */
+#define FS_FO_FSFDR 0x80 /* Frame status FIFO data ready */
+#define FS_FO_LST_FR 0x40 /* Frame lost */
+#define FS_FO_MX_LEX 0x10 /* Max frame len exceeded */
+#define FS_FO_PHY_ERR 0x08 /* Physical layer error */
+#define FS_FO_CRC_ERR 0x04
+#define FS_FO_RX_OV 0x02 /* Receive overrun */
+#define FS_FO_FSF_OV 0x01 /* Frame status FIFO overrun */
+#define FS_FO_ERR_MSK 0x5f /* Error mask */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Set 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Set 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[10];
+ int head;
+ int tail;
+ int len;
+};
+
+/* Private data for each instance */
+struct w83977af_ir {
+ struct st_fifo st_fifo;
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ /* Note : currently locking is *very* incomplete, but this
+ * will get you started. Check in nsc-ircc.c for a proper
+ * locking strategy. - Jean II */
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+};
+
+static inline void switch_bank( int iobase, int set)
+{
+ outb(set, iobase+SSR);
+}
+
+#endif
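
Every register above lives in one of the banks selected through SSR, so a driver access is always a bank switch followed by the I/O itself. A minimal sketch of that pattern, assuming the caller already holds self->lock and saving/restoring the previously selected set the way FIR drivers commonly do (the function name is made up):

/* Illustrative only: enable the DMA and timer interrupts (bank 0). */
static void example_enable_dma_timer_irq(struct w83977af_ir *self)
{
	int iobase = self->io.fir_base;
	__u8 set;

	set = inb(iobase + SSR);	/* remember the current bank */
	switch_bank(iobase, SET0);	/* ICR is a bank 0 register */
	outb(ICR_EDMAI | ICR_ETMRI, iobase + ICR);
	outb(set, iobase + SSR);	/* restore the previous bank */
}
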
diff --git a/drivers/staging/irda/include/net/irda/af_irda.h b/drivers/staging/irda/include/net/irda/af_irda.h
new file mode 100644
index 000000000000..0df574931522
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/af_irda.h
@@ -0,0 +1,87 @@
+/*********************************************************************
+ *
+ * Filename: af_irda.h
+ * Version: 1.0
+ * Description: IrDA sockets declarations
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:13:12 1997
+ * Modified at: Fri Jan 28 13:16:32 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef AF_IRDA_H
+#define AF_IRDA_H
+
+#include <linux/irda.h>
+#include <net/irda/irda.h>
+#include <net/irda/iriap.h> /* struct iriap_cb */
+#include <net/irda/irias_object.h> /* struct ias_value */
+#include <net/irda/irlmp.h> /* struct lsap_cb */
+#include <net/irda/irttp.h> /* struct tsap_cb */
+#include <net/irda/discovery.h> /* struct discovery_t */
+#include <net/sock.h>
+
+/* IrDA Socket */
+struct irda_sock {
+ /* struct sock has to be the first member of irda_sock */
+ struct sock sk;
+ __u32 saddr; /* my local address */
+ __u32 daddr; /* peer address */
+
+ struct lsap_cb *lsap; /* LSAP used by Ultra */
+ __u8 pid; /* Protocol ID (PID) used by Ultra */
+
+ struct tsap_cb *tsap; /* TSAP used by this connection */
+ __u8 dtsap_sel; /* remote TSAP address */
+ __u8 stsap_sel; /* local TSAP address */
+
+ __u32 max_sdu_size_rx;
+ __u32 max_sdu_size_tx;
+ __u32 max_data_size;
+ __u8 max_header_size;
+ struct qos_info qos_tx;
+
+ __u16_host_order mask; /* Hint bits mask */
+ __u16_host_order hints; /* Hint bits */
+
+ void *ckey; /* IrLMP client handle */
+ void *skey; /* IrLMP service handle */
+
+ struct ias_object *ias_obj; /* Our service name + lsap in IAS */
+ struct iriap_cb *iriap; /* Used to query remote IAS */
+ struct ias_value *ias_result; /* Result of remote IAS query */
+
+ hashbin_t *cachelog; /* Result of discovery query */
+ __u32 cachedaddr; /* Result of selective discovery query */
+
+ int nslots; /* Number of slots to use for discovery */
+
+ int errno; /* status of the IAS query */
+
+ wait_queue_head_t query_wait; /* Wait for the answer to a query */
+ struct timer_list watchdog; /* Timeout for discovery */
+
+ LOCAL_FLOW tx_flow;
+ LOCAL_FLOW rx_flow;
+};
+
+static inline struct irda_sock *irda_sk(struct sock *sk)
+{
+ return (struct irda_sock *)sk;
+}
+
+#endif /* AF_IRDA_H */
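
The comment above struct irda_sock is load-bearing: because sk is the first member, a struct sock pointer and the enclosing struct irda_sock pointer share the same address, which is all irda_sk() relies on. A small illustration (the wrapper function is made up; the fields are the ones declared above):

/* Illustrative: reach the IrDA-specific part of a socket. */
static void example_init_discovery(struct sock *sk)
{
	struct irda_sock *self = irda_sk(sk);

	self->nslots = DISCOVERY_DEFAULT_SLOTS;	/* from discovery.h */
	self->mask.word = 0xffff;		/* match any hint bits */
}
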
diff --git a/drivers/staging/irda/include/net/irda/crc.h b/drivers/staging/irda/include/net/irda/crc.h
new file mode 100644
index 000000000000..f202296df9bb
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/crc.h
@@ -0,0 +1,29 @@
+/*********************************************************************
+ *
+ * Filename: crc.h
+ * Version:
+ * Description: CRC routines
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Sun May 2 20:25:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ ********************************************************************/
+
+#ifndef IRDA_CRC_H
+#define IRDA_CRC_H
+
+#include <linux/types.h>
+#include <linux/crc-ccitt.h>
+
+#define INIT_FCS 0xffff /* Initial FCS value */
+#define GOOD_FCS 0xf0b8 /* Good final FCS value */
+
+/* Recompute the FCS with one more character appended. */
+#define irda_fcs(fcs, c) crc_ccitt_byte(fcs, c)
+
+/* Recompute the FCS with len bytes appended. */
+#define irda_calc_crc16(fcs, buf, len) crc_ccitt(fcs, buf, len)
+
+#endif
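
Both macros are thin wrappers around the kernel's CRC-CCITT helpers. The receive-side convention is the usual HDLC one: seed with INIT_FCS, run the CRC across the payload plus the two appended (complemented) FCS bytes, and a good frame leaves the accumulator at GOOD_FCS. A minimal sketch, assuming buf already holds the unwrapped payload followed by its FCS:

/* Illustrative FCS check over a received, unwrapped SIR frame. */
static int example_fcs_ok(const __u8 *buf, int len_with_fcs)
{
	__u16 fcs = irda_calc_crc16(INIT_FCS, buf, len_with_fcs);

	return fcs == GOOD_FCS;
}
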
diff --git a/drivers/staging/irda/include/net/irda/discovery.h b/drivers/staging/irda/include/net/irda/discovery.h
new file mode 100644
index 000000000000..63ae32530567
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/discovery.h
@@ -0,0 +1,95 @@
+/*********************************************************************
+ *
+ * Filename: discovery.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 6 16:53:53 1999
+ * Modified at: Tue Oct 5 10:05:10 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef DISCOVERY_H
+#define DISCOVERY_H
+
+#include <asm/param.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/irlap_event.h> /* LAP_REASON */
+
+#define DISCOVERY_EXPIRE_TIMEOUT (2*sysctl_discovery_timeout*HZ)
+#define DISCOVERY_DEFAULT_SLOTS 0
+
+/*
+ * This type is used by the protocols that transmit 16-bit words in
+ * little endian format. A little endian machine stores MSB of word in
+ * byte[1] and LSB in byte[0]. A big endian machine stores MSB in byte[0]
+ * and LSB in byte[1].
+ *
+ * This structure is used in the code for things that are endian neutral
+ * but that fit in a word so that we can manipulate them efficiently.
+ * By endian neutral, I mean things that are really an array of bytes,
+ * and always used as such, for example the hint bits. Jean II
+ */
+typedef union {
+ __u16 word;
+ __u8 byte[2];
+} __u16_host_order;
+
+/* Types of discovery */
+typedef enum {
+ DISCOVERY_LOG, /* What's in our discovery log */
+ DISCOVERY_ACTIVE, /* Doing our own discovery on the medium */
+ DISCOVERY_PASSIVE, /* Peer doing discovery on the medium */
+ EXPIRY_TIMEOUT, /* Entry expired due to timeout */
+} DISCOVERY_MODE;
+
+#define NICKNAME_MAX_LEN 21
+
+/* Basic discovery information about a peer */
+typedef struct irda_device_info discinfo_t; /* linux/irda.h */
+
+/*
+ * The DISCOVERY structure is used for both discovery requests and responses
+ */
+typedef struct discovery_t {
+ irda_queue_t q; /* Must be first! */
+
+ discinfo_t data; /* Basic discovery information */
+ int name_len; /* Length of nickname */
+
+ LAP_REASON condition; /* More info about the discovery */
+ int gen_addr_bit; /* Need to generate a new device
+ * address? */
+ int nslots; /* Number of slots to use when
+ * discovering */
+ unsigned long timestamp; /* Last time discovered */
+ unsigned long firststamp; /* First time discovered */
+} discovery_t;
+
+void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *discovery);
+void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log);
+void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force);
+struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
+ __u16 mask, int old_entries);
+
+#endif
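
The __u16_host_order union is what lets the hint bits be built byte by byte, exactly as they appear on the wire, while still being masked and compared as a single word. A short illustration (the HINT_* values come from linux/irda.h; the function itself is made up):

/* Illustrative: advertise "computer" plus the OBEX extension hint. */
static __u16 example_build_hints(void)
{
	__u16_host_order hints;

	hints.word = 0;
	hints.byte[0] |= HINT_COMPUTER;	/* first hint byte on the wire */
	hints.byte[1] |= HINT_OBEX;	/* second (extension) hint byte */

	return hints.word;	/* same byte layout on any endianness */
}
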
diff --git a/drivers/staging/irda/include/net/irda/ircomm_core.h b/drivers/staging/irda/include/net/irda/ircomm_core.h
new file mode 100644
index 000000000000..2a580ce9edad
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_core.h
@@ -0,0 +1,106 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_core.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 08:58:43 1999
+ * Modified at: Mon Dec 13 11:52:29 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_CORE_H
+#define IRCOMM_CORE_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h>
+#include <net/irda/ircomm_event.h>
+
+#define IRCOMM_MAGIC 0x98347298
+#define IRCOMM_HEADER_SIZE 1
+
+struct ircomm_cb; /* Forward decl. */
+
+/*
+ * A small call-table, so we don't have to check the service-type whenever
+ * we want to do something
+ */
+typedef struct {
+ int (*data_request)(struct ircomm_cb *, struct sk_buff *, int clen);
+ int (*connect_request)(struct ircomm_cb *, struct sk_buff *,
+ struct ircomm_info *);
+ int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
+ int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
+ struct ircomm_info *);
+} call_t;
+
+struct ircomm_cb {
+ irda_queue_t queue;
+ magic_t magic;
+
+ notify_t notify;
+ call_t issue;
+
+ int state;
+ int line; /* Which TTY line we are using */
+
+ struct tsap_cb *tsap;
+ struct lsap_cb *lsap;
+
+ __u8 dlsap_sel; /* Destination LSAP/TSAP selector */
+ __u8 slsap_sel; /* Source LSAP/TSAP selector */
+
+ __u32 saddr; /* Source device address (link we are using) */
+ __u32 daddr; /* Destination device address */
+
+ int max_header_size; /* Header space we must reserve for each frame */
+ int max_data_size; /* The amount of data we can fill in each frame */
+
+ LOCAL_FLOW flow_status; /* Used by ircomm_lmp */
+ int pkt_count; /* Number of frames we have sent to IrLAP */
+
+ __u8 service_type;
+};
+
+extern hashbin_t *ircomm;
+
+struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line);
+int ircomm_close(struct ircomm_cb *self);
+
+int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb);
+void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb);
+void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb);
+int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb);
+int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr, struct sk_buff *skb,
+ __u8 service_type);
+void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata);
+int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata);
+void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info);
+void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow);
+
+#define ircomm_is_connected(self) (self->state == IRCOMM_CONN)
+
+#endif
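
The call_t table is filled in once, when ircomm_open() binds the instance to either IrTTP (3/9-wire) or bare IrLMP (3-wire raw); after that the core can issue requests without re-checking the service type. A hedged sketch of the dispatch, not copied from ircomm_core.c:

/* Illustrative: hand user data to whichever transport was bound. */
static int example_data_request(struct ircomm_cb *self, struct sk_buff *skb,
				int clen)
{
	if (!ircomm_is_connected(self))
		return -1;	/* a real caller would pick a proper errno */

	return self->issue.data_request(self, skb, clen);
}
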
diff --git a/drivers/staging/irda/include/net/irda/ircomm_event.h b/drivers/staging/irda/include/net/irda/ircomm_event.h
new file mode 100644
index 000000000000..5bbc32998d57
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_event.h
@@ -0,0 +1,83 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_event.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 23:51:13 1999
+ * Modified at: Thu Jun 10 08:36:25 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_EVENT_H
+#define IRCOMM_EVENT_H
+
+#include <net/irda/irmod.h>
+
+typedef enum {
+ IRCOMM_IDLE,
+ IRCOMM_WAITI,
+ IRCOMM_WAITR,
+ IRCOMM_CONN,
+} IRCOMM_STATE;
+
+/* IrCOMM Events */
+typedef enum {
+ IRCOMM_CONNECT_REQUEST,
+ IRCOMM_CONNECT_RESPONSE,
+ IRCOMM_TTP_CONNECT_INDICATION,
+ IRCOMM_LMP_CONNECT_INDICATION,
+ IRCOMM_TTP_CONNECT_CONFIRM,
+ IRCOMM_LMP_CONNECT_CONFIRM,
+
+ IRCOMM_LMP_DISCONNECT_INDICATION,
+ IRCOMM_TTP_DISCONNECT_INDICATION,
+ IRCOMM_DISCONNECT_REQUEST,
+
+ IRCOMM_TTP_DATA_INDICATION,
+ IRCOMM_LMP_DATA_INDICATION,
+ IRCOMM_DATA_REQUEST,
+ IRCOMM_CONTROL_REQUEST,
+ IRCOMM_CONTROL_INDICATION,
+} IRCOMM_EVENT;
+
+/*
+ * Used for passing information through the state-machine
+ */
+struct ircomm_info {
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ __u8 dlsap_sel;
+ LM_REASON reason; /* Reason for disconnect */
+ __u32 max_data_size;
+ __u32 max_header_size;
+
+ struct qos_info *qos;
+};
+
+extern const char *const ircomm_state[];
+
+struct ircomm_cb; /* Forward decl. */
+
+int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state);
+
+#endif
diff --git a/drivers/staging/irda/include/net/irda/ircomm_lmp.h b/drivers/staging/irda/include/net/irda/ircomm_lmp.h
new file mode 100644
index 000000000000..5042a5021a04
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_lmp.h
@@ -0,0 +1,36 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_lmp.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 10:06:07 1999
+ * Modified at: Fri Aug 13 07:32:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_LMP_H
+#define IRCOMM_LMP_H
+
+#include <net/irda/ircomm_core.h>
+
+int ircomm_open_lsap(struct ircomm_cb *self);
+
+#endif
diff --git a/drivers/staging/irda/include/net/irda/ircomm_param.h b/drivers/staging/irda/include/net/irda/ircomm_param.h
new file mode 100644
index 000000000000..1f67432321c4
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_param.h
@@ -0,0 +1,147 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_param.h
+ * Version: 1.0
+ * Description: Parameter handling for the IrCOMM protocol
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 08:47:28 1999
+ * Modified at: Wed Aug 25 13:46:33 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_PARAMS_H
+#define IRCOMM_PARAMS_H
+
+#include <net/irda/parameters.h>
+
+/* Parameters common to all service types */
+#define IRCOMM_SERVICE_TYPE 0x00
+#define IRCOMM_PORT_TYPE 0x01 /* Only used in LM-IAS */
+#define IRCOMM_PORT_NAME 0x02 /* Only used in LM-IAS */
+
+/* Parameters for both 3 wire and 9 wire */
+#define IRCOMM_DATA_RATE 0x10
+#define IRCOMM_DATA_FORMAT 0x11
+#define IRCOMM_FLOW_CONTROL 0x12
+#define IRCOMM_XON_XOFF 0x13
+#define IRCOMM_ENQ_ACK 0x14
+#define IRCOMM_LINE_STATUS 0x15
+#define IRCOMM_BREAK 0x16
+
+/* Parameters for 9 wire */
+#define IRCOMM_DTE 0x20
+#define IRCOMM_DCE 0x21
+#define IRCOMM_POLL 0x22
+
+/* Service type (details) */
+#define IRCOMM_3_WIRE_RAW 0x01
+#define IRCOMM_3_WIRE 0x02
+#define IRCOMM_9_WIRE 0x04
+#define IRCOMM_CENTRONICS 0x08
+
+/* Port type (details) */
+#define IRCOMM_SERIAL 0x00
+#define IRCOMM_PARALLEL 0x01
+
+/* Data format (details) */
+#define IRCOMM_WSIZE_5 0x00
+#define IRCOMM_WSIZE_6 0x01
+#define IRCOMM_WSIZE_7 0x02
+#define IRCOMM_WSIZE_8 0x03
+
+#define IRCOMM_1_STOP_BIT 0x00
+#define IRCOMM_2_STOP_BIT 0x04 /* 1.5 if char len 5 */
+
+#define IRCOMM_PARITY_DISABLE 0x00
+#define IRCOMM_PARITY_ENABLE 0x08
+
+#define IRCOMM_PARITY_ODD 0x00
+#define IRCOMM_PARITY_EVEN 0x10
+#define IRCOMM_PARITY_MARK 0x20
+#define IRCOMM_PARITY_SPACE 0x30
+
+/* Flow control */
+#define IRCOMM_XON_XOFF_IN 0x01
+#define IRCOMM_XON_XOFF_OUT 0x02
+#define IRCOMM_RTS_CTS_IN 0x04
+#define IRCOMM_RTS_CTS_OUT 0x08
+#define IRCOMM_DSR_DTR_IN 0x10
+#define IRCOMM_DSR_DTR_OUT 0x20
+#define IRCOMM_ENQ_ACK_IN 0x40
+#define IRCOMM_ENQ_ACK_OUT 0x80
+
+/* Line status */
+#define IRCOMM_OVERRUN_ERROR 0x02
+#define IRCOMM_PARITY_ERROR 0x04
+#define IRCOMM_FRAMING_ERROR 0x08
+
+/* DTE (Data terminal equipment) line settings */
+#define IRCOMM_DELTA_DTR 0x01
+#define IRCOMM_DELTA_RTS 0x02
+#define IRCOMM_DTR 0x04
+#define IRCOMM_RTS 0x08
+
+/* DCE (Data communications equipment) line settings */
+#define IRCOMM_DELTA_CTS 0x01 /* Clear to send has changed */
+#define IRCOMM_DELTA_DSR 0x02 /* Data set ready has changed */
+#define IRCOMM_DELTA_RI 0x04 /* Ring indicator has changed */
+#define IRCOMM_DELTA_CD 0x08 /* Carrier detect has changed */
+#define IRCOMM_CTS 0x10 /* Clear to send is high */
+#define IRCOMM_DSR 0x20 /* Data set ready is high */
+#define IRCOMM_RI 0x40 /* Ring indicator is high */
+#define IRCOMM_CD 0x80 /* Carrier detect is high */
+#define IRCOMM_DCE_DELTA_ANY 0x0f
+
+/*
+ * Parameter state
+ */
+struct ircomm_params {
+ /* General control params */
+ __u8 service_type;
+ __u8 port_type;
+ char port_name[32];
+
+ /* Control params for 3- and 9-wire service type */
+ __u32 data_rate; /* Data rate in bps */
+ __u8 data_format;
+ __u8 flow_control;
+ char xonxoff[2];
+ char enqack[2];
+ __u8 line_status;
+ __u8 _break;
+
+ __u8 null_modem;
+
+ /* Control params for 9-wire service type */
+ __u8 dte;
+ __u8 dce;
+ __u8 poll;
+
+ /* Control params for Centronics service type */
+};
+
+struct ircomm_tty_cb; /* Forward decl. */
+
+int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush);
+
+extern pi_param_info_t ircomm_param_info;
+
+#endif /* IRCOMM_PARAMS_H */
+
diff --git a/drivers/staging/irda/include/net/irda/ircomm_ttp.h b/drivers/staging/irda/include/net/irda/ircomm_ttp.h
new file mode 100644
index 000000000000..c5627288bca3
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_ttp.h
@@ -0,0 +1,37 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_ttp.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 10:06:07 1999
+ * Modified at: Fri Aug 13 07:32:22 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTP_H
+#define IRCOMM_TTP_H
+
+#include <net/irda/ircomm_core.h>
+
+int ircomm_open_tsap(struct ircomm_cb *self);
+
+#endif
+
diff --git a/drivers/staging/irda/include/net/irda/ircomm_tty.h b/drivers/staging/irda/include/net/irda/ircomm_tty.h
new file mode 100644
index 000000000000..8d4f588974bc
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_tty.h
@@ -0,0 +1,121 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 23:24:22 1999
+ * Modified at: Fri Jan 28 13:16:57 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTY_H
+#define IRCOMM_TTY_H
+
+#include <linux/serial.h>
+#include <linux/termios.h>
+#include <linux/timer.h>
+#include <linux/tty.h> /* struct tty_struct */
+
+#include <net/irda/irias_object.h>
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_param.h>
+
+#define IRCOMM_TTY_PORTS 32
+#define IRCOMM_TTY_MAGIC 0x3432
+#define IRCOMM_TTY_MAJOR 161
+#define IRCOMM_TTY_MINOR 0
+
+/* This is used as an initial value to max_header_size before the proper
+ * value is filled in (5 for ttp, 4 for lmp). This allows us to detect
+ * the state of the underlying connection. - Jean II */
+#define IRCOMM_TTY_HDR_UNINITIALISED 16
+/* Same for payload size. See qos.c for the smallest max data size */
+#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
+
+/*
+ * IrCOMM TTY driver state
+ */
+struct ircomm_tty_cb {
+ irda_queue_t queue; /* Must be first */
+ struct tty_port port;
+ magic_t magic;
+
+ int state; /* Connect state */
+
+ struct ircomm_cb *ircomm; /* IrCOMM layer instance */
+
+ struct sk_buff *tx_skb; /* Transmit buffer */
+ struct sk_buff *ctrl_skb; /* Control data buffer */
+
+ /* Parameters */
+ struct ircomm_params settings;
+
+ __u8 service_type; /* The service that we support */
+ int client; /* True if we are a client */
+ LOCAL_FLOW flow; /* IrTTP flow status */
+
+ int line;
+
+ __u8 dlsap_sel;
+ __u8 slsap_sel;
+
+ __u32 saddr;
+ __u32 daddr;
+
+ __u32 max_data_size; /* Max data we can transmit in one packet */
+ __u32 max_header_size; /* The amount of header space we must reserve */
+ __u32 tx_data_size; /* Max data size of current tx_skb */
+
+ struct iriap_cb *iriap; /* Instance used for querying remote IAS */
+ struct ias_object* obj;
+ void *skey;
+ void *ckey;
+
+ struct timer_list watchdog_timer;
+ struct work_struct tqueue;
+
+ /* Protect concurrent access to:
+ * o self->ctrl_skb
+ * o self->tx_skb
+ * Other things may need to be protected as well...
+ * Jean II */
+ spinlock_t spinlock;
+};
+
+void ircomm_tty_start(struct tty_struct *tty);
+void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
+
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios);
+
+#endif
+
+
+
+
+
+
+
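
IRCOMM_TTY_HDR_UNINITIALISED doubles as a sentinel: until the connect confirm/indication overwrites max_header_size with the real 4- or 5-byte value, the oversized default tells the TTY code that the link is not up yet and keeps it from overcommitting payload space. A one-line illustration of that test, given as an assumption about how callers use it rather than a quote from the driver:

/* Illustrative: has the underlying IrCOMM connection come up yet? */
static inline int example_link_established(const struct ircomm_tty_cb *self)
{
	return self->max_header_size != IRCOMM_TTY_HDR_UNINITIALISED;
}
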
diff --git a/drivers/staging/irda/include/net/irda/ircomm_tty_attach.h b/drivers/staging/irda/include/net/irda/ircomm_tty_attach.h
new file mode 100644
index 000000000000..20dcbdf258cf
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/ircomm_tty_attach.h
@@ -0,0 +1,92 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty_attach.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Jun 9 15:55:18 1999
+ * Modified at: Fri Dec 10 21:04:55 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRCOMM_TTY_ATTACH_H
+#define IRCOMM_TTY_ATTACH_H
+
+#include <net/irda/ircomm_tty.h>
+
+typedef enum {
+ IRCOMM_TTY_IDLE,
+ IRCOMM_TTY_SEARCH,
+ IRCOMM_TTY_QUERY_PARAMETERS,
+ IRCOMM_TTY_QUERY_LSAP_SEL,
+ IRCOMM_TTY_SETUP,
+ IRCOMM_TTY_READY,
+} IRCOMM_TTY_STATE;
+
+/* IrCOMM TTY Events */
+typedef enum {
+ IRCOMM_TTY_ATTACH_CABLE,
+ IRCOMM_TTY_DETACH_CABLE,
+ IRCOMM_TTY_DATA_REQUEST,
+ IRCOMM_TTY_DATA_INDICATION,
+ IRCOMM_TTY_DISCOVERY_REQUEST,
+ IRCOMM_TTY_DISCOVERY_INDICATION,
+ IRCOMM_TTY_CONNECT_CONFIRM,
+ IRCOMM_TTY_CONNECT_INDICATION,
+ IRCOMM_TTY_DISCONNECT_REQUEST,
+ IRCOMM_TTY_DISCONNECT_INDICATION,
+ IRCOMM_TTY_WD_TIMER_EXPIRED,
+ IRCOMM_TTY_GOT_PARAMETERS,
+ IRCOMM_TTY_GOT_LSAPSEL,
+} IRCOMM_TTY_EVENT;
+
+/* Used for passing information through the state-machine */
+struct ircomm_tty_info {
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ __u8 dlsap_sel;
+};
+
+extern const char *const ircomm_state[];
+extern const char *const ircomm_tty_state[];
+
+int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb, struct ircomm_tty_info *info);
+
+
+int ircomm_tty_attach_cable(struct ircomm_tty_cb *self);
+void ircomm_tty_detach_cable(struct ircomm_tty_cb *self);
+void ircomm_tty_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+void ircomm_tty_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb);
+void ircomm_tty_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self);
+void ircomm_tty_link_established(struct ircomm_tty_cb *self);
+
+#endif /* IRCOMM_TTY_ATTACH_H */
diff --git a/drivers/staging/irda/include/net/irda/irda.h b/drivers/staging/irda/include/net/irda/irda.h
new file mode 100644
index 000000000000..92c8fb575213
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irda.h
@@ -0,0 +1,115 @@
+/*********************************************************************
+ *
+ * Filename: irda.h
+ * Version: 1.0
+ * Description: IrDA common include file for kernel internal use
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:13:12 1997
+ * Modified at: Fri Jan 28 13:16:32 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef NET_IRDA_H
+#define NET_IRDA_H
+
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/kernel.h>
+#include <linux/if.h> /* sa_family_t in <linux/irda.h> */
+#include <linux/irda.h>
+
+typedef __u32 magic_t;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* Hack to do small backoff when setting media busy in IrLAP */
+#ifndef SMALL
+#define SMALL 5
+#endif
+
+#ifndef IRDA_MIN /* Let's not mix this MIN with other header files */
+#define IRDA_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef IRDA_ALIGN
+# define IRDA_ALIGN __attribute__((aligned))
+#endif
+
+#ifdef CONFIG_IRDA_DEBUG
+#define IRDA_ASSERT(expr, func) \
+do { if(!(expr)) { \
+ printk( "Assertion failed! %s:%s:%d %s\n", \
+ __FILE__,__func__,__LINE__,(#expr) ); \
+ func } } while (0)
+#define IRDA_ASSERT_LABEL(label) label
+#else
+#define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0)
+#define IRDA_ASSERT_LABEL(label)
+#endif /* CONFIG_IRDA_DEBUG */
+
+/*
+ * Magic numbers used by Linux-IrDA. Random numbers which must be unique to
+ * give the best protection
+ */
+
+#define IRTTY_MAGIC 0x2357
+#define LAP_MAGIC 0x1357
+#define LMP_MAGIC 0x4321
+#define LMP_LSAP_MAGIC 0x69333
+#define LMP_LAP_MAGIC 0x3432
+#define IRDA_DEVICE_MAGIC 0x63454
+#define IAS_MAGIC 0x007
+#define TTP_MAGIC 0x241169
+#define TTP_TSAP_MAGIC 0x4345
+#define IROBEX_MAGIC 0x341324
+#define HB_MAGIC 0x64534
+#define IRLAN_MAGIC 0x754
+#define IAS_OBJECT_MAGIC 0x34234
+#define IAS_ATTRIB_MAGIC 0x45232
+#define IRDA_TASK_MAGIC 0x38423
+
+#define IAS_DEVICE_ID 0x0000 /* Defined by IrDA, IrLMP section 4.1 (page 68) */
+#define IAS_PNP_ID 0xd342
+#define IAS_OBEX_ID 0x34323
+#define IAS_IRLAN_ID 0x34234
+#define IAS_IRCOMM_ID 0x2343
+#define IAS_IRLPT_ID 0x9876
+
+struct net_device;
+struct packet_type;
+
+void irda_proc_register(void);
+void irda_proc_unregister(void);
+
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
+
+int irsock_init(void);
+void irsock_cleanup(void);
+
+int irda_nl_register(void);
+void irda_nl_unregister(void);
+
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev);
+
+#endif /* NET_IRDA_H */
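
Note that IRDA_ASSERT's second argument is a statement, not a message: it runs when the check fails (typically a return), and the whole macro reduces to evaluating the expression when CONFIG_IRDA_DEBUG is off. The idiom used throughout the stack looks roughly like this (the function is only an example):

/* Illustrative sanity checks at the top of a method. */
static int example_check(struct iriap_cb *self)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IAS_MAGIC, return -1;);

	return 0;
}
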
diff --git a/drivers/staging/irda/include/net/irda/irda_device.h b/drivers/staging/irda/include/net/irda/irda_device.h
new file mode 100644
index 000000000000..664bf8178412
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irda_device.h
@@ -0,0 +1,285 @@
+/*********************************************************************
+ *
+ * Filename: irda_device.h
+ * Version: 0.9
+ * Description: Contains various declarations used by the drivers
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 14 12:41:42 1998
+ * Modified at: Mon Mar 20 09:08:57 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+/*
+ * This header contains all the IrDA definitions a driver really
+ * needs, and therefore the driver should not need to include
+ * any other IrDA headers - Jean II
+ */
+
+#ifndef IRDA_DEVICE_H
+#define IRDA_DEVICE_H
+
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/irda.h>
+#include <linux/types.h>
+
+#include <net/pkt_sched.h>
+#include <net/irda/irda.h>
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/irqueue.h> /* irda_queue_t */
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+
+/* Some non-standard interface flags (should not conflict with any in if.h) */
+#define IFF_SIR 0x0001 /* Supports SIR speeds */
+#define IFF_MIR 0x0002 /* Supports MIR speeds */
+#define IFF_FIR 0x0004 /* Supports FIR speeds */
+#define IFF_VFIR 0x0008 /* Supports VFIR speeds */
+#define IFF_PIO 0x0010 /* Supports PIO transfer of data */
+#define IFF_DMA 0x0020 /* Supports DMA transfer of data */
+#define IFF_SHM 0x0040 /* Supports shared memory data transfers */
+#define IFF_DONGLE 0x0080 /* Interface has a dongle attached */
+#define IFF_AIR 0x0100 /* Supports Advanced IR (AIR) standards */
+
+#define IO_XMIT 0x01
+#define IO_RECV 0x02
+
+typedef enum {
+ IRDA_IRLAP, /* IrDA mode, and deliver to IrLAP */
+ IRDA_RAW, /* IrDA mode */
+ SHARP_ASK,
+ TV_REMOTE, /* Also known as Consumer Electronics IR */
+} INFRARED_MODE;
+
+typedef enum {
+ IRDA_TASK_INIT, /* All tasks are initialized with this state */
+ IRDA_TASK_DONE, /* Signals that the task is finished */
+ IRDA_TASK_WAIT,
+ IRDA_TASK_WAIT1,
+ IRDA_TASK_WAIT2,
+ IRDA_TASK_WAIT3,
+ IRDA_TASK_CHILD_INIT, /* Initializing child task */
+ IRDA_TASK_CHILD_WAIT, /* Waiting for child task to finish */
+ IRDA_TASK_CHILD_DONE /* Child task is finished */
+} IRDA_TASK_STATE;
+
+struct irda_task;
+typedef int (*IRDA_TASK_CALLBACK) (struct irda_task *task);
+
+struct irda_task {
+ irda_queue_t q;
+ magic_t magic;
+
+ IRDA_TASK_STATE state;
+ IRDA_TASK_CALLBACK function;
+ IRDA_TASK_CALLBACK finished;
+
+ struct irda_task *parent;
+ struct timer_list timer;
+
+ void *instance; /* Instance being called */
+ void *param; /* Parameter to be used by instance */
+};
+
+/* Dongle info */
+struct dongle_reg;
+typedef struct {
+ struct dongle_reg *issue; /* Registration info */
+ struct net_device *dev; /* Device we are attached to */
+ struct irda_task *speed_task; /* Task handling speed change */
+ struct irda_task *reset_task; /* Task handling reset */
+ __u32 speed; /* Current speed */
+
+ /* Callbacks to the IrDA device driver */
+ int (*set_mode)(struct net_device *, int mode);
+ int (*read)(struct net_device *dev, __u8 *buf, int len);
+ int (*write)(struct net_device *dev, __u8 *buf, int len);
+ int (*set_dtr_rts)(struct net_device *dev, int dtr, int rts);
+} dongle_t;
+
+/* Dongle registration info */
+struct dongle_reg {
+ irda_queue_t q; /* Must be first */
+ IRDA_DONGLE type;
+
+ void (*open)(dongle_t *dongle, struct qos_info *qos);
+ void (*close)(dongle_t *dongle);
+ int (*reset)(struct irda_task *task);
+ int (*change_speed)(struct irda_task *task);
+ struct module *owner;
+};
+
+/*
+ * Per-packet information we need to hide inside sk_buff
+ * (must not exceed 48 bytes, check with struct sk_buff)
+ * The default_qdisc_pad field is a temporary hack.
+ */
+struct irda_skb_cb {
+ unsigned int default_qdisc_pad;
+ magic_t magic; /* Be sure that we can trust the information */
+ __u32 next_speed; /* The Speed to be set *after* this frame */
+ __u16 mtt; /* Minimum turn around time */
+ __u16 xbofs; /* Number of xbofs required, used by SIR mode */
+ __u16 next_xbofs; /* Number of xbofs required *after* this frame */
+ void *context; /* May be used by drivers */
+ void (*destructor)(struct sk_buff *skb); /* Used for flow control */
+ __u16 xbofs_delay; /* Number of xbofs used for generating the mtt */
+ __u8 line; /* Used by IrCOMM in IrLPT mode */
+};
+
+/* Chip specific info */
+typedef struct {
+ int cfg_base; /* Config register IO base */
+ int sir_base; /* SIR IO base */
+ int fir_base; /* FIR IO base */
+ int mem_base; /* Shared memory base */
+ int sir_ext; /* Length of SIR iobase */
+ int fir_ext; /* Length of FIR iobase */
+ int irq, irq2; /* Interrupts used */
+ int dma, dma2; /* DMA channel(s) used */
+ int fifo_size; /* FIFO size */
+ int irqflags; /* interrupt flags (ie, IRQF_SHARED) */
+ int direction; /* Link direction, used by some FIR drivers */
+ int enabled; /* Powered on? */
+ int suspended; /* Suspended by APM */
+ __u32 speed; /* Currently used speed */
+ __u32 new_speed; /* Speed we must change to when Tx is finished */
+ int dongle_id; /* Dongle or transceiver currently used */
+} chipio_t;
+
+/* IO buffer specific info (inspired by struct sk_buff) */
+typedef struct {
+ int state; /* Receiving state (transmit state not used) */
+ int in_frame; /* True if receiving frame */
+
+ __u8 *head; /* start of buffer */
+ __u8 *data; /* start of data in buffer */
+
+ int len; /* current length of data */
+ int truesize; /* total allocated size of buffer */
+ __u16 fcs;
+
+ struct sk_buff *skb; /* ZeroCopy Rx in async_unwrap_char() */
+} iobuff_t;
+
+/* Maximum SIR frame (skb) that we expect to receive *unwrapped*.
+ * Max LAP MTU (I field) is 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
+ * Max LAP header is 2 bytes (for now).
+ * Max CRC is 2 bytes at SIR, 4 bytes at FIR.
+ * Need 1 byte for skb_reserve() to align IP header for IrLAN.
+ * Add a few extra bytes just to be safe (buffer is power of two anyway)
+ * Jean II */
+#define IRDA_SKB_MAX_MTU 2064
+/* Maximum SIR frame that we expect to send, wrapped (i.e. with XBOFS
+ * and escaped characters on top of above). */
+#define IRDA_SIR_MAX_FRAME 4269
+
+/* The SIR unwrapper async_unwrap_char() will use a Rx-copy-break mechanism
+ * when using the optional ZeroCopy Rx, where only small frames are memcpy
+ * to a smaller skb to save memory. This is the threshold under which copy
+ * will happen (and over which it won't happen).
+ * Some FIR drivers may use this #define as well...
+ * This is the same value as various Ethernet drivers. - Jean II */
+#define IRDA_RX_COPY_THRESHOLD 256
+
+/* Function prototypes */
+int irda_device_init(void);
+void irda_device_cleanup(void);
+
+/* IrLAP entry points used by the drivers.
+ * We declare them here to avoid the driver pulling a whole bunch of stack
+ * headers they don't really need - Jean II */
+struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
+ const char *hw_name);
+void irlap_close(struct irlap_cb *self);
+
+/* Interface to be used by IrLAP */
+void irda_device_set_media_busy(struct net_device *dev, int status);
+int irda_device_is_media_busy(struct net_device *dev);
+int irda_device_is_receiving(struct net_device *dev);
+
+/* Interface for internal use */
+static inline int irda_device_txqueue_empty(const struct net_device *dev)
+{
+ return qdisc_all_tx_empty(dev);
+}
+int irda_device_set_raw_mode(struct net_device* self, int status);
+struct net_device *alloc_irdadev(int sizeof_priv);
+
+void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode);
+
+/*
+ * Function irda_get_mtt (skb)
+ *
+ * Utility function for getting the minimum turnaround time out of
+ * the skb, where it has been hidden in the cb field.
+ */
+static inline __u16 irda_get_mtt(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->mtt : 10000;
+}
+
+/*
+ * Function irda_get_next_speed (skb)
+ *
+ * Extract the speed that should be set *after* this frame from the skb
+ *
+ * Note : return -1 for user space frames
+ */
+static inline __u32 irda_get_next_speed(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->next_speed : -1;
+}
+
+/*
+ * Function irda_get_next_xbofs (skb)
+ *
+ * Extract the xbofs that should be set for this frame from the skb
+ *
+ * Note : default to 10 for user space frames
+ */
+static inline __u16 irda_get_xbofs(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->xbofs : 10;
+}
+
+/*
+ * Function irda_get_next_xbofs (skb)
+ *
+ * Extract the xbofs that should be set *after* this frame from the skb
+ *
+ * Note : return -1 for user space frames
+ */
+static inline __u16 irda_get_next_xbofs(const struct sk_buff *skb)
+{
+ const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
+ return (cb->magic == LAP_MAGIC) ? cb->next_xbofs : -1;
+}
+#endif /* IRDA_DEVICE_H */
+
+
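
irda_get_mtt() and irda_get_next_speed() are how a driver reads the per-frame parameters that IrLAP stashed in skb->cb, and the LAP_MAGIC test is what makes raw user-space frames fall back to safe defaults. A minimal sketch of how a FIR driver's transmit path typically consumes them (the names and the empty bodies are placeholders, not taken from any in-tree driver):

/* Illustrative fragment of a FIR driver's ndo_start_xmit routine. */
static netdev_tx_t example_hard_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	__u32 speed = irda_get_next_speed(skb);
	__u16 mtt = irda_get_mtt(skb);

	(void)dev;
	/* -1 means a user-space (raw) frame carrying no speed request. */
	if (speed != (__u32)-1) {
		/* a real driver would schedule the speed change here,
		 * either immediately or once this frame has been sent */
	}

	/* mtt is the minimum turnaround time (in microseconds) to wait
	 * before starting to transmit; a real driver honours it with
	 * udelay() or a hardware timer before kicking the DMA. */
	(void)mtt;

	dev_kfree_skb(skb);	/* placeholder only: no real transmit here */
	return NETDEV_TX_OK;
}
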
diff --git a/drivers/staging/irda/include/net/irda/iriap.h b/drivers/staging/irda/include/net/irda/iriap.h
new file mode 100644
index 000000000000..fcc896491a95
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/iriap.h
@@ -0,0 +1,108 @@
+/*********************************************************************
+ *
+ * Filename: iriap.h
+ * Version: 0.5
+ * Description: Information Access Protocol (IAP)
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Aug 21 00:02:07 1997
+ * Modified at: Sat Dec 25 16:42:09 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRIAP_H
+#define IRIAP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/iriap_event.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/timer.h> /* struct timer_list */
+
+#define IAP_LST 0x80
+#define IAP_ACK 0x40
+
+#define IAS_SERVER 0
+#define IAS_CLIENT 1
+
+/* IrIAP Op-codes */
+#define GET_INFO_BASE 0x01
+#define GET_OBJECTS 0x02
+#define GET_VALUE 0x03
+#define GET_VALUE_BY_CLASS 0x04
+#define GET_OBJECT_INFO 0x05
+#define GET_ATTRIB_NAMES 0x06
+
+#define IAS_SUCCESS 0
+#define IAS_CLASS_UNKNOWN 1
+#define IAS_ATTRIB_UNKNOWN 2
+#define IAS_DISCONNECT 10
+
+typedef void (*CONFIRM_CALLBACK)(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+
+struct iriap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic; /* Magic cookie */
+
+ int mode; /* Client or server */
+
+ __u32 saddr;
+ __u32 daddr;
+ __u8 operation;
+
+ struct sk_buff *request_skb;
+ struct lsap_cb *lsap;
+ __u8 slsap_sel;
+
+ /* Client states */
+ IRIAP_STATE client_state;
+ IRIAP_STATE call_state;
+
+ /* Server states */
+ IRIAP_STATE server_state;
+ IRIAP_STATE r_connect_state;
+
+ CONFIRM_CALLBACK confirm;
+ void *priv; /* Used to identify client */
+
+ __u8 max_header_size;
+ __u32 max_data_size;
+
+ struct timer_list watchdog_timer;
+};
+
+int iriap_init(void);
+void iriap_cleanup(void);
+
+struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+ CONFIRM_CALLBACK callback);
+void iriap_close(struct iriap_cb *self);
+
+int iriap_getvaluebyclass_request(struct iriap_cb *self,
+ __u32 saddr, __u32 daddr,
+ char *name, char *attr);
+void iriap_connect_request(struct iriap_cb *self);
+void iriap_send_ack( struct iriap_cb *self);
+void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb);
+
+void iriap_register_server(void);
+
+#endif
+
+
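
iriap_getvaluebyclass_request() is asynchronous: the answer arrives later through the CONFIRM_CALLBACK given to iriap_open(), carrying one of the IAS_* result codes above. A hedged sketch of a client-side query (LSAP_ANY comes from irlmp.h, and the queried class/attribute names are just the conventional IrCOMM ones):

/* Illustrative asynchronous IAS query from the client side. */
static void example_confirm(int result, __u16 obj_id,
			    struct ias_value *value, void *priv)
{
	if (result != IAS_SUCCESS)
		return;	/* e.g. IAS_CLASS_UNKNOWN or IAS_ATTRIB_UNKNOWN */
	/* value->type says whether an integer, string or octet sequence
	 * came back; value->t holds the actual payload. */
}

static int example_query(__u32 saddr, __u32 daddr, void *priv)
{
	struct iriap_cb *iriap;

	iriap = iriap_open(LSAP_ANY, IAS_CLIENT, priv, example_confirm);
	if (!iriap)
		return -1;

	return iriap_getvaluebyclass_request(iriap, saddr, daddr,
					     "IrDA:IrCOMM",
					     "IrDA:TinyTP:LsapSel");
}
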
diff --git a/drivers/staging/irda/include/net/irda/iriap_event.h b/drivers/staging/irda/include/net/irda/iriap_event.h
new file mode 100644
index 000000000000..89747f06d9eb
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/iriap_event.h
@@ -0,0 +1,85 @@
+/*********************************************************************
+ *
+ * Filename: iriap_event.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Sun Oct 31 22:02:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRIAP_FSM_H
+#define IRIAP_FSM_H
+
+/* Forward because of circular include dependencies */
+struct iriap_cb;
+
+/* IrIAP states */
+typedef enum {
+ /* Client */
+ S_DISCONNECT,
+ S_CONNECTING,
+ S_CALL,
+
+ /* S-Call */
+ S_MAKE_CALL,
+ S_CALLING,
+ S_OUTSTANDING,
+ S_REPLYING,
+ S_WAIT_FOR_CALL,
+ S_WAIT_ACTIVE,
+
+ /* Server */
+ R_DISCONNECT,
+ R_CALL,
+
+ /* R-Connect */
+ R_WAITING,
+ R_WAIT_ACTIVE,
+ R_RECEIVING,
+ R_EXECUTE,
+ R_RETURNING,
+} IRIAP_STATE;
+
+typedef enum {
+ IAP_CALL_REQUEST,
+ IAP_CALL_REQUEST_GVBC,
+ IAP_CALL_RESPONSE,
+ IAP_RECV_F_LST,
+ IAP_LM_DISCONNECT_INDICATION,
+ IAP_LM_CONNECT_INDICATION,
+ IAP_LM_CONNECT_CONFIRM,
+} IRIAP_EVENT;
+
+void iriap_next_client_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_call_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_server_state (struct iriap_cb *self, IRIAP_STATE state);
+void iriap_next_r_connect_state(struct iriap_cb *self, IRIAP_STATE state);
+
+
+void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+void iriap_do_call_event (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+void iriap_do_server_event (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+#endif /* IRIAP_FSM_H */
+
diff --git a/drivers/staging/irda/include/net/irda/irias_object.h b/drivers/staging/irda/include/net/irda/irias_object.h
new file mode 100644
index 000000000000..83f78081799c
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irias_object.h
@@ -0,0 +1,108 @@
+/*********************************************************************
+ *
+ * Filename: irias_object.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 1 22:49:50 1998
+ * Modified at: Wed Dec 15 11:20:57 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef LM_IAS_OBJECT_H
+#define LM_IAS_OBJECT_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h>
+
+/* LM-IAS Attribute types */
+#define IAS_MISSING 0
+#define IAS_INTEGER 1
+#define IAS_OCT_SEQ 2
+#define IAS_STRING 3
+
+/* Object ownership of attributes (user or kernel) */
+#define IAS_KERNEL_ATTR 0
+#define IAS_USER_ATTR 1
+
+/*
+ * LM-IAS Object
+ */
+struct ias_object {
+ irda_queue_t q; /* Must be first! */
+ magic_t magic;
+
+ char *name;
+ int id;
+ hashbin_t *attribs;
+};
+
+/*
+ * Values used by LM-IAS attributes
+ */
+struct ias_value {
+ __u8 type; /* Value description */
+ __u8 owner; /* Managed from user/kernel space */
+ int charset; /* Only used by string type */
+ int len;
+
+ /* Value */
+ union {
+ int integer;
+ char *string;
+ __u8 *oct_seq;
+ } t;
+};
+
+/*
+ * Attributes used by LM-IAS objects
+ */
+struct ias_attrib {
+ irda_queue_t q; /* Must be first! */
+ int magic;
+
+ char *name; /* Attribute name */
+ struct ias_value *value; /* Attribute value */
+};
+
+struct ias_object *irias_new_object(char *name, int id);
+void irias_insert_object(struct ias_object *obj);
+int irias_delete_object(struct ias_object *obj);
+int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib,
+ int cleanobject);
+void __irias_delete_object(struct ias_object *obj);
+
+void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
+ int user);
+void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
+ int user);
+void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
+ int len, int user);
+int irias_object_change_attribute(char *obj_name, char *attrib_name,
+ struct ias_value *new_value);
+struct ias_object *irias_find_object(char *name);
+struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name);
+
+struct ias_value *irias_new_string_value(char *string);
+struct ias_value *irias_new_integer_value(int integer);
+struct ias_value *irias_new_octseq_value(__u8 *octseq , int len);
+struct ias_value *irias_new_missing_value(void);
+void irias_delete_value(struct ias_value *value);
+
+extern struct ias_value irias_missing;
+extern hashbin_t *irias_objects;
+
+#endif
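
These calls are how a service publishes itself in the local LM-IAS database, so that a peer's GET_VALUE_BY_CLASS query (see iriap.h) can resolve its LSAP. A minimal sketch of the usual registration sequence; treat the exact class and attribute strings as the conventional ones rather than something mandated by this header:

/* Illustrative: publish a TinyTP LSAP selector for a service. */
static void example_register_service(__u8 lsap_sel)
{
	struct ias_object *obj;

	obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID);
	if (!obj)
		return;

	irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel", lsap_sel,
				 IAS_KERNEL_ATTR);
	irias_insert_object(obj);
}
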
diff --git a/drivers/staging/irda/include/net/irda/irlan_client.h b/drivers/staging/irda/include/net/irda/irlan_client.h
new file mode 100644
index 000000000000..fa8455eda280
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_client.h
@@ -0,0 +1,42 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client.h
+ * Version: 0.3
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 14:13:34 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_CLIENT_H
+#define IRLAN_CLIENT_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irias_object.h>
+#include <net/irda/irlan_event.h>
+
+void irlan_client_discovery_indication(discinfo_t *, DISCOVERY_MODE, void *);
+void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr);
+
+void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb);
+void irlan_client_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+#endif
diff --git a/drivers/staging/irda/include/net/irda/irlan_common.h b/drivers/staging/irda/include/net/irda/irlan_common.h
new file mode 100644
index 000000000000..550c2d6ec7ff
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_common.h
@@ -0,0 +1,230 @@
+/*********************************************************************
+ *
+ * Filename: irlan_common.h
+ * Version: 0.8
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun Oct 31 19:41:24 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_H
+#define IRLAN_H
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+
+#include <net/irda/irttp.h>
+
+#define IRLAN_MTU 1518
+#define IRLAN_TIMEOUT 10*HZ /* 10 seconds */
+
+/* Command packet types */
+#define CMD_GET_PROVIDER_INFO 0
+#define CMD_GET_MEDIA_CHAR 1
+#define CMD_OPEN_DATA_CHANNEL 2
+#define CMD_CLOSE_DATA_CHAN 3
+#define CMD_RECONNECT_DATA_CHAN 4
+#define CMD_FILTER_OPERATION 5
+
+/* Some responses */
+#define RSP_SUCCESS 0
+#define RSP_INSUFFICIENT_RESOURCES 1
+#define RSP_INVALID_COMMAND_FORMAT 2
+#define RSP_COMMAND_NOT_SUPPORTED 3
+#define RSP_PARAM_NOT_SUPPORTED 4
+#define RSP_VALUE_NOT_SUPPORTED 5
+#define RSP_NOT_OPEN 6
+#define RSP_AUTHENTICATION_REQUIRED 7
+#define RSP_INVALID_PASSWORD 8
+#define RSP_PROTOCOL_ERROR 9
+#define RSP_ASYNCHRONOUS_ERROR 255
+
+/* Media types */
+#define MEDIA_802_3 1
+#define MEDIA_802_5 2
+
+/* Filter parameters */
+#define DATA_CHAN 1
+#define FILTER_TYPE 2
+#define FILTER_MODE 3
+
+/* Filter types */
+#define IRLAN_DIRECTED 0x01
+#define IRLAN_FUNCTIONAL 0x02
+#define IRLAN_GROUP 0x04
+#define IRLAN_MAC_FRAME 0x08
+#define IRLAN_MULTICAST 0x10
+#define IRLAN_BROADCAST 0x20
+#define IRLAN_IPX_SOCKET 0x40
+
+/* Filter modes */
+#define ALL 1
+#define FILTER 2
+#define NONE 3
+
+/* Filter operations */
+#define GET 1
+#define CLEAR 2
+#define ADD 3
+#define REMOVE 4
+#define DYNAMIC 5
+
+/* Access types */
+#define ACCESS_DIRECT 1
+#define ACCESS_PEER 2
+#define ACCESS_HOSTED 3
+
+#define IRLAN_BYTE 0
+#define IRLAN_SHORT 1
+#define IRLAN_ARRAY 2
+
+/* IrLAN sits on top of IrTTP */
+#define IRLAN_MAX_HEADER (TTP_HEADER+LMP_HEADER)
+/* 1 byte for the command code and 1 byte for the parameter count */
+#define IRLAN_CMD_HEADER 2
+
+#define IRLAN_STRING_PARAMETER_LEN(name, value) (1 + strlen((name)) + 2 \
+ + strlen ((value)))
+#define IRLAN_BYTE_PARAMETER_LEN(name) (1 + strlen((name)) + 2 + 1)
+#define IRLAN_SHORT_PARAMETER_LEN(name) (1 + strlen((name)) + 2 + 2)
+
+/*
+ * IrLAN client
+ */
+struct irlan_client_cb {
+ int state;
+
+ int open_retries;
+
+ struct tsap_cb *tsap_ctrl;
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ int access_type; /* Access type of provider */
+ __u8 reconnect_key[255];
+ __u8 key_len;
+
+ __u16 recv_arb_val;
+ __u16 max_frame;
+ int filter_type;
+
+ int unicast_open;
+ int broadcast_open;
+
+ int tx_busy;
+ struct sk_buff_head txq; /* Transmit control queue */
+
+ struct iriap_cb *iriap;
+
+ struct timer_list kick_timer;
+};
+
+/*
+ * IrLAN provider
+ */
+struct irlan_provider_cb {
+ int state;
+
+ struct tsap_cb *tsap_ctrl;
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ /*
+ * Store some values here which are used by the provider to parse
+ * the filter operations
+ */
+ int data_chan;
+ int filter_type;
+ int filter_mode;
+ int filter_operation;
+ int filter_entry;
+ int access_type; /* Access type */
+ __u16 send_arb_val;
+
+ __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
+};
+
+/*
+ * IrLAN control block
+ */
+struct irlan_cb {
+ int magic;
+ struct list_head dev_list;
+ struct net_device *dev; /* Ethernet device structure*/
+
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+ int disconnect_reason; /* Why we got disconnected */
+
+ int media; /* Media type */
+ __u8 version[2]; /* IrLAN version */
+
+ struct tsap_cb *tsap_data; /* Data TSAP */
+
+ int use_udata; /* Use Unit Data transfers */
+
+ __u8 stsap_sel_data; /* Source data TSAP selector */
+ __u8 dtsap_sel_data; /* Destination data TSAP selector */
+ __u8 dtsap_sel_ctrl; /* Destination ctrl TSAP selector */
+
+ struct irlan_client_cb client; /* Client specific fields */
+ struct irlan_provider_cb provider; /* Provider specific fields */
+
+ __u32 max_sdu_size;
+ __u8 max_header_size;
+
+ wait_queue_head_t open_wait;
+ struct timer_list watchdog_timer;
+};
+
+void irlan_close(struct irlan_cb *self);
+void irlan_close_tsaps(struct irlan_cb *self);
+
+int irlan_register_netdev(struct irlan_cb *self);
+void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel);
+void irlan_start_watchdog_timer(struct irlan_cb *self, int timeout);
+
+void irlan_open_data_tsap(struct irlan_cb *self);
+
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self);
+
+struct irlan_cb *irlan_get_any(void);
+void irlan_get_provider_info(struct irlan_cb *self);
+void irlan_get_media_char(struct irlan_cb *self);
+void irlan_open_data_channel(struct irlan_cb *self);
+void irlan_close_data_channel(struct irlan_cb *self);
+void irlan_set_multicast_filter(struct irlan_cb *self, int status);
+void irlan_set_broadcast_filter(struct irlan_cb *self, int status);
+
+int irlan_insert_byte_param(struct sk_buff *skb, char *param, __u8 value);
+int irlan_insert_short_param(struct sk_buff *skb, char *param, __u16 value);
+int irlan_insert_string_param(struct sk_buff *skb, char *param, char *value);
+int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *value,
+ __u16 value_len);
+
+int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len);
+
+#endif
+
+
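As a reading aid for the parameter-length macros and irlan_insert_*_param() helpers declared above, the sketch below sizes and appends one string parameter to a command frame. The parameter name "FILTER_MODE" and value "ALL" are merely sample strings, the helper name is hypothetical, and the skb is assumed to be preallocated by the caller.

static int demo_add_filter_mode(struct sk_buff *skb)
{
	/* 1 (name-length byte) + strlen("FILTER_MODE") + 2 (value-length
	 * bytes) + strlen("ALL") = 1 + 11 + 2 + 3 = 17 bytes on the wire */
	int needed = IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "ALL");

	if (skb_tailroom(skb) < needed)
		return -1;

	return irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
}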
diff --git a/drivers/staging/irda/include/net/irda/irlan_eth.h b/drivers/staging/irda/include/net/irda/irlan_eth.h
new file mode 100644
index 000000000000..de5c81691f33
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_eth.h
@@ -0,0 +1,32 @@
+/*********************************************************************
+ *
+ * Filename: irlan_eth.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 15 08:36:58 1998
+ * Modified at: Fri May 14 23:29:00 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_ETH_H
+#define IRLAN_ETH_H
+
+struct net_device *alloc_irlandev(const char *name);
+int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb);
+
+void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow);
+#endif
diff --git a/drivers/staging/irda/include/net/irda/irlan_event.h b/drivers/staging/irda/include/net/irda/irlan_event.h
new file mode 100644
index 000000000000..018b5a77e610
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_event.h
@@ -0,0 +1,81 @@
+/*********************************************************************
+ *
+ * Filename: irlan_event.h
+ * Version:
+ * Description: LAN access
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Tue Feb 2 09:45:17 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_EVENT_H
+#define IRLAN_EVENT_H
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+#include <net/irda/irlan_common.h>
+
+typedef enum {
+ IRLAN_IDLE,
+ IRLAN_QUERY,
+ IRLAN_CONN,
+ IRLAN_INFO,
+ IRLAN_MEDIA,
+ IRLAN_OPEN,
+ IRLAN_WAIT,
+ IRLAN_ARB,
+ IRLAN_DATA,
+ IRLAN_CLOSE,
+ IRLAN_SYNC
+} IRLAN_STATE;
+
+typedef enum {
+ IRLAN_DISCOVERY_INDICATION,
+ IRLAN_IAS_PROVIDER_AVAIL,
+ IRLAN_IAS_PROVIDER_NOT_AVAIL,
+ IRLAN_LAP_DISCONNECT,
+ IRLAN_LMP_DISCONNECT,
+ IRLAN_CONNECT_COMPLETE,
+ IRLAN_DATA_INDICATION,
+ IRLAN_DATA_CONNECT_INDICATION,
+ IRLAN_RETRY_CONNECT,
+
+ IRLAN_CONNECT_INDICATION,
+ IRLAN_GET_INFO_CMD,
+ IRLAN_GET_MEDIA_CMD,
+ IRLAN_OPEN_DATA_CMD,
+ IRLAN_FILTER_CONFIG_CMD,
+
+ IRLAN_CHECK_CON_ARB,
+ IRLAN_PROVIDER_SIGNAL,
+
+ IRLAN_WATCHDOG_TIMEOUT,
+} IRLAN_EVENT;
+
+extern const char * const irlan_state[];
+
+void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state);
+void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state);
+
+#endif
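
For orientation, a sketch of how the dispatcher above would be driven when a discovery result reaches the client side: 'self' is assumed to be a valid, open struct irlan_cb, the wrapper name is made up, and discovery-style events carry no skb.

static void demo_client_discovered(struct irlan_cb *self)
{
	irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL);
}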
diff --git a/drivers/staging/irda/include/net/irda/irlan_filter.h b/drivers/staging/irda/include/net/irda/irlan_filter.h
new file mode 100644
index 000000000000..a5a2539485bd
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_filter.h
@@ -0,0 +1,35 @@
+/*********************************************************************
+ *
+ * Filename: irlan_filter.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Jan 29 15:24:08 1999
+ * Modified at: Sun Feb 7 23:35:31 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_FILTER_H
+#define IRLAN_FILTER_H
+
+void irlan_check_command_param(struct irlan_cb *self, char *param,
+ char *value);
+void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+#ifdef CONFIG_PROC_FS
+void irlan_print_filter(struct seq_file *seq, int filter_type);
+#endif
+
+#endif /* IRLAN_FILTER_H */
diff --git a/drivers/staging/irda/include/net/irda/irlan_provider.h b/drivers/staging/irda/include/net/irda/irlan_provider.h
new file mode 100644
index 000000000000..92f3b0e1029b
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlan_provider.h
@@ -0,0 +1,52 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider.h
+ * Version: 0.1
+ * Description: IrDA LAN access layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun May 9 12:26:11 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAN_SERVER_H
+#define IRLAN_SERVER_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irlan_common.h>
+
+void irlan_provider_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb);
+
+
+void irlan_provider_connect_response(struct irlan_cb *, struct tsap_cb *);
+
+int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb);
+int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
+ struct sk_buff *skb);
+
+void irlan_provider_send_reply(struct irlan_cb *self, int command,
+ int ret_code);
+int irlan_provider_open_ctrl_tsap(struct irlan_cb *self);
+
+#endif
+
+
diff --git a/drivers/staging/irda/include/net/irda/irlap.h b/drivers/staging/irda/include/net/irda/irlap.h
new file mode 100644
index 000000000000..6f23e820618c
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlap.h
@@ -0,0 +1,311 @@
+/*********************************************************************
+ *
+ * Filename: irlap.h
+ * Version: 0.8
+ * Description: An IrDA LAP driver for Linux
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Fri Dec 10 13:21:17 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_H
+#define IRLAP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+
+#include <net/irda/irqueue.h> /* irda_queue_t */
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/discovery.h> /* discovery_t */
+#include <net/irda/irlap_event.h> /* IRLAP_STATE, ... */
+#include <net/irda/irmod.h> /* struct notify_t */
+
+#define CONFIG_IRDA_DYNAMIC_WINDOW 1
+
+#define LAP_RELIABLE 1
+#define LAP_UNRELIABLE 0
+
+#define LAP_ADDR_HEADER 1 /* IrLAP Address Header */
+#define LAP_CTRL_HEADER 1 /* IrLAP Control Header */
+
+/* May be different when we get VFIR */
+#define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)
+
+/* Each IrDA device gets a random 32 bits IRLAP device address */
+#define LAP_ALEN 4
+
+#define BROADCAST 0xffffffff /* Broadcast device address */
+#define CBROADCAST 0xfe /* Connection broadcast address */
+#define XID_FORMAT 0x01 /* Discovery XID format */
+
+/* Nobody seems to use this constant. */
+#define LAP_WINDOW_SIZE 8
+/* We keep the LAP queue very small to minimise the amount of buffering.
+ * This improves latency and reduces resource consumption.
+ * This works only because we have synchronous refilling of IrLAP through
+ * the flow control mechanism (via scheduler and IrTTP).
+ * 2 buffers is the minimum we can work with, one that we send while polling
+ * IrTTP, and another to know that we should not send the pf bit.
+ * Jean II */
+#define LAP_HIGH_THRESHOLD 2
+/* Some rare non TTP clients don't implement flow control, and
+ * so don't comply with the above limit (and neither with this one).
+ * For IAP and management, it doesn't matter, because they never transmit much.
+ * For IrLPT, this should be fixed.
+ * - Jean II */
+#define LAP_MAX_QUEUE 10
+/* Please note that all IrDA management frames (LMP/TTP conn req/disc and
+ * IAS queries) fall in the second category and are sent to LAP even if TTP
+ * is stopped. This means that those frames will wait only a maximum of
+ * two (2) data frames before being sent on the "wire", which speeds up
+ * new socket setup when the link is saturated.
+ * Same story for two sockets competing for the medium: if one saturates
+ * the LAP, when the other wants to transmit it only has to wait for a
+ * maximum of three (3) packets (2 + one scheduling), which improves the
+ * performance of delay-sensitive applications.
+ * Jean II */
+
+#define NR_EXPECTED 1
+#define NR_UNEXPECTED 0
+#define NR_INVALID -1
+
+#define NS_EXPECTED 1
+#define NS_UNEXPECTED 0
+#define NS_INVALID -1
+
+/*
+ * Meta information passed within the IrLAP state machine
+ */
+struct irlap_info {
+ __u8 caddr; /* Connection address */
+ __u8 control; /* Frame type */
+ __u8 cmd;
+
+ __u32 saddr;
+ __u32 daddr;
+
+ int pf; /* Poll/final bit set */
+
+ __u8 nr; /* Sequence number of next frame expected */
+ __u8 ns; /* Sequence number of frame sent */
+
+ int S; /* Number of slots */
+ int slot; /* Random chosen slot */
+ int s; /* Current slot */
+
+ discovery_t *discovery; /* Discovery information */
+};
+
+/* Main structure of IrLAP */
+struct irlap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic;
+
+ /* Device we are attached to */
+ struct net_device *netdev;
+ char hw_name[2*IFNAMSIZ + 1];
+
+ /* Connection state */
+ volatile IRLAP_STATE state; /* Current state */
+
+ /* Timers used by IrLAP */
+ struct timer_list query_timer;
+ struct timer_list slot_timer;
+ struct timer_list discovery_timer;
+ struct timer_list final_timer;
+ struct timer_list poll_timer;
+ struct timer_list wd_timer;
+ struct timer_list backoff_timer;
+
+ /* Media busy stuff */
+ struct timer_list media_busy_timer;
+ int media_busy;
+
+ /* Timeouts which will be different with different turn time */
+ int slot_timeout;
+ int poll_timeout;
+ int final_timeout;
+ int wd_timeout;
+
+ struct sk_buff_head txq; /* Frames to be transmitted */
+ struct sk_buff_head txq_ultra;
+
+ __u8 caddr; /* Connection address */
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+
+ int retry_count; /* Times tried to establish connection */
+ int add_wait; /* True if we are waiting for frame */
+
+ __u8 connect_pending;
+ __u8 disconnect_pending;
+
+ /* To send a faster RR if tx queue empty */
+#ifdef CONFIG_IRDA_FAST_RR
+ int fast_RR_timeout;
+ int fast_RR;
+#endif /* CONFIG_IRDA_FAST_RR */
+
+ int N1; /* N1 * F-timer = Negotiated link disconnect warning threshold */
+ int N2; /* N2 * F-timer = Negotiated link disconnect time */
+ int N3; /* Connection retry count */
+
+ int local_busy;
+ int remote_busy;
+ int xmitflag;
+
+ __u8 vs; /* Next frame to be sent */
+ __u8 vr; /* Next frame to be received */
+ __u8 va; /* Last frame acked */
+ int window; /* Nr of I-frames allowed to send */
+ int window_size; /* Current negotiated window size */
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ __u32 line_capacity; /* Number of bytes allowed to send */
+ __u32 bytes_left; /* Number of bytes still allowed to transmit */
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+
+ struct sk_buff_head wx_list;
+
+ __u8 ack_required;
+
+ /* XID parameters */
+ __u8 S; /* Number of slots */
+ __u8 slot; /* Random chosen slot */
+ __u8 s; /* Current slot */
+ int frame_sent; /* Have we sent reply? */
+
+ hashbin_t *discovery_log;
+ discovery_t *discovery_cmd;
+
+ __u32 speed; /* Link speed */
+
+ struct qos_info qos_tx; /* QoS requested by peer */
+ struct qos_info qos_rx; /* QoS requested by self */
+ struct qos_info *qos_dev; /* QoS supported by device */
+
+ notify_t notify; /* Callbacks to IrLMP */
+
+ int mtt_required; /* Minimum turnaround time required */
+ int xbofs_delay; /* Nr of XBOFs used to enforce the MTT */
+ int bofs_count; /* Negotiated extra BOFs */
+ int next_bofs; /* Negotiated extra BOFs after next frame */
+
+ int mode; /* IrLAP mode (primary, secondary or monitor) */
+};
+
+/*
+ * Function prototypes
+ */
+int irlap_init(void);
+void irlap_cleanup(void);
+
+struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
+ const char *hw_name);
+void irlap_close(struct irlap_cb *self);
+
+void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
+ struct qos_info *qos, int sniff);
+void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_connect_confirm(struct irlap_cb *, struct sk_buff *skb);
+
+void irlap_data_indication(struct irlap_cb *, struct sk_buff *, int unreliable);
+void irlap_data_request(struct irlap_cb *, struct sk_buff *, int unreliable);
+
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_request(struct irlap_cb *, struct sk_buff *);
+void irlap_unitdata_indication(struct irlap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlap_disconnect_request(struct irlap_cb *);
+void irlap_disconnect_indication(struct irlap_cb *, LAP_REASON reason);
+
+void irlap_status_indication(struct irlap_cb *, int quality_of_link);
+
+void irlap_test_request(__u8 *info, int len);
+
+void irlap_discovery_request(struct irlap_cb *, discovery_t *discovery);
+void irlap_discovery_confirm(struct irlap_cb *, hashbin_t *discovery_log);
+void irlap_discovery_indication(struct irlap_cb *, discovery_t *discovery);
+
+void irlap_reset_indication(struct irlap_cb *self);
+void irlap_reset_confirm(void);
+
+void irlap_update_nr_received(struct irlap_cb *, int nr);
+int irlap_validate_nr_received(struct irlap_cb *, int nr);
+int irlap_validate_ns_received(struct irlap_cb *, int ns);
+
+int irlap_generate_rand_time_slot(int S, int s);
+void irlap_initiate_connection_state(struct irlap_cb *);
+void irlap_flush_all_queues(struct irlap_cb *);
+void irlap_wait_min_turn_around(struct irlap_cb *, struct qos_info *);
+
+void irlap_apply_default_connection_parameters(struct irlap_cb *self);
+void irlap_apply_connection_parameters(struct irlap_cb *self, int now);
+
+#define IRLAP_GET_HEADER_SIZE(self) (LAP_MAX_HEADER)
+#define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
+
+/* Return TRUE if the node is in primary mode (i.e. master)
+ * - Jean II */
+static inline int irlap_is_primary(struct irlap_cb *self)
+{
+ int ret;
+ switch(self->state) {
+ case LAP_XMIT_P:
+ case LAP_NRM_P:
+ ret = 1;
+ break;
+ case LAP_XMIT_S:
+ case LAP_NRM_S:
+ ret = 0;
+ break;
+ default:
+ ret = -1;
+ }
+ return ret;
+}
+
+/* Clear a pending IrLAP disconnect. - Jean II */
+static inline void irlap_clear_disconnect(struct irlap_cb *self)
+{
+ self->disconnect_pending = FALSE;
+}
+
+/*
+ * Function irlap_next_state (self, state)
+ *
+ * Switches state and provides debug information
+ *
+ */
+static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
+{
+ /*
+ if (!self || self->magic != LAP_MAGIC)
+ return;
+
+ pr_debug("next LAP state = %s\n", irlap_state[state]);
+ */
+ self->state = state;
+}
+
+#endif
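
A short sketch tying two of the helpers above together: it asks whether more frames may reasonably be queued to a LAP instance, reusing the LAP_HIGH_THRESHOLD limit from the flow-control comment earlier in this header. The helper itself is hypothetical and 'self' is assumed to be a valid instance.

static int demo_lap_can_queue(struct irlap_cb *self)
{
	/* irlap_is_primary() returns -1 while the link is in neither a
	 * primary nor a secondary data state */
	if (irlap_is_primary(self) < 0)
		return 0;

	return IRLAP_GET_TX_QUEUE_LEN(self) < LAP_HIGH_THRESHOLD;
}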
diff --git a/drivers/staging/irda/include/net/irda/irlap_event.h b/drivers/staging/irda/include/net/irda/irlap_event.h
new file mode 100644
index 000000000000..e4325fee1267
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlap_event.h
@@ -0,0 +1,129 @@
+/*********************************************************************
+ *
+ *
+ * Filename: irlap_event.h
+ * Version: 0.1
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Tue Dec 21 11:20:30 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_EVENT_H
+#define IRLAP_EVENT_H
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+struct irlap_info;
+
+/* IrLAP States */
+typedef enum {
+ LAP_NDM, /* Normal disconnected mode */
+ LAP_QUERY,
+ LAP_REPLY,
+ LAP_CONN, /* Connect indication */
+ LAP_SETUP, /* Setting up connection */
+ LAP_OFFLINE, /* A really boring state */
+ LAP_XMIT_P,
+ LAP_PCLOSE,
+ LAP_NRM_P, /* Normal response mode as primary */
+ LAP_RESET_WAIT,
+ LAP_RESET,
+ LAP_NRM_S, /* Normal response mode as secondary */
+ LAP_XMIT_S,
+ LAP_SCLOSE,
+ LAP_RESET_CHECK,
+} IRLAP_STATE;
+
+/* IrLAP Events */
+typedef enum {
+ /* Services events */
+ DISCOVERY_REQUEST,
+ CONNECT_REQUEST,
+ CONNECT_RESPONSE,
+ DISCONNECT_REQUEST,
+ DATA_REQUEST,
+ RESET_REQUEST,
+ RESET_RESPONSE,
+
+ /* Send events */
+ SEND_I_CMD,
+ SEND_UI_FRAME,
+
+ /* Receive events */
+ RECV_DISCOVERY_XID_CMD,
+ RECV_DISCOVERY_XID_RSP,
+ RECV_SNRM_CMD,
+ RECV_TEST_CMD,
+ RECV_TEST_RSP,
+ RECV_UA_RSP,
+ RECV_DM_RSP,
+ RECV_RD_RSP,
+ RECV_I_CMD,
+ RECV_I_RSP,
+ RECV_UI_FRAME,
+ RECV_FRMR_RSP,
+ RECV_RR_CMD,
+ RECV_RR_RSP,
+ RECV_RNR_CMD,
+ RECV_RNR_RSP,
+ RECV_REJ_CMD,
+ RECV_REJ_RSP,
+ RECV_SREJ_CMD,
+ RECV_SREJ_RSP,
+ RECV_DISC_CMD,
+
+ /* Timer events */
+ SLOT_TIMER_EXPIRED,
+ QUERY_TIMER_EXPIRED,
+ FINAL_TIMER_EXPIRED,
+ POLL_TIMER_EXPIRED,
+ DISCOVERY_TIMER_EXPIRED,
+ WD_TIMER_EXPIRED,
+ BACKOFF_TIMER_EXPIRED,
+ MEDIA_BUSY_TIMER_EXPIRED,
+} IRLAP_EVENT;
+
+/*
+ * Disconnect reason code
+ */
+typedef enum { /* FIXME check the first two reason codes */
+ LAP_DISC_INDICATION=1, /* Received a disconnect request from peer */
+ LAP_NO_RESPONSE, /* Too many retransmits without response */
+ LAP_RESET_INDICATION, /* Too many retransmits, or invalid nr/ns */
+ LAP_FOUND_NONE, /* No devices were discovered */
+ LAP_MEDIA_BUSY,
+ LAP_PRIMARY_CONFLICT,
+} LAP_REASON;
+
+extern const char *const irlap_state[];
+
+void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+void irlap_print_event(IRLAP_EVENT event);
+
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+
+#endif
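
As a sketch of how these events are expected to reach the state machine, a timer handler would typically post its event with no skb and no irlap_info attached; the wrapper name below is hypothetical and 'self' is assumed valid.

static void demo_final_timer_expired(struct irlap_cb *self)
{
	irlap_do_event(self, FINAL_TIMER_EXPIRED, NULL, NULL);
}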
diff --git a/drivers/staging/irda/include/net/irda/irlap_frame.h b/drivers/staging/irda/include/net/irda/irlap_frame.h
new file mode 100644
index 000000000000..cbc12a926e5f
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlap_frame.h
@@ -0,0 +1,167 @@
+/*********************************************************************
+ *
+ * Filename: irlap_frame.h
+ * Version: 0.9
+ * Description: IrLAP frame declarations
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 10:27:26 1997
+ * Modified at: Sat Dec 25 21:07:26 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRLAP_FRAME_H
+#define IRLAP_FRAME_H
+
+#include <linux/skbuff.h>
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlap_cb;
+struct discovery_t;
+
+/* Frame types and templates */
+#define INVALID 0xff
+
+/* Unnumbered (U) commands */
+#define SNRM_CMD 0x83 /* Set Normal Response Mode */
+#define DISC_CMD 0x43 /* Disconnect */
+#define XID_CMD 0x2f /* Exchange Station Identification */
+#define TEST_CMD 0xe3 /* Test */
+
+/* Unnumbered responses */
+#define RNRM_RSP 0x83 /* Request Normal Response Mode */
+#define UA_RSP 0x63 /* Unnumbered Acknowledgement */
+#define FRMR_RSP 0x87 /* Frame Reject */
+#define DM_RSP 0x0f /* Disconnect Mode */
+#define RD_RSP 0x43 /* Request Disconnection */
+#define XID_RSP 0xaf /* Exchange Station Identification */
+#define TEST_RSP 0xe3 /* Test frame */
+
+/* Supervisory (S) */
+#define RR 0x01 /* Receive Ready */
+#define REJ 0x09 /* Reject */
+#define RNR 0x05 /* Receive Not Ready */
+#define SREJ 0x0d /* Selective Reject */
+
+/* Information (I) */
+#define I_FRAME 0x00 /* Information Format */
+#define UI_FRAME 0x03 /* Unnumbered Information */
+
+#define CMD_FRAME 0x01
+#define RSP_FRAME 0x00
+
+#define PF_BIT 0x10 /* Poll/final bit */
+
+/* Some IrLAP field lengths */
+/*
+ * Only the baud rate triplet is 4 bytes (its PV can be 2 bytes).
+ * All other params (7) are 3 bytes each, so that's 7*3 + 1*4 = 25 bytes.
+ */
+#define IRLAP_NEGOCIATION_PARAMS_LEN 25
+#define IRLAP_DISCOVERY_INFO_LEN 32
+
+struct disc_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+} __packed;
+
+struct xid_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+ __u8 ident; /* Should always be XID_FORMAT */
+ __le32 saddr; /* Source device address */
+ __le32 daddr; /* Destination device address */
+ __u8 flags; /* Discovery flags */
+ __u8 slotnr;
+ __u8 version;
+} __packed;
+
+struct test_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+ __le32 saddr; /* Source device address */
+ __le32 daddr; /* Destination device address */
+} __packed;
+
+struct ua_frame {
+ __u8 caddr;
+ __u8 control;
+ __le32 saddr; /* Source device address */
+ __le32 daddr; /* Dest device address */
+} __packed;
+
+struct dm_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+} __packed;
+
+struct rd_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+} __packed;
+
+struct rr_frame {
+ __u8 caddr; /* Connection address */
+ __u8 control;
+} __packed;
+
+struct i_frame {
+ __u8 caddr;
+ __u8 control;
+} __packed;
+
+struct snrm_frame {
+ __u8 caddr;
+ __u8 control;
+ __le32 saddr;
+ __le32 daddr;
+ __u8 ncaddr;
+} __packed;
+
+void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
+void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s,
+ __u8 command,
+ struct discovery_t *discovery);
+void irlap_send_snrm_frame(struct irlap_cb *, struct qos_info *);
+void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
+ struct sk_buff *cmd);
+void irlap_send_ua_response_frame(struct irlap_cb *, struct qos_info *);
+void irlap_send_dm_frame(struct irlap_cb *self);
+void irlap_send_rd_frame(struct irlap_cb *self);
+void irlap_send_disc_frame(struct irlap_cb *self);
+void irlap_send_rr_frame(struct irlap_cb *self, int command);
+
+void irlap_send_data_primary(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_primary_poll(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_secondary(struct irlap_cb *, struct sk_buff *);
+void irlap_send_data_secondary_final(struct irlap_cb *, struct sk_buff *);
+void irlap_resend_rejected_frames(struct irlap_cb *, int command);
+void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
+
+void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ __u8 caddr, int command);
+
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+ struct sk_buff *skb);
+
+#endif
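
Because the frame structures above are __packed, sizeof() matches the on-the-wire layout; for example an XID frame body is 1 (caddr) + 1 (control) + 1 (ident) + 4 (saddr) + 4 (daddr) + 1 (flags) + 1 (slotnr) + 1 (version) = 14 bytes. The helper below is only an illustration of that property; its name is made up.

static int demo_skb_holds_xid(const struct sk_buff *skb)
{
	return skb->len >= sizeof(struct xid_frame);	/* 14 bytes */
}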
diff --git a/drivers/staging/irda/include/net/irda/irlmp.h b/drivers/staging/irda/include/net/irda/irlmp.h
new file mode 100644
index 000000000000..f132924cc9da
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlmp.h
@@ -0,0 +1,295 @@
+/*********************************************************************
+ *
+ * Filename: irlmp.h
+ * Version: 0.9
+ * Description: IrDA Link Management Protocol (LMP) layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 17 20:54:32 1997
+ * Modified at: Fri Dec 10 13:23:01 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLMP_H
+#define IRLMP_H
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/types.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/qos.h>
+#include <net/irda/irlap.h> /* LAP_MAX_HEADER, ... */
+#include <net/irda/irlmp_event.h>
+#include <net/irda/irqueue.h>
+#include <net/irda/discovery.h>
+
+/* LSAP-SEL's */
+#define LSAP_MASK 0x7f
+#define LSAP_IAS 0x00
+#define LSAP_ANY 0xff
+#define LSAP_MAX 0x6f /* 0x70-0x7f are reserved */
+#define LSAP_CONNLESS 0x70 /* Connectionless LSAP, mostly used for Ultra */
+
+#define DEV_ADDR_ANY 0xffffffff
+
+#define LMP_HEADER 2 /* Dest LSAP + Source LSAP */
+#define LMP_CONTROL_HEADER 4 /* LMP_HEADER + opcode + parameter */
+#define LMP_PID_HEADER 1 /* Used by Ultra */
+#define LMP_MAX_HEADER (LMP_CONTROL_HEADER+LAP_MAX_HEADER)
+
+#define LM_MAX_CONNECTIONS 10
+
+#define LM_IDLE_TIMEOUT 2*HZ /* 2 seconds for now */
+
+typedef enum {
+ S_PNP = 0,
+ S_PDA,
+ S_COMPUTER,
+ S_PRINTER,
+ S_MODEM,
+ S_FAX,
+ S_LAN,
+ S_TELEPHONY,
+ S_COMM,
+ S_OBEX,
+ S_ANY,
+ S_END,
+} SERVICE;
+
+/* For selective discovery */
+typedef void (*DISCOVERY_CALLBACK1) (discinfo_t *, DISCOVERY_MODE, void *);
+/* For expiry (the same) */
+typedef void (*DISCOVERY_CALLBACK2) (discinfo_t *, DISCOVERY_MODE, void *);
+
+typedef struct {
+ irda_queue_t queue; /* Must be first */
+
+ __u16_host_order hints; /* Hint bits */
+} irlmp_service_t;
+
+typedef struct {
+ irda_queue_t queue; /* Must be first */
+
+ __u16_host_order hint_mask;
+
+ DISCOVERY_CALLBACK1 disco_callback; /* Selective discovery */
+ DISCOVERY_CALLBACK2 expir_callback; /* Selective expiration */
+ void *priv; /* Used to identify client */
+} irlmp_client_t;
+
+/*
+ * Information about each logical LSAP connection
+ */
+struct lsap_cb {
+ irda_queue_t queue; /* Must be first */
+ magic_t magic;
+
+ unsigned long connected; /* set_bit used on this */
+ int persistent;
+
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address (if connected) */
+#ifdef CONFIG_IRDA_ULTRA
+ __u8 pid; /* Used by connectionless LSAP */
+#endif /* CONFIG_IRDA_ULTRA */
+ struct sk_buff *conn_skb; /* Store skb here while connecting */
+
+ struct timer_list watchdog_timer;
+
+ LSAP_STATE lsap_state; /* Connection state */
+ notify_t notify; /* Indication/Confirm entry points */
+ struct qos_info qos; /* QoS for this connection */
+
+ struct lap_cb *lap; /* Pointer to LAP connection structure */
+};
+
+/*
+ * Used for caching the last slsap->dlsap->handle mapping
+ *
+ * We don't need to keep/match the remote address in the cache because
+ * we are associated with a specific LAP (which implies it).
+ * Jean II
+ */
+typedef struct {
+ int valid;
+
+ __u8 slsap_sel;
+ __u8 dlsap_sel;
+ struct lsap_cb *lsap;
+} CACHE_ENTRY;
+
+/*
+ * Information about each registered IrLAP layer
+ */
+struct lap_cb {
+ irda_queue_t queue; /* Must be first */
+ magic_t magic;
+
+ int reason; /* LAP disconnect reason */
+
+ IRLMP_STATE lap_state;
+
+ struct irlap_cb *irlap; /* Instance of IrLAP layer */
+ hashbin_t *lsaps; /* LSAP associated with this link */
+ struct lsap_cb *flow_next; /* Next lsap to be polled for Tx */
+
+ __u8 caddr; /* Connection address */
+ __u32 saddr; /* Source device address */
+ __u32 daddr; /* Destination device address */
+
+ struct qos_info *qos; /* LAP QoS for this session */
+ struct timer_list idle_timer;
+
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ /* The lsap cache was moved from struct irlmp_cb to here because
+ * it must be associated with the specific LAP. Also, this
+ * improves performance. - Jean II */
+ CACHE_ENTRY cache; /* Caching last slsap->dlsap->handle mapping */
+#endif
+};
+
+/*
+ * Main structure for IrLMP
+ */
+struct irlmp_cb {
+ magic_t magic;
+
+ __u8 conflict_flag;
+
+ discovery_t discovery_cmd; /* Discovery command to use by IrLAP */
+ discovery_t discovery_rsp; /* Discovery response to use by IrLAP */
+
+ /* Last lsap picked automatically by irlmp_find_free_slsap() */
+ int last_lsap_sel;
+
+ struct timer_list discovery_timer;
+
+ hashbin_t *links; /* IrLAP connection table */
+ hashbin_t *unconnected_lsaps;
+ hashbin_t *clients;
+ hashbin_t *services;
+
+ hashbin_t *cachelog; /* Current discovery log */
+
+ int running;
+
+ __u16_host_order hints; /* Hint bits */
+};
+
+/* Prototype declarations */
+int irlmp_init(void);
+void irlmp_cleanup(void);
+struct lsap_cb *irlmp_open_lsap(__u8 slsap, notify_t *notify, __u8 pid);
+void irlmp_close_lsap( struct lsap_cb *self);
+
+__u16 irlmp_service_to_hint(int service);
+void *irlmp_register_service(__u16 hints);
+int irlmp_unregister_service(void *handle);
+void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv);
+int irlmp_unregister_client(void *handle);
+int irlmp_update_client(void *handle, __u16 hint_mask,
+ DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv);
+
+void irlmp_register_link(struct irlap_cb *, __u32 saddr, notify_t *);
+void irlmp_unregister_link(__u32 saddr);
+
+int irlmp_connect_request(struct lsap_cb *, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *, struct sk_buff *);
+void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb);
+int irlmp_connect_response(struct lsap_cb *, struct sk_buff *);
+void irlmp_connect_confirm(struct lsap_cb *, struct sk_buff *);
+struct lsap_cb *irlmp_dup(struct lsap_cb *self, void *instance);
+
+void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
+ struct sk_buff *userdata);
+int irlmp_disconnect_request(struct lsap_cb *, struct sk_buff *userdata);
+
+void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE mode);
+void irlmp_discovery_request(int nslots);
+discinfo_t *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
+void irlmp_do_expiry(void);
+void irlmp_do_discovery(int nslots);
+discovery_t *irlmp_get_discovery_response(void);
+void irlmp_discovery_expiry(discinfo_t *expiry, int number);
+
+int irlmp_data_request(struct lsap_cb *, struct sk_buff *);
+void irlmp_data_indication(struct lsap_cb *, struct sk_buff *);
+
+int irlmp_udata_request(struct lsap_cb *, struct sk_buff *);
+void irlmp_udata_indication(struct lsap_cb *, struct sk_buff *);
+
+#ifdef CONFIG_IRDA_ULTRA
+int irlmp_connless_data_request(struct lsap_cb *, struct sk_buff *, __u8);
+void irlmp_connless_data_indication(struct lsap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlmp_status_indication(struct lap_cb *, LINK_STATUS link, LOCK_STATUS lock);
+void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow);
+
+LM_REASON irlmp_convert_lap_reason(LAP_REASON);
+
+static inline __u32 irlmp_get_saddr(const struct lsap_cb *self)
+{
+ return (self && self->lap) ? self->lap->saddr : 0;
+}
+
+static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
+{
+ return (self && self->lap) ? self->lap->daddr : 0;
+}
+
+const char *irlmp_reason_str(LM_REASON reason);
+
+extern int sysctl_discovery_timeout;
+extern int sysctl_discovery_slots;
+extern int sysctl_discovery;
+extern int sysctl_lap_keepalive_time; /* in ms, default is LM_IDLE_TIMEOUT */
+extern struct irlmp_cb *irlmp;
+
+/* Check if LAP queue is full.
+ * Used by IrTTP for flow control, see comments in irlap.h - Jean II */
+static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
+{
+ if (self == NULL)
+ return 0;
+ if (self->lap == NULL)
+ return 0;
+ if (self->lap->irlap == NULL)
+ return 0;
+
+ return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD;
+}
+
+/* After doing an irlmp_dup(), this gets one of the two sockets back into
+ * a state where it's waiting for incoming connections.
+ * Note: this can be used *only* if the socket is not yet connected
+ * (i.e. NO irlmp_connect_response() done on this socket).
+ * - Jean II */
+static inline void irlmp_listen(struct lsap_cb *self)
+{
+ self->dlsap_sel = LSAP_ANY;
+ self->lap = NULL;
+ self->lsap_state = LSAP_DISCONNECTED;
+ /* Started when we received the LM_CONNECT_INDICATION */
+ del_timer(&self->watchdog_timer);
+}
+
+#endif
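
To show how the pieces above fit together, here is a hedged sketch of opening an LSAP with a notify block. The callback and helper names are made up; notify_t and irda_notify_init() come from irmod.h (added elsewhere in this series). Passing LSAP_ANY asks IrLMP to pick a free source selector, and a pid of 0 means this is not a connectionless (Ultra) LSAP.

static struct lsap_cb *demo_open_lsap(int (*data_ind)(void *, void *,
						      struct sk_buff *),
				      void *priv)
{
	notify_t notify;

	irda_notify_init(&notify);		/* zero all entry points */
	notify.data_indication = data_ind;	/* only hook what we need */
	notify.instance = priv;
	strcpy(notify.name, "demo");

	return irlmp_open_lsap(LSAP_ANY, &notify, 0);
}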
diff --git a/drivers/staging/irda/include/net/irda/irlmp_event.h b/drivers/staging/irda/include/net/irda/irlmp_event.h
new file mode 100644
index 000000000000..9e4ec17a7449
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlmp_event.h
@@ -0,0 +1,98 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_event.h
+ * Version: 0.1
+ * Description: IrDA-LMP event handling
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Thu Jul 8 12:18:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRLMP_EVENT_H
+#define IRLMP_EVENT_H
+
+/* A few forward declarations (to make compiler happy) */
+struct irlmp_cb;
+struct lsap_cb;
+struct lap_cb;
+struct discovery_t;
+
+/* LAP states */
+typedef enum {
+ /* IrLAP connection control states */
+ LAP_STANDBY, /* No LAP connection */
+ LAP_U_CONNECT, /* Starting LAP connection */
+ LAP_ACTIVE, /* LAP connection is active */
+} IRLMP_STATE;
+
+/* LSAP connection control states */
+typedef enum {
+ LSAP_DISCONNECTED, /* No LSAP connection */
+ LSAP_CONNECT, /* Connect indication from peer */
+ LSAP_CONNECT_PEND, /* Connect request from service user */
+ LSAP_DATA_TRANSFER_READY, /* LSAP connection established */
+ LSAP_SETUP, /* Trying to set up LSAP connection */
+ LSAP_SETUP_PEND, /* Request to start LAP connection */
+} LSAP_STATE;
+
+typedef enum {
+ /* LSAP events */
+ LM_CONNECT_REQUEST,
+ LM_CONNECT_CONFIRM,
+ LM_CONNECT_RESPONSE,
+ LM_CONNECT_INDICATION,
+
+ LM_DISCONNECT_INDICATION,
+ LM_DISCONNECT_REQUEST,
+
+ LM_DATA_REQUEST,
+ LM_UDATA_REQUEST,
+ LM_DATA_INDICATION,
+ LM_UDATA_INDICATION,
+
+ LM_WATCHDOG_TIMEOUT,
+
+ /* IrLAP events */
+ LM_LAP_CONNECT_REQUEST,
+ LM_LAP_CONNECT_INDICATION,
+ LM_LAP_CONNECT_CONFIRM,
+ LM_LAP_DISCONNECT_INDICATION,
+ LM_LAP_DISCONNECT_REQUEST,
+ LM_LAP_DISCOVERY_REQUEST,
+ LM_LAP_DISCOVERY_CONFIRM,
+ LM_LAP_IDLE_TIMEOUT,
+} IRLMP_EVENT;
+
+extern const char *const irlmp_state[];
+extern const char *const irlsap_state[];
+
+void irlmp_watchdog_timer_expired(void *data);
+void irlmp_discovery_timer_expired(void *data);
+void irlmp_idle_timer_expired(void *data);
+
+void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb);
+int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb);
+
+#endif /* IRLMP_EVENT_H */
+
+
+
+
diff --git a/drivers/staging/irda/include/net/irda/irlmp_frame.h b/drivers/staging/irda/include/net/irda/irlmp_frame.h
new file mode 100644
index 000000000000..1906eb71422e
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irlmp_frame.h
@@ -0,0 +1,62 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_frame.h
+ * Version: 0.9
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 02:09:59 1997
+ * Modified at: Fri Dec 10 13:21:53 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRMLP_FRAME_H
+#define IRMLP_FRAME_H
+
+#include <linux/skbuff.h>
+
+#include <net/irda/discovery.h>
+
+/* IrLMP frame opcodes */
+#define CONNECT_CMD 0x01
+#define CONNECT_CNF 0x81
+#define DISCONNECT 0x02
+#define ACCESSMODE_CMD 0x03
+#define ACCESSMODE_CNF 0x83
+
+#define CONTROL_BIT 0x80
+
+void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ int expedited, struct sk_buff *skb);
+void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ __u8 opcode, struct sk_buff *skb);
+void irlmp_link_data_indication(struct lap_cb *, struct sk_buff *,
+ int unreliable);
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_link_unitdata_indication(struct lap_cb *, struct sk_buff *);
+#endif /* CONFIG_IRDA_ULTRA */
+
+void irlmp_link_connect_indication(struct lap_cb *, __u32 saddr, __u32 daddr,
+ struct qos_info *qos, struct sk_buff *skb);
+void irlmp_link_connect_request(__u32 daddr);
+void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
+ struct sk_buff *skb);
+void irlmp_link_disconnect_indication(struct lap_cb *, struct irlap_cb *,
+ LAP_REASON reason, struct sk_buff *);
+void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log);
+void irlmp_link_discovery_indication(struct lap_cb *, discovery_t *discovery);
+
+#endif
diff --git a/drivers/staging/irda/include/net/irda/irmod.h b/drivers/staging/irda/include/net/irda/irmod.h
new file mode 100644
index 000000000000..86f0dbb8ee5d
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irmod.h
@@ -0,0 +1,109 @@
+/*********************************************************************
+ *
+ * Filename: irmod.h
+ * Version: 0.3
+ * Description: IrDA module and utilities functions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Dec 15 13:58:52 1997
+ * Modified at: Fri Jan 28 13:15:24 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRMOD_H
+#define IRMOD_H
+
+/* Misc status information */
+typedef enum {
+ STATUS_OK,
+ STATUS_ABORTED,
+ STATUS_NO_ACTIVITY,
+ STATUS_NOISY,
+ STATUS_REMOTE,
+} LINK_STATUS;
+
+typedef enum {
+ LOCK_NO_CHANGE,
+ LOCK_LOCKED,
+ LOCK_UNLOCKED,
+} LOCK_STATUS;
+
+typedef enum { FLOW_STOP, FLOW_START } LOCAL_FLOW;
+
+/*
+ * IrLMP disconnect reasons. The order is very important, since they
+ * correspond to disconnect reasons sent in IrLMP disconnect frames, so
+ * please do not touch :-)
+ */
+typedef enum {
+ LM_USER_REQUEST = 1, /* User request */
+ LM_LAP_DISCONNECT, /* Unexpected IrLAP disconnect */
+ LM_CONNECT_FAILURE, /* Failed to establish IrLAP connection */
+ LM_LAP_RESET, /* IrLAP reset */
+ LM_INIT_DISCONNECT, /* Link Management initiated disconnect */
+ LM_LSAP_NOTCONN, /* Data delivered on unconnected LSAP */
+ LM_NON_RESP_CLIENT, /* Non responsive LM-MUX client */
+ LM_NO_AVAIL_CLIENT, /* No available LM-MUX client */
+ LM_CONN_HALF_OPEN, /* Connection is half open */
+ LM_BAD_SOURCE_ADDR, /* Illegal source address (i.e 0x00) */
+} LM_REASON;
+#define LM_UNKNOWN 0xff /* Unspecified disconnect reason */
+
+/* A few forward declarations (to make compiler happy) */
+struct qos_info; /* in <net/irda/qos.h> */
+
+/*
+ * Notify structure used between transport and link management layers
+ */
+typedef struct {
+ int (*data_indication)(void *priv, void *sap, struct sk_buff *skb);
+ int (*udata_indication)(void *priv, void *sap, struct sk_buff *skb);
+ void (*connect_confirm)(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size, struct sk_buff *skb);
+ void (*connect_indication)(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size, struct sk_buff *skb);
+ void (*disconnect_indication)(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *);
+ void (*flow_indication)(void *instance, void *sap, LOCAL_FLOW flow);
+ void (*status_indication)(void *instance,
+ LINK_STATUS link, LOCK_STATUS lock);
+ void *instance; /* Layer instance pointer */
+ char name[16]; /* Name of layer */
+} notify_t;
+
+#define NOTIFY_MAX_NAME 16
+
+/* Zero the notify structure */
+void irda_notify_init(notify_t *notify);
+
+/* Locking wrapper - Note the inverted logic on irda_lock().
+ * These functions basically return FALSE if the lock is already in the
+ * position you want to set it to. - Jean II */
+#define irda_lock(lock) (! test_and_set_bit(0, (void *) (lock)))
+#define irda_unlock(lock) (test_and_clear_bit(0, (void *) (lock)))
+
+#endif /* IRMOD_H */
+
+
+
+
+
+
+
+
+
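A small sketch of the inverted logic noted above: irda_lock() returns true only when the lock was actually taken (i.e. the bit was previously clear). The caller below is hypothetical; its tx_queue_lock field is borrowed from struct tsap_cb in irttp.h, elsewhere in this series, purely for illustration.

static int demo_try_transmit(struct tsap_cb *self)
{
	if (!irda_lock(&self->tx_queue_lock))
		return 0;	/* someone else is already draining the queue */

	/* ... drain self->tx_queue here ... */

	irda_unlock(&self->tx_queue_lock);
	return 1;
}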
diff --git a/drivers/staging/irda/include/net/irda/irqueue.h b/drivers/staging/irda/include/net/irda/irqueue.h
new file mode 100644
index 000000000000..37f512bd6733
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irqueue.h
@@ -0,0 +1,96 @@
+/*********************************************************************
+ *
+ * Filename: irqueue.h
+ * Version: 0.3
+ * Description: General queue implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Jun 9 13:26:50 1998
+ * Modified at: Thu Oct 7 13:25:16 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
+ * Copyright (c) 1998, Dag Brattli
+ * All Rights Reserved.
+ *
+ * This code is taken from the Vortex Operating System written by Aage
+ * Kvalnes and has been ported to Linux and Linux/IR by Dag Brattli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#ifndef IRDA_QUEUE_H
+#define IRDA_QUEUE_H
+
+#define NAME_SIZE 32
+
+/*
+ * Hash types (some flags can be xored)
+ * See comments in irqueue.c for which one to use...
+ */
+#define HB_NOLOCK 0 /* No concurrent access prevention */
+#define HB_LOCK 1 /* Prevent concurrent write with global lock */
+
+/*
+ * Hash defines
+ */
+#define HASHBIN_SIZE 8
+#define HASHBIN_MASK 0x7
+
+#ifndef IRDA_ALIGN
+#define IRDA_ALIGN __attribute__((aligned))
+#endif
+
+#define Q_NULL { NULL, NULL, "", 0 }
+
+typedef void (*FREE_FUNC)(void *arg);
+
+struct irda_queue {
+ struct irda_queue *q_next;
+ struct irda_queue *q_prev;
+
+ char q_name[NAME_SIZE];
+ long q_hash; /* Must be able to cast a (void *) */
+};
+typedef struct irda_queue irda_queue_t;
+
+typedef struct hashbin_t {
+ __u32 magic;
+ int hb_type;
+ int hb_size;
+ spinlock_t hb_spinlock; /* HB_LOCK - Can be used by the user */
+
+ irda_queue_t* hb_queue[HASHBIN_SIZE] IRDA_ALIGN;
+
+ irda_queue_t* hb_current;
+} hashbin_t;
+
+hashbin_t *hashbin_new(int type);
+int hashbin_delete(hashbin_t* hashbin, FREE_FUNC func);
+int hashbin_clear(hashbin_t* hashbin, FREE_FUNC free_func);
+void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
+ const char* name);
+void* hashbin_remove(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_remove_first(hashbin_t *hashbin);
+void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry);
+void* hashbin_find(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_lock_find(hashbin_t* hashbin, long hashv, const char* name);
+void* hashbin_find_next(hashbin_t* hashbin, long hashv, const char* name,
+ void ** pnext);
+irda_queue_t *hashbin_get_first(hashbin_t *hashbin);
+irda_queue_t *hashbin_get_next(hashbin_t *hashbin);
+
+#define HASHBIN_GET_SIZE(hashbin) hashbin->hb_size
+
+#endif
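
The hashbin API above is easiest to read with a usage sketch. The element type, names and hash value below are invented, kzalloc()/kfree() from <linux/slab.h> are assumed to be available, and entries embed irda_queue_t as their first member so the queue pointer and the element pointer coincide.

struct demo_entry {
	irda_queue_t q;			/* must be first */
	int payload;
};

static void demo_hashbin_usage(void)
{
	hashbin_t *bin = hashbin_new(HB_LOCK);
	struct demo_entry *e, *found;

	if (!bin)
		return;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e) {
		e->payload = 1;
		hashbin_insert(bin, &e->q, 0x42, "entry");
		found = hashbin_find(bin, 0x42, "entry");	/* -> e */
		(void)found;
	}

	/* frees every remaining entry through the callback */
	hashbin_delete(bin, (FREE_FUNC) kfree);
}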
diff --git a/drivers/staging/irda/include/net/irda/irttp.h b/drivers/staging/irda/include/net/irda/irttp.h
new file mode 100644
index 000000000000..98682d4bae8f
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/irttp.h
@@ -0,0 +1,210 @@
+/*********************************************************************
+ *
+ * Filename: irttp.h
+ * Version: 1.0
+ * Description: Tiny Transport Protocol (TTP) definitions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:31 1997
+ * Modified at: Sun Dec 12 13:09:07 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRTTP_H
+#define IRTTP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h> /* struct lsap_cb */
+#include <net/irda/qos.h> /* struct qos_info */
+#include <net/irda/irqueue.h>
+
+#define TTP_MAX_CONNECTIONS LM_MAX_CONNECTIONS
+#define TTP_HEADER 1
+#define TTP_MAX_HEADER (TTP_HEADER + LMP_MAX_HEADER)
+#define TTP_SAR_HEADER 5
+#define TTP_PARAMETERS 0x80
+#define TTP_MORE 0x80
+
+/* Transmission queue sizes */
+/* Worst case scenario, two window of data - Jean II */
+#define TTP_TX_MAX_QUEUE 14
+/* We need to keep at least 5 frames to make sure that we can refill
+ * the LAP layer appropriately. LAP keeps only two buffers, and we need
+ * to have 7 to make a full window - Jean II */
+#define TTP_TX_LOW_THRESHOLD 5
+/* Most clients are synchronous with respect to flow control, so we can
+ * keep a low number of Tx buffers in TTP - Jean II */
+#define TTP_TX_HIGH_THRESHOLD 7
+
+/* Receive queue sizes */
+/* Minimum credit that the peer should hold.
+ * If the peer holds credits for fewer than 9 frames, we will explicitly send
+ * it some credits (through irttp_give_credit() and a specific frame).
+ * Note that when we give credits it's likely that they won't be sent in
+ * this LAP window, but in the next one. So, we make sure that the peer
+ * has something to send while waiting for credits (one LAP window == 7
+ * + 1 frames while it processes the credits). - Jean II */
+#define TTP_RX_MIN_CREDIT 8
+/* This is the default maximum number of credits held by the peer, so the
+ * default maximum number of frames it can send us before needing a flow
+ * control answer from us (this may be negotiated differently at TSAP setup).
+ * We want to minimise the number of times we have to explicitly send some
+ * credit to the peer, hoping we can piggyback it on the return data. In
+ * particular, it doesn't make sense for us to send credit more than once
+ * per LAP window.
+ * Moreover, giving credits has some latency, so we need strictly more than
+ * a LAP window, otherwise we may already have credits in our Tx queue.
+ * But on the other hand, we don't want to keep too many Rx buffers here
+ * before starting to flow control the other end, so make it exactly one
+ * LAP window + 1 + MIN_CREDITS. - Jean II */
+#define TTP_RX_DEFAULT_CREDIT 16
+/* Maximum number of credits we can allow the peer to have, and therefore
+ * maximum Rx queue size.
+ * Note that we try to deliver packets to the higher layer every time we
+ * receive something, so in normal mode the Rx queue will never contain
+ * more than one or two packets. - Jean II */
+#define TTP_RX_MAX_CREDIT 21
+
+/* What clients should use when calling ttp_open_tsap() */
+#define DEFAULT_INITIAL_CREDIT TTP_RX_DEFAULT_CREDIT
+
+/* Some priorities for disconnect requests */
+#define P_NORMAL 0
+#define P_HIGH 1
+
+#define TTP_SAR_DISABLE 0
+#define TTP_SAR_UNBOUND 0xffffffff
+
+/* Parameters */
+#define TTP_MAX_SDU_SIZE 0x01
+
+/*
+ * This structure contains all data associated with one instance of a TTP
+ * connection.
+ */
+struct tsap_cb {
+ irda_queue_t q; /* Must be first */
+ magic_t magic; /* Just in case */
+
+ __u8 stsap_sel; /* Source TSAP */
+ __u8 dtsap_sel; /* Destination TSAP */
+
+ struct lsap_cb *lsap; /* Corresponding LSAP to this TSAP */
+
+ __u8 connected; /* TSAP connected */
+
+ __u8 initial_credit; /* Initial credit to give peer */
+
+ int avail_credit; /* Available credit to return to peer */
+ int remote_credit; /* Credit held by peer TTP entity */
+ int send_credit; /* Credit held by local TTP entity */
+
+ struct sk_buff_head tx_queue; /* Frames to be transmitted */
+ struct sk_buff_head rx_queue; /* Received frames */
+ struct sk_buff_head rx_fragments;
+ int tx_queue_lock;
+ int rx_queue_lock;
+ spinlock_t lock;
+
+ notify_t notify; /* Callbacks to client layer */
+
+ struct net_device_stats stats;
+ struct timer_list todo_timer;
+
+ __u32 max_seg_size; /* Max data that fit into an IrLAP frame */
+ __u8 max_header_size;
+
+ int rx_sdu_busy; /* RxSdu.busy */
+ __u32 rx_sdu_size; /* Current size of a partially received frame */
+ __u32 rx_max_sdu_size; /* Max receive user data size */
+
+ int tx_sdu_busy; /* TxSdu.busy */
+ __u32 tx_max_sdu_size; /* Max transmit user data size */
+
+ int close_pend; /* Close, but disconnect_pend */
+ unsigned long disconnect_pend; /* Disconnect, but still data to send */
+ struct sk_buff *disconnect_skb;
+};
+
+struct irttp_cb {
+ magic_t magic;
+ hashbin_t *tsaps;
+};
+
+int irttp_init(void);
+void irttp_cleanup(void);
+
+struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify);
+int irttp_close_tsap(struct tsap_cb *self);
+
+int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb);
+int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb);
+
+int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *userdata);
+int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
+ struct sk_buff *userdata);
+int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *skb,
+ int priority);
+void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow);
+struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance);
+
+static inline __u32 irttp_get_saddr(struct tsap_cb *self)
+{
+ return irlmp_get_saddr(self->lsap);
+}
+
+static inline __u32 irttp_get_daddr(struct tsap_cb *self)
+{
+ return irlmp_get_daddr(self->lsap);
+}
+
+static inline __u32 irttp_get_max_seg_size(struct tsap_cb *self)
+{
+ return self->max_seg_size;
+}
+
+/* After doing an irttp_dup(), this gets one of the two sockets back into
+ * a state where it is waiting for incoming connections.
+ * Note : this can be used *only* if the socket is not yet connected
+ * (i.e. NO irttp_connect_response() done on this socket).
+ * - Jean II */
+static inline void irttp_listen(struct tsap_cb *self)
+{
+ irlmp_listen(self->lsap);
+ self->dtsap_sel = LSAP_ANY;
+}
+
+/* Return TRUE if the node is in primary mode (i.e. master)
+ * - Jean II */
+static inline int irttp_is_primary(struct tsap_cb *self)
+{
+ if ((self == NULL) ||
+ (self->lsap == NULL) ||
+ (self->lsap->lap == NULL) ||
+ (self->lsap->lap->irlap == NULL))
+ return -2;
+ return irlap_is_primary(self->lsap->lap->irlap);
+}
+
+#endif /* IRTTP_H */
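
Tying the declarations above together, the sketch below shows roughly how a kernel client would use this API, in the same style as af_irda.c later in this patch: open a TSAP with DEFAULT_INITIAL_CREDIT, then transmit while leaving TTP_MAX_HEADER bytes of headroom. The demo_* names are invented and error handling is minimal; the notify_t plumbing (irda_notify_init(), NOTIFY_MAX_NAME) comes from the core IrDA headers pulled in elsewhere in this patch. Treat it as a sketch, not as a reference driver.

    #include <linux/skbuff.h>
    #include <net/irda/irttp.h>

    static int demo_data_ind(void *instance, void *sap, struct sk_buff *skb)
    {
            pr_info("demo: received %u bytes over TinyTP\n", skb->len);
            dev_kfree_skb(skb);
            return 0;
    }

    static struct tsap_cb *demo_open_tsap(void)
    {
            notify_t notify;

            /* irda_notify_init() zeroes the callback block */
            irda_notify_init(&notify);
            notify.data_indication = demo_data_ind;
            strncpy(notify.name, "demo", NOTIFY_MAX_NAME);

            /* LSAP_ANY: let the stack pick a free TSAP selector;
             * DEFAULT_INITIAL_CREDIT: advertise TTP_RX_DEFAULT_CREDIT
             * (16 frames) worth of credit to the peer. */
            return irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
    }

    static int demo_send(struct tsap_cb *tsap, const void *buf, int len)
    {
            struct sk_buff *skb;

            skb = alloc_skb(len + TTP_MAX_HEADER, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            /* Leave room for the TTP, LMP and LAP headers in front */
            skb_reserve(skb, TTP_MAX_HEADER);
            memcpy(skb_put(skb, len), buf, len);

            /* TinyTP queues it and sends it as credits allow */
            return irttp_data_request(tsap, skb);
    }
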
diff --git a/drivers/staging/irda/include/net/irda/parameters.h b/drivers/staging/irda/include/net/irda/parameters.h
new file mode 100644
index 000000000000..2d9cd0007cba
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/parameters.h
@@ -0,0 +1,100 @@
+/*********************************************************************
+ *
+ * Filename: parameters.h
+ * Version: 1.0
+ * Description: A more general way to handle (pi,pl,pv) parameters
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 08:47:28 1999
+ * Modified at: Sun Jan 30 14:05:14 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Michel Dänzer <daenzer@debian.org>, 10/2001
+ * - simplify irda_pv_t to avoid endianness issues
+ *
+ ********************************************************************/
+
+#ifndef IRDA_PARAMS_H
+#define IRDA_PARAMS_H
+
+/*
+ * The currently supported types. Beware not to change the sequence, since
+ * there is a good reason why the sized integers have a value equal to their size
+ */
+typedef enum {
+ PV_INTEGER, /* Integer of any (pl) length */
+ PV_INT_8_BITS, /* Integer of 8 bits in length */
+ PV_INT_16_BITS, /* Integer of 16 bits in length */
+ PV_STRING, /* \0 terminated string */
+ PV_INT_32_BITS, /* Integer of 32 bits in length */
+ PV_OCT_SEQ, /* Octet sequence */
+ PV_NO_VALUE /* Does not contain any value (pl=0) */
+} PV_TYPE;
+
+/* Bit 7 of type field */
+#define PV_BIG_ENDIAN 0x80
+#define PV_LITTLE_ENDIAN 0x00
+#define PV_MASK 0x7f /* To mask away endian bit */
+
+#define PV_PUT 0
+#define PV_GET 1
+
+typedef union {
+ char *c;
+ __u32 i;
+ __u32 *ip;
+} irda_pv_t;
+
+typedef struct {
+ __u8 pi;
+ __u8 pl;
+ irda_pv_t pv;
+} irda_param_t;
+
+typedef int (*PI_HANDLER)(void *self, irda_param_t *param, int get);
+typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+
+typedef struct {
+ const PI_HANDLER func; /* Handler for this parameter identifier */
+ PV_TYPE type; /* Data type for this parameter */
+} pi_minor_info_t;
+
+typedef struct {
+ const pi_minor_info_t *pi_minor_call_table;
+ int len;
+} pi_major_info_t;
+
+typedef struct {
+ const pi_major_info_t *tables;
+ int len;
+ __u8 pi_mask;
+ int pi_major_offset;
+} pi_param_info_t;
+
+int irda_param_pack(__u8 *buf, char *fmt, ...);
+
+int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
+ pi_param_info_t *info);
+int irda_param_extract_all(void *self, __u8 *buf, int len,
+ pi_param_info_t *info);
+
+#define irda_param_insert_byte(buf,pi,pv) irda_param_pack(buf,"bbb",pi,1,pv)
+
+#endif /* IRDA_PARAMS_H */
+
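
To make the (pi,pl,pv) machinery above more concrete, here is a hypothetical handler wired into the three-level table structure, loosely modelled on the tables used elsewhere in this stack (e.g. the QoS negotiation code). All demo_* names are invented, and the pi_mask / pi_major_offset values (0x7f and 7) are assumptions matching the single-major-table case; treat this as a sketch, not the stack's actual tables.

    #include <linux/kernel.h>
    #include <net/irda/parameters.h>

    /* Handler for one parameter identifier (pi). On PV_GET we report our
     * value; on PV_PUT we consume the value the peer sent. */
    static int demo_handle_foo(void *self, irda_param_t *param, int get)
    {
            if (get)
                    param->pv.i = 42;               /* value we advertise */
            else
                    pr_debug("demo: peer sent foo=%u\n", param->pv.i);
            return 0;
    }

    /* Minor table: pi 0x00 -> demo_handle_foo, encoded as an 8-bit integer */
    static const pi_minor_info_t demo_pi_minor[] = {
            { demo_handle_foo, PV_INT_8_BITS },
    };

    /* One major table wrapping the minor table above */
    static const pi_major_info_t demo_pi_major[] = {
            { demo_pi_minor, ARRAY_SIZE(demo_pi_minor) },
    };

    /* Assumed mask/offset for a single major table */
    static pi_param_info_t demo_param_info = {
            demo_pi_major, ARRAY_SIZE(demo_pi_major), 0x7f, 7,
    };

    /* Decoding a received parameter block, e.g. an skb payload:
     *     irda_param_extract_all(self, skb->data, skb->len, &demo_param_info);
     * Encoding a single one-byte parameter into an outgoing buffer:
     *     irda_param_insert_byte(buf, 0x00, 42);
     */
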
diff --git a/drivers/staging/irda/include/net/irda/qos.h b/drivers/staging/irda/include/net/irda/qos.h
new file mode 100644
index 000000000000..05a5a249956f
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/qos.h
@@ -0,0 +1,101 @@
+/*********************************************************************
+ *
+ * Filename: qos.h
+ * Version: 1.0
+ * Description: Quality of Service definitions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Sep 19 23:21:09 1997
+ * Modified at: Thu Dec 2 13:51:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_QOS_H
+#define IRDA_QOS_H
+
+#include <linux/skbuff.h>
+
+#include <net/irda/parameters.h>
+
+#define PI_BAUD_RATE 0x01
+#define PI_MAX_TURN_TIME 0x82
+#define PI_DATA_SIZE 0x83
+#define PI_WINDOW_SIZE 0x84
+#define PI_ADD_BOFS 0x85
+#define PI_MIN_TURN_TIME 0x86
+#define PI_LINK_DISC 0x08
+
+#define IR_115200_MAX 0x3f
+
+/* Baud rates (first byte) */
+#define IR_2400 0x01
+#define IR_9600 0x02
+#define IR_19200 0x04
+#define IR_38400 0x08
+#define IR_57600 0x10
+#define IR_115200 0x20
+#define IR_576000 0x40
+#define IR_1152000 0x80
+
+/* Baud rates (second byte) */
+#define IR_4000000 0x01
+#define IR_16000000 0x02
+
+/* Quality of Service information */
+typedef struct {
+ __u32 value;
+ __u16 bits; /* LSB is first byte, MSB is second byte */
+} qos_value_t;
+
+struct qos_info {
+ magic_t magic;
+
+ qos_value_t baud_rate; /* IR_115200 | ... */
+ qos_value_t max_turn_time;
+ qos_value_t data_size;
+ qos_value_t window_size;
+ qos_value_t additional_bofs;
+ qos_value_t min_turn_time;
+ qos_value_t link_disc_time;
+
+ qos_value_t power;
+};
+
+extern int sysctl_max_baud_rate;
+extern int sysctl_max_inactive_time;
+
+void irda_init_max_qos_capabilies(struct qos_info *qos);
+void irda_qos_compute_intersection(struct qos_info *, struct qos_info *);
+
+__u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time);
+
+void irda_qos_bits_to_value(struct qos_info *qos);
+
+/* So simple, how could we not inline those two ?
+ * Note : one byte is 10 bits if you include start and stop bits
+ * Jean II */
+#define irlap_min_turn_time_in_bytes(speed, min_turn_time) ( \
+ speed * min_turn_time / 10000000 \
+)
+#define irlap_xbofs_in_usec(speed, xbofs) ( \
+ xbofs * 10000000 / speed \
+)
+
+#endif
+
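
Since the two macros above are easy to get wrong on units, here is a small stand-alone program that evaluates them for a 115200 baud SIR link. The macro bodies are copied from qos.h so it compiles in user space; the 5000 us turnaround and 10 XBOFs are illustrative values, not defaults taken from the stack.

    #include <stdio.h>

    /* Copied from qos.h above: speed in bit/s, min_turn_time in usec,
     * one byte on the wire = 10 bits (start + 8 data + stop). */
    #define irlap_min_turn_time_in_bytes(speed, min_turn_time) ( \
            speed * min_turn_time / 10000000 \
    )
    #define irlap_xbofs_in_usec(speed, xbofs) ( \
            xbofs * 10000000 / speed \
    )

    int main(void)
    {
            long speed = 115200;    /* bit/s */

            /* 115200 * 5000 / 10^7 = 57 bytes of turnaround padding */
            printf("min turn time at 115200 baud, 5000 us: %ld bytes\n",
                   irlap_min_turn_time_in_bytes(speed, 5000L));

            /* 10 * 10^7 / 115200 = 868 usec spent sending 10 XBOFs */
            printf("10 XBOFs at 115200 baud: %ld usec\n",
                   irlap_xbofs_in_usec(speed, 10L));
            return 0;
    }
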
diff --git a/drivers/staging/irda/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h
new file mode 100644
index 000000000000..d784f242cf7b
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/timer.h
@@ -0,0 +1,105 @@
+/*********************************************************************
+ *
+ * Filename: timer.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Thu Oct 7 12:25:24 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef TIMER_H
+#define TIMER_H
+
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+
+#include <asm/param.h> /* for HZ */
+
+#include <net/irda/irda.h>
+
+/* A few forward declarations (to make compiler happy) */
+struct irlmp_cb;
+struct irlap_cb;
+struct lsap_cb;
+struct lap_cb;
+
+/*
+ * Timeout definitions, some defined in IrLAP 6.13.5 - p. 92
+ */
+#define POLL_TIMEOUT (450*HZ/1000) /* Must never exceed 500 ms */
+#define FINAL_TIMEOUT (500*HZ/1000) /* Must never exceed 500 ms */
+
+/*
+ * Normally twice the P-timer. Note 3, IrLAP 6.3.11.2 - p. 60 suggests
+ * at least twice the duration of the P-timer.
+ */
+#define WD_TIMEOUT (POLL_TIMEOUT*2)
+
+#define MEDIABUSY_TIMEOUT (500*HZ/1000) /* 500 msec */
+#define SMALLBUSY_TIMEOUT (100*HZ/1000) /* 100 msec - IrLAP 6.13.4 */
+
+/*
+ * Slot timer must never exceed 85 ms, and must always be at least 25 ms,
+ * suggested to be 75-85 msec by IrDA Lite. This doesn't work with a lot of
+ * devices, and other stacks use a lot more, so it's best we do the same
+ * (Note : this is the default value and sysctl overrides it - Jean II)
+ */
+#define SLOT_TIMEOUT (90*HZ/1000)
+
+/*
+ * The last discovery frame (XID) is longer due to the extra discovery
+ * information (hints, device name...). This is its extra length.
+ * We use that when setting the query timeout. Jean II
+ */
+#define XIDEXTRA_TIMEOUT (34*HZ/1000) /* 34 msec */
+
+#define WATCHDOG_TIMEOUT (20*HZ) /* 20 sec */
+
+typedef void (*TIMER_CALLBACK)(void *);
+
+static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
+ void* data, TIMER_CALLBACK callback)
+{
+ ptimer->function = (void (*)(unsigned long)) callback;
+ ptimer->data = (unsigned long) data;
+
+ /* Set new value for timer (update or add timer).
+ * We use mod_timer() because it's more efficient and also
+ * safer with respect to race conditions - Jean II */
+ mod_timer(ptimer, jiffies + timeout);
+}
+
+
+void irlap_start_slot_timer(struct irlap_cb *self, int timeout);
+void irlap_start_query_timer(struct irlap_cb *self, int S, int s);
+void irlap_start_final_timer(struct irlap_cb *self, int timeout);
+void irlap_start_wd_timer(struct irlap_cb *self, int timeout);
+void irlap_start_backoff_timer(struct irlap_cb *self, int timeout);
+
+void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout);
+void irlap_stop_mbusy_timer(struct irlap_cb *);
+
+void irlmp_start_watchdog_timer(struct lsap_cb *, int timeout);
+void irlmp_start_discovery_timer(struct irlmp_cb *, int timeout);
+void irlmp_start_idle_timer(struct lap_cb *, int timeout);
+void irlmp_stop_idle_timer(struct lap_cb *self);
+
+#endif
+
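
As a quick illustration of the irda_start_timer() helper above, the sketch below arms a timer so that a callback fires POLL_TIMEOUT jiffies (~450 ms) later. The struct demo_ctx and demo_* names are invented; init_timer() is the classic timer initialiser this era of the kernel still provides and is called once before the first arming.

    #include <net/irda/timer.h>

    struct demo_ctx {
            struct timer_list poll_timer;
            /* ... */
    };

    static void demo_poll_expired(void *data)
    {
            struct demo_ctx *demo = data;

            pr_debug("demo: poll timer expired for %p\n", demo);
    }

    static void demo_init(struct demo_ctx *demo)
    {
            init_timer(&demo->poll_timer);  /* once, before first arming */
    }

    static void demo_arm_poll_timer(struct demo_ctx *demo)
    {
            /* irda_start_timer() uses mod_timer(), so calling this again
             * simply pushes the expiry further out. */
            irda_start_timer(&demo->poll_timer, POLL_TIMEOUT,
                             demo, demo_poll_expired);
    }
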
diff --git a/drivers/staging/irda/include/net/irda/wrapper.h b/drivers/staging/irda/include/net/irda/wrapper.h
new file mode 100644
index 000000000000..eef53ebe3d76
--- /dev/null
+++ b/drivers/staging/irda/include/net/irda/wrapper.h
@@ -0,0 +1,58 @@
+/*********************************************************************
+ *
+ * Filename: wrapper.h
+ * Version: 1.2
+ * Description: IrDA SIR async wrapper layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Tue Jan 11 12:37:29 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef WRAPPER_H
+#define WRAPPER_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net/irda/irda_device.h> /* iobuff_t */
+
+#define BOF 0xc0 /* Beginning of frame */
+#define XBOF 0xff
+#define EOF 0xc1 /* End of frame */
+#define CE 0x7d /* Control escape */
+
+#define STA BOF /* Start flag */
+#define STO EOF /* End flag */
+
+#define IRDA_TRANS 0x20 /* Asynchronous transparency modifier */
+
+/* States for receiving a frame in async mode */
+enum {
+ OUTSIDE_FRAME,
+ BEGIN_FRAME,
+ LINK_ESCAPE,
+ INSIDE_FRAME
+};
+
+/* Proto definitions */
+int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize);
+void async_unwrap_char(struct net_device *dev, struct net_device_stats *stats,
+ iobuff_t *buf, __u8 byte);
+
+#endif
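
The CE / IRDA_TRANS pair above implements the usual async-HDLC style transparency: any payload byte that would collide with BOF, EOF or CE on the wire is sent as CE followed by the byte with bit 5 toggled. The helper below is a stand-alone sketch of just that escaping rule, for illustration; the real async_wrap_skb() additionally prepends XBOFs and BOF, appends the FCS, and closes the frame with EOF.

    #include <net/irda/wrapper.h>

    /* Escape one payload byte for async (SIR) transmission.
     * Returns the number of bytes written to 'out' (1 or 2). */
    static inline int demo_stuff_byte(__u8 byte, __u8 *out)
    {
            switch (byte) {
            case BOF:               /* 0xc0 */
            case EOF:               /* 0xc1 */
            case CE:                /* 0x7d */
                    out[0] = CE;                    /* control escape */
                    out[1] = byte ^ IRDA_TRANS;     /* toggle bit 5 */
                    return 2;
            default:
                    out[0] = byte;                  /* sent as-is */
                    return 1;
            }
    }
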
diff --git a/drivers/staging/irda/net/Kconfig b/drivers/staging/irda/net/Kconfig
new file mode 100644
index 000000000000..6abeae6c666a
--- /dev/null
+++ b/drivers/staging/irda/net/Kconfig
@@ -0,0 +1,96 @@
+#
+# IrDA protocol configuration
+#
+
+menuconfig IRDA
+ depends on NET && !S390
+ tristate "IrDA (infrared) subsystem support"
+ select CRC_CCITT
+ ---help---
+ Say Y here if you want to build support for the IrDA (TM) protocols.
+ The Infrared Data Association (tm) specifies standards for wireless
+ infrared communication and is supported by most laptops and PDAs.
+
+ To use Linux support for the IrDA (tm) protocols, you will also need
+ some user-space utilities like irattach. For more information, see
+ the file <file:Documentation/networking/irda.txt>. You may also want to
+ read the IR-HOWTO, available at
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you want to exchange bits of data (vCal, vCard) with a PDA, you
+ will need to install an OBEX application, such as OpenObex:
+ <http://sourceforge.net/projects/openobex/>
+
+ To compile this support as a module, choose M here: the module will
+ be called irda.
+
+comment "IrDA protocols"
+ depends on IRDA
+
+source "drivers/staging/irda/net/irlan/Kconfig"
+
+source "drivers/staging/irda/net/irnet/Kconfig"
+
+source "drivers/staging/irda/net/ircomm/Kconfig"
+
+config IRDA_ULTRA
+ bool "Ultra (connectionless) protocol"
+ depends on IRDA
+ help
+ Say Y here to support the connectionless Ultra IRDA protocol.
+ Ultra allows you to exchange data over IrDA with really simple devices
+ (watch, beacon) without the overhead of the IrDA protocol (no handshaking,
+ no management frames, simple fixed header).
+ Ultra is available as a special socket: socket(AF_IRDA, SOCK_DGRAM, 1);
+
+comment "IrDA options"
+ depends on IRDA
+
+config IRDA_CACHE_LAST_LSAP
+ bool "Cache last LSAP"
+ depends on IRDA
+ help
+ Say Y here if you want IrLMP to cache the last LSAP used. This
+ makes sense since most frames will be sent/received on the same
+ connection. Enabling this option will save a hash-lookup per frame.
+
+ If unsure, say Y.
+
+config IRDA_FAST_RR
+ bool "Fast RRs (low latency)"
+ depends on IRDA
+ ---help---
+ Say Y here if you want IrLAP to send fast RR (Receive Ready) frames
+ when acting as a primary station.
+ Disabling this option will make latency over IrDA very bad. Enabling
+ this option will make the IrDA stack send more packets than strictly
+ necessary, thus reducing your battery life (but not that much).
+
+ Fast RR will make IrLAP send out a RR frame immediately when
+ receiving a frame if its own transmit queue is currently empty. This
+ will give a big speed improvement when receiving a lot of data, since
+ the secondary station will not have to wait for the max. turn around
+ time (usually 500ms) before it is allowed to transmit the next time.
+ If the transmit queue of the secondary is also empty, the primary will
+ start backing-off before sending another RR frame, waiting longer
+ each time until the back-off reaches the max. turn around time.
+ This back-off increase is controlled via
+ /proc/sys/net/irda/fast_poll_increase
+
+ If unsure, say Y.
+
+config IRDA_DEBUG
+ bool "Debug information"
+ depends on IRDA
+ help
+ Say Y here if you want the IrDA subsystem to write debug information
+ to your syslog. You can change the debug level in
+ /proc/sys/net/irda/debug .
+ When this option is enabled, the IrDA stack also performs many extra internal
+ verifications which will usually prevent the kernel from crashing in case of
+ bugs.
+
+ If unsure, say Y (since it makes it easier to find the bugs).
+
+source "drivers/staging/irda/drivers/Kconfig"
+
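
The Ultra help text above mentions socket(AF_IRDA, SOCK_DGRAM, 1); for completeness, here is a minimal user-space sketch that opens such a socket and binds it to an 8-bit Ultra PID, matching the checks done later in af_irda.c (the PID must have bit 7 clear). The PID value 0x42 is arbitrary, error handling is minimal, and the program needs the irda module loaded to get past socket().

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/irda.h>

    int main(void)
    {
            struct sockaddr_irda addr;
            int fd = socket(AF_IRDA, SOCK_DGRAM, 1);   /* 1 == IRDAPROTO_ULTRA */

            if (fd < 0) {
                    perror("socket(AF_IRDA, SOCK_DGRAM, ULTRA)");
                    return 1;
            }

            memset(&addr, 0, sizeof(addr));
            addr.sir_family = AF_IRDA;
            addr.sir_lsap_sel = 0x42;       /* Ultra PID, bit 7 must be clear */

            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    perror("bind");

            close(fd);
            return 0;
    }
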
diff --git a/drivers/staging/irda/net/Makefile b/drivers/staging/irda/net/Makefile
new file mode 100644
index 000000000000..bd1a635b88cf
--- /dev/null
+++ b/drivers/staging/irda/net/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Linux IrDA protocol layer.
+#
+
+subdir-ccflags-y += -I$(srctree)/drivers/staging/irda/include
+
+obj-$(CONFIG_IRDA) += irda.o
+obj-$(CONFIG_IRLAN) += irlan/
+obj-$(CONFIG_IRNET) += irnet/
+obj-$(CONFIG_IRCOMM) += ircomm/
+
+irda-y := iriap.o iriap_event.o irlmp.o irlmp_event.o irlmp_frame.o \
+ irlap.o irlap_event.o irlap_frame.o timer.o qos.o irqueue.o \
+ irttp.o irda_device.o irias_object.o wrapper.o af_irda.o \
+ discovery.o parameters.o irnetlink.o irmod.o
+irda-$(CONFIG_PROC_FS) += irproc.o
+irda-$(CONFIG_SYSCTL) += irsysctl.o
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
new file mode 100644
index 000000000000..23fa7c8b09a5
--- /dev/null
+++ b/drivers/staging/irda/net/af_irda.c
@@ -0,0 +1,2695 @@
+/*********************************************************************
+ *
+ * Filename: af_irda.c
+ * Version: 0.9
+ * Description: IrDA sockets implementation
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun May 31 10:12:43 1998
+ * Modified at: Sat Dec 25 21:10:23 1999
+ * Modified by: Dag Brattli <dag@brattli.net>
+ * Sources: af_netrom.c, af_ax25.c, af_rose.c, af_x25.c etc.
+ *
+ * Copyright (c) 1999 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1999-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Linux-IrDA now supports four different types of IrDA sockets:
+ *
+ * o SOCK_STREAM: TinyTP connections with SAR disabled. The
+ * max SDU size is 0 for conn. of this type
+ * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. TTP may
+ * fragment the messages, but will preserve
+ * the message boundaries
+ * o SOCK_DGRAM: IRDAPROTO_UNITDATA: TinyTP connections with Unitdata
+ * (unreliable) transfers
+ * IRDAPROTO_ULTRA: Connectionless and unreliable data
+ *
+ ********************************************************************/
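
To make the socket-type list above concrete from the user-space side, here is a minimal SOCK_STREAM client that relies on the auto-connect behaviour implemented below in irda_connect(): leaving sir_addr at 0 (DEV_ADDR_ANY) makes the stack discover a peer offering the named service. "DemoService" is an invented service name and error handling is minimal; this is an illustrative sketch only.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/irda.h>

    int main(void)
    {
            struct sockaddr_irda peer;
            int fd = socket(AF_IRDA, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket(AF_IRDA, SOCK_STREAM)");
                    return 1;
            }

            memset(&peer, 0, sizeof(peer));
            peer.sir_family = AF_IRDA;
            peer.sir_addr = 0;      /* DEV_ADDR_ANY: let the stack discover */
            strncpy(peer.sir_name, "DemoService", sizeof(peer.sir_name));

            if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
                    perror("connect");
            } else {
                    /* TinyTP with SAR disabled: plain byte stream */
                    if (write(fd, "hello", 5) < 0)
                            perror("write");
            }

            close(fd);
            return 0;
    }
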
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/irda.h>
+#include <linux/poll.h>
+
+#include <asm/ioctls.h> /* TIOCOUTQ, TIOCINQ */
+#include <linux/uaccess.h>
+
+#include <net/sock.h>
+#include <net/tcp_states.h>
+
+#include <net/irda/af_irda.h>
+
+static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
+
+static const struct proto_ops irda_stream_ops;
+static const struct proto_ops irda_seqpacket_ops;
+static const struct proto_ops irda_dgram_ops;
+
+#ifdef CONFIG_IRDA_ULTRA
+static const struct proto_ops irda_ultra_ops;
+#define ULTRA_MAX_DATA 382
+#endif /* CONFIG_IRDA_ULTRA */
+
+#define IRDA_MAX_HEADER (TTP_MAX_HEADER)
+
+/*
+ * Function irda_data_indication (instance, sap, skb)
+ *
+ * Received some data from TinyTP. Just queue it on the receive queue
+ *
+ */
+static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+ int err;
+
+ self = instance;
+ sk = instance;
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err) {
+ pr_debug("%s(), error: no more mem!\n", __func__);
+ self->rx_flow = FLOW_STOP;
+
+ /* When we return error, TTP will need to requeue the skb */
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Function irda_disconnect_indication (instance, sap, reason, skb)
+ *
+ * Connection has been closed. Check reason to find out why
+ *
+ */
+static void irda_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ self = instance;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ /* Don't care about it, but let's not leak it */
+ if(skb)
+ dev_kfree_skb(skb);
+
+ sk = instance;
+ if (sk == NULL) {
+ pr_debug("%s(%p) : BUG : sk is NULL\n",
+ __func__, self);
+ return;
+ }
+
+ /* Prevent race conditions with irda_release() and irda_shutdown() */
+ bh_lock_sock(sk);
+ if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+
+ sk->sk_state_change(sk);
+
+ /* Close our TSAP.
+ * If we leave it open, IrLMP puts it back into the list of
+ * unconnected LSAPs. The problem is that any incoming request
+ * can then be matched to this socket (and it will be, because
+ * it is at the head of the list). This would prevent any
+ * listening socket waiting on the same TSAP to get those
+ * requests. Some apps forget to close sockets, or hang on to them
+ * a bit too long, so we may stay in this dead state long
+ * enough to be noticed...
+ * Note : all socket functions do check sk->sk_state, so we are
+ * safe...
+ * Jean II
+ */
+ if (self->tsap) {
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+ }
+ bh_unlock_sock(sk);
+
+ /* Note : once we are there, there is not much you want to do
+ * with the socket anymore, apart from closing it.
+ * For example, bind() and connect() won't reset sk->sk_err,
+ * sk->sk_shutdown and sk->sk_flags to valid values...
+ * Jean II
+ */
+}
+
+/*
+ * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb)
+ *
+ * Connection has been confirmed by the remote device
+ *
+ */
+static void irda_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ self = instance;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ sk = instance;
+ if (sk == NULL) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ dev_kfree_skb(skb);
+ // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ /* How much header space do we need to reserve */
+ self->max_header_size = max_header_size;
+
+ /* IrTTP max SDU size in transmit direction */
+ self->max_sdu_size_tx = max_sdu_size;
+
+ /* Find out the largest chunk of data that we can transmit */
+ switch (sk->sk_type) {
+ case SOCK_STREAM:
+ if (max_sdu_size != 0) {
+ net_err_ratelimited("%s: max_sdu_size must be 0\n",
+ __func__);
+ return;
+ }
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ break;
+ case SOCK_SEQPACKET:
+ if (max_sdu_size == 0) {
+ net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
+ __func__);
+ return;
+ }
+ self->max_data_size = max_sdu_size;
+ break;
+ default:
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ }
+
+ pr_debug("%s(), max_data_size=%d\n", __func__,
+ self->max_data_size);
+
+ memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
+
+ /* We are now connected! */
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_state_change(sk);
+}
+
+/*
+ * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata)
+ *
+ * Incoming connection
+ *
+ */
+static void irda_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size, struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ self = instance;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ sk = instance;
+ if (sk == NULL) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* How much header space do we need to reserve */
+ self->max_header_size = max_header_size;
+
+ /* IrTTP max SDU size in transmit direction */
+ self->max_sdu_size_tx = max_sdu_size;
+
+ /* Find out the largest chunk of data that we can transmit */
+ switch (sk->sk_type) {
+ case SOCK_STREAM:
+ if (max_sdu_size != 0) {
+ net_err_ratelimited("%s: max_sdu_size must be 0\n",
+ __func__);
+ kfree_skb(skb);
+ return;
+ }
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ break;
+ case SOCK_SEQPACKET:
+ if (max_sdu_size == 0) {
+ net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
+ __func__);
+ kfree_skb(skb);
+ return;
+ }
+ self->max_data_size = max_sdu_size;
+ break;
+ default:
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ }
+
+ pr_debug("%s(), max_data_size=%d\n", __func__,
+ self->max_data_size);
+
+ memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
+}
+
+/*
+ * Function irda_connect_response (handle)
+ *
+ * Accept incoming connection
+ *
+ */
+static void irda_connect_response(struct irda_sock *self)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL);
+ if (skb == NULL) {
+ pr_debug("%s() Unable to allocate sk_buff!\n",
+ __func__);
+ return;
+ }
+
+ /* Reserve space for MUX_CONTROL and LAP header */
+ skb_reserve(skb, IRDA_MAX_HEADER);
+
+ irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
+}
+
+/*
+ * Function irda_flow_indication (instance, sap, flow)
+ *
+ * Used by TinyTP to tell us if it can accept more data or not
+ *
+ */
+static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ self = instance;
+ sk = instance;
+ BUG_ON(sk == NULL);
+
+ switch (flow) {
+ case FLOW_STOP:
+ pr_debug("%s(), IrTTP wants us to slow down\n",
+ __func__);
+ self->tx_flow = flow;
+ break;
+ case FLOW_START:
+ self->tx_flow = flow;
+ pr_debug("%s(), IrTTP wants us to start again\n",
+ __func__);
+ wake_up_interruptible(sk_sleep(sk));
+ break;
+ default:
+ pr_debug("%s(), Unknown flow command!\n", __func__);
+ /* Unknown flow command, better stop */
+ self->tx_flow = flow;
+ break;
+ }
+}
+
+/*
+ * Function irda_getvalue_confirm (obj_id, value, priv)
+ *
+ * Got answer from remote LM-IAS, just pass object to requester...
+ *
+ * Note : duplicate from above, but we need our own version that
+ * doesn't touch the dtsap_sel and saves the full value structure...
+ */
+static void irda_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct irda_sock *self;
+
+ self = priv;
+ if (!self) {
+ net_warn_ratelimited("%s: lost myself!\n", __func__);
+ return;
+ }
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ pr_debug("%s(), IAS query failed! (%d)\n", __func__,
+ result);
+
+ self->errno = result; /* We really need it later */
+
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->query_wait);
+
+ return;
+ }
+
+ /* Pass the object to the caller (so the caller must delete it) */
+ self->ias_result = value;
+ self->errno = 0;
+
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->query_wait);
+}
+
+/*
+ * Function irda_selective_discovery_indication (discovery)
+ *
+ * Got a selective discovery indication from IrLMP.
+ *
+ * IrLMP is telling us that this node is new and matches our hint bit
+ * filter. Wake up any process waiting for an answer...
+ */
+static void irda_selective_discovery_indication(discinfo_t *discovery,
+ DISCOVERY_MODE mode,
+ void *priv)
+{
+ struct irda_sock *self;
+
+ self = priv;
+ if (!self) {
+ net_warn_ratelimited("%s: lost myself!\n", __func__);
+ return;
+ }
+
+ /* Pass parameter to the caller */
+ self->cachedaddr = discovery->daddr;
+
+ /* Wake up process if it's waiting for the device to be discovered */
+ wake_up_interruptible(&self->query_wait);
+}
+
+/*
+ * Function irda_discovery_timeout (priv)
+ *
+ * Timeout in the selective discovery process
+ *
+ * We were waiting for a node to be discovered, but nothing has come up
+ * so far. Wake up the user and tell him that we failed...
+ */
+static void irda_discovery_timeout(u_long priv)
+{
+ struct irda_sock *self;
+
+ self = (struct irda_sock *) priv;
+ BUG_ON(self == NULL);
+
+ /* Nothing for the caller */
+ self->cachelog = NULL;
+ self->cachedaddr = 0;
+ self->errno = -ETIME;
+
+ /* Wake up process if it's still waiting... */
+ wake_up_interruptible(&self->query_wait);
+}
+
+/*
+ * Function irda_open_tsap (self)
+ *
+ * Open local Transport Service Access Point (TSAP)
+ *
+ */
+static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
+{
+ notify_t notify;
+
+ if (self->tsap) {
+ pr_debug("%s: busy!\n", __func__);
+ return -EBUSY;
+ }
+
+ /* Initialize callbacks to be used by the IrDA stack */
+ irda_notify_init(&notify);
+ notify.connect_confirm = irda_connect_confirm;
+ notify.connect_indication = irda_connect_indication;
+ notify.disconnect_indication = irda_disconnect_indication;
+ notify.data_indication = irda_data_indication;
+ notify.udata_indication = irda_data_indication;
+ notify.flow_indication = irda_flow_indication;
+ notify.instance = self;
+ strncpy(notify.name, name, NOTIFY_MAX_NAME);
+
+ self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
+ &notify);
+ if (self->tsap == NULL) {
+ pr_debug("%s(), Unable to allocate TSAP!\n",
+ __func__);
+ return -ENOMEM;
+ }
+ /* Remember which TSAP selector we actually got */
+ self->stsap_sel = self->tsap->stsap_sel;
+
+ return 0;
+}
+
+/*
+ * Function irda_open_lsap (self)
+ *
+ * Open local Link Service Access Point (LSAP). Used for opening Ultra
+ * sockets
+ */
+#ifdef CONFIG_IRDA_ULTRA
+static int irda_open_lsap(struct irda_sock *self, int pid)
+{
+ notify_t notify;
+
+ if (self->lsap) {
+ net_warn_ratelimited("%s(), busy!\n", __func__);
+ return -EBUSY;
+ }
+
+ /* Initialize callbacks to be used by the IrDA stack */
+ irda_notify_init(&notify);
+ notify.udata_indication = irda_data_indication;
+ notify.instance = self;
+ strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);
+
+ self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
+ if (self->lsap == NULL) {
+ pr_debug("%s(), Unable to allocate LSAP!\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irda_find_lsap_sel (self, name)
+ *
+ * Try to lookup LSAP selector in remote LM-IAS
+ *
+ * Basically, we start an IAP query, and then go to sleep. When the query
+ * returns, irda_getvalue_confirm will wake us up, and we can examine the
+ * result of the query...
+ * Note that in some cases, the query fails even before we go to sleep,
+ * creating some races...
+ */
+static int irda_find_lsap_sel(struct irda_sock *self, char *name)
+{
+ pr_debug("%s(%p, %s)\n", __func__, self, name);
+
+ if (self->iriap) {
+ net_warn_ratelimited("%s(): busy with a previous query\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irda_getvalue_confirm);
+ if(self->iriap == NULL)
+ return -ENOMEM;
+
+ /* Treat unexpected wakeup as disconnect */
+ self->errno = -EHOSTUNREACH;
+
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
+ name, "IrDA:TinyTP:LsapSel");
+
+ /* Wait for answer, if not yet finished (or failed) */
+ if (wait_event_interruptible(self->query_wait, (self->iriap==NULL)))
+ /* Treat signals as disconnect */
+ return -EHOSTUNREACH;
+
+ /* Check what happened */
+ if (self->errno)
+ {
+ /* Requested object/attribute doesn't exist */
+ if((self->errno == IAS_CLASS_UNKNOWN) ||
+ (self->errno == IAS_ATTRIB_UNKNOWN))
+ return -EADDRNOTAVAIL;
+ else
+ return -EHOSTUNREACH;
+ }
+
+ /* Get the remote TSAP selector */
+ switch (self->ias_result->type) {
+ case IAS_INTEGER:
+ pr_debug("%s() int=%d\n",
+ __func__, self->ias_result->t.integer);
+
+ if (self->ias_result->t.integer != -1)
+ self->dtsap_sel = self->ias_result->t.integer;
+ else
+ self->dtsap_sel = 0;
+ break;
+ default:
+ self->dtsap_sel = 0;
+ pr_debug("%s(), bad type!\n", __func__);
+ break;
+ }
+ if (self->ias_result)
+ irias_delete_value(self->ias_result);
+
+ if (self->dtsap_sel)
+ return 0;
+
+ return -EADDRNOTAVAIL;
+}
+
+/*
+ * Function irda_discover_daddr_and_lsap_sel (self, name)
+ *
+ * This tries to find a device with the requested service.
+ *
+ * It basically looks into the discovery log. For each address in the list,
+ * it queries the LM-IAS of the device to find out if this device offers
+ * the requested service.
+ * If there is more than one node supporting the service, we complain
+ * to the user (it should move devices around).
+ * Then, we set both the destination address and the LSAP selector to point
+ * to the service on the unique device we have found.
+ *
+ * Note : this function fails if there is more than one device in range,
+ * because IrLMP doesn't disconnect the LAP when the last LSAP is closed.
+ * Moreover, we would need to wait for the LAP disconnection...
+ */
+static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
+{
+ discinfo_t *discoveries; /* Copy of the discovery log */
+ int number; /* Number of nodes in the log */
+ int i;
+ int err = -ENETUNREACH;
+ __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */
+ __u8 dtsap_sel = 0x0; /* TSAP associated with it */
+
+ pr_debug("%s(), name=%s\n", __func__, name);
+
+ /* Ask lmp for the current discovery log
+ * Note : we have to use irlmp_get_discoveries(), as opposed
+ * to playing with the cachelog directly, because while we are
+ * making our IAS query, the log might change... */
+ discoveries = irlmp_get_discoveries(&number, self->mask.word,
+ self->nslots);
+ /* Check if we got some results */
+ if (discoveries == NULL)
+ return -ENETUNREACH; /* No nodes discovered */
+
+ /*
+ * Now, check all discovered devices (if any), and query
+ * each of them for the service that the client is
+ * interested in...
+ */
+ for(i = 0; i < number; i++) {
+ /* Try the address in the log */
+ self->daddr = discoveries[i].daddr;
+ self->saddr = 0x0;
+ pr_debug("%s(), trying daddr = %08x\n",
+ __func__, self->daddr);
+
+ /* Query remote LM-IAS for this service */
+ err = irda_find_lsap_sel(self, name);
+ switch (err) {
+ case 0:
+ /* We found the requested service */
+ if(daddr != DEV_ADDR_ANY) {
+ pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n",
+ __func__, name);
+ self->daddr = DEV_ADDR_ANY;
+ kfree(discoveries);
+ return -ENOTUNIQ;
+ }
+ /* First time we found that one, save it ! */
+ daddr = self->daddr;
+ dtsap_sel = self->dtsap_sel;
+ break;
+ case -EADDRNOTAVAIL:
+ /* Requested service simply doesn't exist on this node */
+ break;
+ default:
+ /* Something bad did happen :-( */
+ pr_debug("%s(), unexpected IAS query failure\n",
+ __func__);
+ self->daddr = DEV_ADDR_ANY;
+ kfree(discoveries);
+ return -EHOSTUNREACH;
+ }
+ }
+ /* Cleanup our copy of the discovery log */
+ kfree(discoveries);
+
+ /* Check out what we found */
+ if(daddr == DEV_ADDR_ANY) {
+ pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n",
+ __func__, name);
+ self->daddr = DEV_ADDR_ANY;
+ return -EADDRNOTAVAIL;
+ }
+
+ /* Revert back to discovered device & service */
+ self->daddr = daddr;
+ self->saddr = 0x0;
+ self->dtsap_sel = dtsap_sel;
+
+ pr_debug("%s(), discovered requested service ''%s'' at address %08x\n",
+ __func__, name, self->daddr);
+
+ return 0;
+}
+
+/*
+ * Function irda_getname (sock, uaddr, uaddr_len, peer)
+ *
+ * Return our own, or the peer's, socket address (sockaddr_irda)
+ *
+ */
+static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sockaddr_irda saddr;
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+
+ memset(&saddr, 0, sizeof(saddr));
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ saddr.sir_family = AF_IRDA;
+ saddr.sir_lsap_sel = self->dtsap_sel;
+ saddr.sir_addr = self->daddr;
+ } else {
+ saddr.sir_family = AF_IRDA;
+ saddr.sir_lsap_sel = self->stsap_sel;
+ saddr.sir_addr = self->saddr;
+ }
+
+ pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
+ pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr);
+
+ /* uaddr_len comes to us uninitialised */
+ *uaddr_len = sizeof (struct sockaddr_irda);
+ memcpy(uaddr, &saddr, *uaddr_len);
+
+ return 0;
+}
+
+/*
+ * Function irda_listen (sock, backlog)
+ *
+ * Just move to the listen state
+ *
+ */
+static int irda_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = -EOPNOTSUPP;
+
+ lock_sock(sk);
+
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
+ goto out;
+
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
+
+ err = 0;
+ }
+out:
+ release_sock(sk);
+
+ return err;
+}
+
+/*
+ * Function irda_bind (sock, uaddr, addr_len)
+ *
+ * Used by servers to register their well known TSAP
+ *
+ */
+static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
+ struct irda_sock *self = irda_sk(sk);
+ int err;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ if (addr_len != sizeof(struct sockaddr_irda))
+ return -EINVAL;
+
+ lock_sock(sk);
+#ifdef CONFIG_IRDA_ULTRA
+ /* Special care for Ultra sockets */
+ if ((sk->sk_type == SOCK_DGRAM) &&
+ (sk->sk_protocol == IRDAPROTO_ULTRA)) {
+ self->pid = addr->sir_lsap_sel;
+ err = -EOPNOTSUPP;
+ if (self->pid & 0x80) {
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
+ goto out;
+ }
+ err = irda_open_lsap(self, self->pid);
+ if (err < 0)
+ goto out;
+
+ /* Pretend we are connected */
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
+ err = 0;
+
+ goto out;
+ }
+#endif /* CONFIG_IRDA_ULTRA */
+
+ self->ias_obj = irias_new_object(addr->sir_name, jiffies);
+ err = -ENOMEM;
+ if (self->ias_obj == NULL)
+ goto out;
+
+ err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
+ if (err < 0) {
+ irias_delete_object(self->ias_obj);
+ self->ias_obj = NULL;
+ goto out;
+ }
+
+ /* Register with LM-IAS */
+ irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
+ self->stsap_sel, IAS_KERNEL_ATTR);
+ irias_insert_object(self->ias_obj);
+
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
+
+/*
+ * Function irda_accept (sock, newsock, flags)
+ *
+ * Wait for incoming connection
+ *
+ */
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *new, *self = irda_sk(sk);
+ struct sock *newsk;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+
+ lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED)
+ goto out;
+
+ err = -EOPNOTSUPP;
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
+ goto out;
+
+ err = -EINVAL;
+ if (sk->sk_state != TCP_LISTEN)
+ goto out;
+
+ /*
+ * The read queue this time is holding sockets ready to use
+ * hooked into the SABM we saved
+ */
+
+ /*
+ * We can perform the accept only if there is incoming data
+ * on the listening socket.
+ * So, we will block the caller until we receive any data.
+ * If the caller was waiting on select() or poll() before
+ * calling us, the data is waiting for us ;-)
+ * Jean II
+ */
+ while (1) {
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ if (skb)
+ break;
+
+ /* Non blocking operation */
+ err = -EWOULDBLOCK;
+ if (flags & O_NONBLOCK)
+ goto out;
+
+ err = wait_event_interruptible(*(sk_sleep(sk)),
+ skb_peek(&sk->sk_receive_queue));
+ if (err)
+ goto out;
+ }
+
+ newsk = newsock->sk;
+ err = -EIO;
+ if (newsk == NULL)
+ goto out;
+
+ newsk->sk_state = TCP_ESTABLISHED;
+
+ new = irda_sk(newsk);
+
+ /* Now attach up the new socket */
+ new->tsap = irttp_dup(self->tsap, new);
+ err = -EPERM; /* value does not seem to make sense. -arnd */
+ if (!new->tsap) {
+ pr_debug("%s(), dup failed!\n", __func__);
+ goto out;
+ }
+
+ new->stsap_sel = new->tsap->stsap_sel;
+ new->dtsap_sel = new->tsap->dtsap_sel;
+ new->saddr = irttp_get_saddr(new->tsap);
+ new->daddr = irttp_get_daddr(new->tsap);
+
+ new->max_sdu_size_tx = self->max_sdu_size_tx;
+ new->max_sdu_size_rx = self->max_sdu_size_rx;
+ new->max_data_size = self->max_data_size;
+ new->max_header_size = self->max_header_size;
+
+ memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));
+
+ /* Clean up the original one to keep it in listen state */
+ irttp_listen(self->tsap);
+
+ sk->sk_ack_backlog--;
+
+ newsock->state = SS_CONNECTED;
+
+ irda_connect_response(new);
+ err = 0;
+out:
+ kfree_skb(skb);
+ release_sock(sk);
+ return err;
+}
+
+/*
+ * Function irda_connect (sock, uaddr, addr_len, flags)
+ *
+ * Connect to an IrDA device
+ *
+ * The main difference with a "standard" connect is that with IrDA we need
+ * to resolve the service name into a TSAP selector (in TCP, port number
+ * doesn't have to be resolved).
+ * Because of this service name resolution, we can offer "auto-connect",
+ * where we connect to a service without specifying a destination address.
+ *
+ * Note : by consulting "errno", the user space caller may learn the cause
+ * of the failure. Most of them are visible in the function, others may come
+ * from subroutines called and are listed here :
+ * o EBUSY : already processing a connect
+ * o EHOSTUNREACH : bad addr->sir_addr argument
+ * o EADDRNOTAVAIL : bad addr->sir_name argument
+ * o ENOTUNIQ : more than one node has addr->sir_name (auto-connect)
+ * o ENETUNREACH : no node found on the network (auto-connect)
+ */
+static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
+ struct irda_sock *self = irda_sk(sk);
+ int err;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ lock_sock(sk);
+ /* Don't allow connect for Ultra sockets */
+ err = -ESOCKTNOSUPPORT;
+ if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
+ goto out;
+
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ sock->state = SS_CONNECTED;
+ err = 0;
+ goto out; /* Connect completed during a ERESTARTSYS event */
+ }
+
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ sock->state = SS_UNCONNECTED;
+ err = -ECONNREFUSED;
+ goto out;
+ }
+
+ err = -EISCONN; /* No reconnect on a seqpacket socket */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ goto out;
+
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+
+ err = -EINVAL;
+ if (addr_len != sizeof(struct sockaddr_irda))
+ goto out;
+
+ /* Check if user supplied any destination device address */
+ if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) {
+ /* Try to find one suitable */
+ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
+ if (err) {
+ pr_debug("%s(), auto-connect failed!\n", __func__);
+ goto out;
+ }
+ } else {
+ /* Use the one provided by the user */
+ self->daddr = addr->sir_addr;
+ pr_debug("%s(), daddr = %08x\n", __func__, self->daddr);
+
+ /* If we don't have a valid service name, we assume the
+ * user wants to connect to a specific LSAP. Prevent
+ * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */
+ if((addr->sir_name[0] != '\0') ||
+ (addr->sir_lsap_sel >= 0x70)) {
+ /* Query remote LM-IAS using service name */
+ err = irda_find_lsap_sel(self, addr->sir_name);
+ if (err) {
+ pr_debug("%s(), connect failed!\n", __func__);
+ goto out;
+ }
+ } else {
+ /* Directly connect to the remote LSAP
+ * specified by the sir_lsap field.
+ * Please use with caution, in IrDA LSAPs are
+ * dynamic and there is no "well-known" LSAP. */
+ self->dtsap_sel = addr->sir_lsap_sel;
+ }
+ }
+
+ /* Check if we have opened a local TSAP */
+ if (!self->tsap) {
+ err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+ if (err)
+ goto out;
+ }
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->sk_state = TCP_SYN_SENT;
+
+ /* Connect to remote device */
+ err = irttp_connect_request(self->tsap, self->dtsap_sel,
+ self->saddr, self->daddr, NULL,
+ self->max_sdu_size_rx, NULL);
+ if (err) {
+ pr_debug("%s(), connect failed!\n", __func__);
+ goto out;
+ }
+
+ /* Now the loop */
+ err = -EINPROGRESS;
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ goto out;
+
+ err = -ERESTARTSYS;
+ if (wait_event_interruptible(*(sk_sleep(sk)),
+ (sk->sk_state != TCP_SYN_SENT)))
+ goto out;
+
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ sock->state = SS_UNCONNECTED;
+ err = sock_error(sk);
+ if (!err)
+ err = -ECONNRESET;
+ goto out;
+ }
+
+ sock->state = SS_CONNECTED;
+
+ /* At this point, IrLMP has assigned our source address */
+ self->saddr = irttp_get_saddr(self->tsap);
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
+
+static struct proto irda_proto = {
+ .name = "IRDA",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct irda_sock),
+};
+
+/*
+ * Function irda_create (sock, protocol)
+ *
+ * Create IrDA socket
+ *
+ */
+static int irda_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+ struct irda_sock *self;
+
+ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+ return -EINVAL;
+
+ if (net != &init_net)
+ return -EAFNOSUPPORT;
+
+ /* Check for valid socket type */
+ switch (sock->type) {
+ case SOCK_STREAM: /* For TTP connections with SAR disabled */
+ case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */
+ case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
+
+ /* Allocate networking socket */
+ sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern);
+ if (sk == NULL)
+ return -ENOMEM;
+
+ self = irda_sk(sk);
+ pr_debug("%s() : self is %p\n", __func__, self);
+
+ init_waitqueue_head(&self->query_wait);
+
+ switch (sock->type) {
+ case SOCK_STREAM:
+ sock->ops = &irda_stream_ops;
+ self->max_sdu_size_rx = TTP_SAR_DISABLE;
+ break;
+ case SOCK_SEQPACKET:
+ sock->ops = &irda_seqpacket_ops;
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+ break;
+ case SOCK_DGRAM:
+ switch (protocol) {
+#ifdef CONFIG_IRDA_ULTRA
+ case IRDAPROTO_ULTRA:
+ sock->ops = &irda_ultra_ops;
+ /* Initialise now, because we may send on unbound
+ * sockets. Jean II */
+ self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER;
+ self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER;
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
+ case IRDAPROTO_UNITDATA:
+ sock->ops = &irda_dgram_ops;
+ /* We let Unitdata conn. be like seqpack conn. */
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+ break;
+ default:
+ sk_free(sk);
+ return -ESOCKTNOSUPPORT;
+ }
+ break;
+ default:
+ sk_free(sk);
+ return -ESOCKTNOSUPPORT;
+ }
+
+ /* Initialise networking socket struct */
+ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
+ sk->sk_family = PF_IRDA;
+ sk->sk_protocol = protocol;
+
+ /* Register as a client with IrLMP */
+ self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
+ self->mask.word = 0xffff;
+ self->rx_flow = self->tx_flow = FLOW_START;
+ self->nslots = DISCOVERY_DEFAULT_SLOTS;
+ self->daddr = DEV_ADDR_ANY; /* Until we get connected */
+ self->saddr = 0x0; /* so IrLMP assign us any link */
+ return 0;
+}
+
+/*
+ * Function irda_destroy_socket (self)
+ *
+ * Destroy socket
+ *
+ */
+static void irda_destroy_socket(struct irda_sock *self)
+{
+ pr_debug("%s(%p)\n", __func__, self);
+
+ /* Unregister with IrLMP */
+ irlmp_unregister_client(self->ckey);
+ irlmp_unregister_service(self->skey);
+
+ /* Unregister with LM-IAS */
+ if (self->ias_obj) {
+ irias_delete_object(self->ias_obj);
+ self->ias_obj = NULL;
+ }
+
+ if (self->iriap) {
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
+
+ if (self->tsap) {
+ irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+#ifdef CONFIG_IRDA_ULTRA
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
+ self->lsap = NULL;
+ }
+#endif /* CONFIG_IRDA_ULTRA */
+}
+
+/*
+ * Function irda_release (sock)
+ */
+static int irda_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk == NULL)
+ return 0;
+
+ lock_sock(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
+
+ /* Destroy IrDA socket */
+ irda_destroy_socket(irda_sk(sk));
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+
+ /* Purge queues (see sock_init_data()) */
+ skb_queue_purge(&sk->sk_receive_queue);
+
+ /* Destroy networking socket if we are the last reference on it,
+ * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
+ sock_put(sk);
+
+ /* Notes on socket locking and deallocation... - Jean II
+ * In theory we should put pairs of sock_hold() / sock_put() to
+ * prevent the socket from being destroyed whenever there is an
+ * outstanding request or outstanding incoming packet or event.
+ *
+ * 1) This may include IAS request, both in connect and getsockopt.
+ * Unfortunately, the situation is a bit more messy than it looks,
+ * because we close iriap and kfree(self) above.
+ *
+ * 2) This may include selective discovery in getsockopt.
+ * Same stuff as above, irlmp registration and self are gone.
+ *
+ * Probably 1 and 2 may not matter, because it's all triggered
+ * by a process and the socket layer already prevents the
+ * socket from going away while a process is holding it, through
+ * sockfd_put() and fput()...
+ *
+ * 3) This may include deferred TSAP closure. In particular,
+ * we may receive a late irda_disconnect_indication()
+ * Fortunately, (tsap_cb *)->close_pend should protect us
+ * from that.
+ *
+ * I did some testing on SMP, and it looks solid. And the socket
+ * memory leak is now gone... - Jean II
+ */
+
+ return 0;
+}
+
+/*
+ * Function irda_sendmsg (sock, msg, len)
+ *
+ * Send message down to TinyTP. This function is used for both STREAM and
+ * SEQPACKET services. This is possible since it forces the client to
+ * fragment the message if necessary
+ */
+static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ struct sk_buff *skb;
+ int err = -EPIPE;
+
+ pr_debug("%s(), len=%zd\n", __func__, len);
+
+ /* Note : socket.c sets MSG_EOR on SEQPACKET sockets */
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
+ MSG_NOSIGNAL)) {
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto out_err;
+
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ self = irda_sk(sk);
+
+ /* Check if IrTTP wants us to slow down */
+
+ if (wait_event_interruptible(*(sk_sleep(sk)),
+ (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* Check if we are still connected */
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ /* Check that we don't send out frames that are too big */
+ if (len > self->max_data_size) {
+ pr_debug("%s(), Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ goto out_err;
+
+ skb_reserve(skb, self->max_header_size + 16);
+ skb_reset_transport_header(skb);
+ skb_put(skb, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+
+ /*
+ * Just send the message to TinyTP, and let it deal with possible
+ * errors. No need to duplicate all that here
+ */
+ err = irttp_data_request(self->tsap, skb);
+ if (err) {
+ pr_debug("%s(), err=%d\n", __func__, err);
+ goto out_err;
+ }
+
+ release_sock(sk);
+ /* Tell client how much data we actually sent */
+ return len;
+
+out_err:
+ err = sk_stream_error(sk, msg->msg_flags, err);
+out:
+ release_sock(sk);
+ return err;
+
+}
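
As an illustrative aside (not part of the patch), a minimal user-space sketch of the send path that irda_sendmsg() serves, assuming the classic AF_IRDA ABI from <linux/irda.h>; the service name "MyService" and the peer address are placeholders:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/types.h>
	#include <linux/irda.h>

	/* Connect to a discovered peer and send one buffer over TinyTP. */
	static int irda_stream_send(__u32 daddr, const void *buf, size_t len)
	{
		struct sockaddr_irda peer;
		int fd = socket(AF_IRDA, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;

		memset(&peer, 0, sizeof(peer));
		peer.sir_family = AF_IRDA;
		peer.sir_addr = daddr;			/* from discovery */
		strcpy(peer.sir_name, "MyService");	/* IAS service name */

		if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0 ||
		    send(fd, buf, len, 0) < 0) {	/* lands in irda_sendmsg() */
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

Note that irda_sendmsg() silently chops oversized writes down to max_data_size and reports the shortened length, so a robust sender loops until the whole buffer has been written.
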
+
+/*
+ * Function irda_recvmsg_dgram (sock, msg, size, flags)
+ *
+ * Try to receive message and copy it to user. The frame is discarded
+ * after being read, regardless of how much the user actually read
+ */
+static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+ struct sk_buff *skb;
+ size_t copied;
+ int err;
+
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return err;
+
+ skb_reset_transport_header(skb);
+ copied = skb->len;
+
+ if (copied > size) {
+ pr_debug("%s(), Received truncated frame (%zd < %zd)!\n",
+ __func__, copied, size);
+ copied = size;
+ msg->msg_flags |= MSG_TRUNC;
+ }
+ skb_copy_datagram_msg(skb, 0, msg, copied);
+
+ skb_free_datagram(sk, skb);
+
+ /*
+	 * Check if we have previously stopped IrTTP and we now
+	 * have more free space in our rx_queue. If so, tell IrTTP
+ * to start delivering frames again before our rx_queue gets
+ * empty
+ */
+ if (self->rx_flow == FLOW_STOP) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
+ pr_debug("%s(), Starting IrTTP\n", __func__);
+ self->rx_flow = FLOW_START;
+ irttp_flow_request(self->tsap, FLOW_START);
+ }
+ }
+
+ return copied;
+}
+
+/*
+ * Function irda_recvmsg_stream (sock, msg, size, flags)
+ */
+static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+ int noblock = flags & MSG_DONTWAIT;
+ size_t copied = 0;
+ int target, err;
+ long timeo;
+
+ if ((err = sock_error(sk)) < 0)
+ return err;
+
+ if (sock->flags & __SO_ACCEPTCON)
+ return -EINVAL;
+
+	err = -EOPNOTSUPP;
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ err = 0;
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+
+ do {
+ int chunk;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+
+ if (skb == NULL) {
+ DEFINE_WAIT(wait);
+ err = 0;
+
+ if (copied >= target)
+ break;
+
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * POSIX 1003.1g mandates this order.
+ */
+ err = sock_error(sk);
+ if (err)
+ ;
+ else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ ;
+ else if (noblock)
+ err = -EAGAIN;
+ else if (signal_pending(current))
+ err = sock_intr_errno(timeo);
+ else if (sk->sk_state != TCP_ESTABLISHED)
+ err = -ENOTCONN;
+ else if (skb_peek(&sk->sk_receive_queue) == NULL)
+				/* Put the process to sleep until data arrives */
+ schedule();
+
+ finish_wait(sk_sleep(sk), &wait);
+
+ if (err)
+ return err;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+
+ continue;
+ }
+
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_to_msg(msg, skb->data, chunk)) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+ }
+ copied += chunk;
+ size -= chunk;
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, chunk);
+
+ /* put the skb back if we didn't use it up.. */
+ if (skb->len) {
+ pr_debug("%s(), back on q!\n",
+ __func__);
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+
+ kfree_skb(skb);
+ } else {
+ pr_debug("%s() questionable!?\n", __func__);
+
+ /* put message back and return */
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ } while (size);
+
+ /*
+	 * Check if we have previously stopped IrTTP and we now
+	 * have more free space in our rx_queue. If so, tell IrTTP
+ * to start delivering frames again before our rx_queue gets
+ * empty
+ */
+ if (self->rx_flow == FLOW_STOP) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
+ pr_debug("%s(), Starting IrTTP\n", __func__);
+ self->rx_flow = FLOW_START;
+ irttp_flow_request(self->tsap, FLOW_START);
+ }
+ }
+
+ return copied;
+}
+
+/*
+ * Function irda_sendmsg_dgram (sock, msg, len)
+ *
+ * Send message down to TinyTP for the unreliable sequenced
+ * packet service...
+ *
+ */
+static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg,
+ size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ struct sk_buff *skb;
+ int err;
+
+ pr_debug("%s(), len=%zd\n", __func__, len);
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ err = -EPIPE;
+ goto out;
+ }
+
+ err = -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+
+ self = irda_sk(sk);
+
+ /*
+ * Check that we don't send out too big frames. This is an unreliable
+ * service, so we have no fragmentation and no coalescence
+ */
+ if (len > self->max_data_size) {
+ pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ err = -ENOBUFS;
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, self->max_header_size);
+ skb_reset_transport_header(skb);
+
+ pr_debug("%s(), appending user data\n", __func__);
+ skb_put(skb, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
+ if (err) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * Just send the message to TinyTP, and let it deal with possible
+ * errors. No need to duplicate all that here
+ */
+ err = irttp_udata_request(self->tsap, skb);
+ if (err) {
+ pr_debug("%s(), err=%d\n", __func__, err);
+ goto out;
+ }
+
+ release_sock(sk);
+ return len;
+
+out:
+ release_sock(sk);
+ return err;
+}
+
+/*
+ * Function irda_sendmsg_ultra (sock, msg, len)
+ *
+ * Send message down to IrLMP for the unreliable Ultra
+ * packet service...
+ */
+#ifdef CONFIG_IRDA_ULTRA
+static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg,
+ size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ __u8 pid = 0;
+ int bound = 0;
+ struct sk_buff *skb;
+ int err;
+
+ pr_debug("%s(), len=%zd\n", __func__, len);
+
+ err = -EINVAL;
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ err = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ goto out;
+ }
+
+ self = irda_sk(sk);
+
+ /* Check if an address was specified with sendto. Jean II */
+ if (msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_irda *, addr, msg->msg_name);
+ err = -EINVAL;
+ /* Check address, extract pid. Jean II */
+ if (msg->msg_namelen < sizeof(*addr))
+ goto out;
+ if (addr->sir_family != AF_IRDA)
+ goto out;
+
+ pid = addr->sir_lsap_sel;
+ if (pid & 0x80) {
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ } else {
+ /* Check that the socket is properly bound to an Ultra
+ * port. Jean II */
+ if ((self->lsap == NULL) ||
+ (sk->sk_state != TCP_ESTABLISHED)) {
+ pr_debug("%s(), socket not bound to Ultra PID.\n",
+ __func__);
+ err = -ENOTCONN;
+ goto out;
+ }
+ /* Use PID from socket */
+ bound = 1;
+ }
+
+ /*
+ * Check that we don't send out too big frames. This is an unreliable
+ * service, so we have no fragmentation and no coalescence
+ */
+ if (len > self->max_data_size) {
+ pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ err = -ENOBUFS;
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, self->max_header_size);
+ skb_reset_transport_header(skb);
+
+ pr_debug("%s(), appending user data\n", __func__);
+ skb_put(skb, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
+ if (err) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ err = irlmp_connless_data_request((bound ? self->lsap : NULL),
+ skb, pid);
+ if (err)
+ pr_debug("%s(), err=%d\n", __func__, err);
+out:
+ release_sock(sk);
+ return err ? : len;
+}
+#endif /* CONFIG_IRDA_ULTRA */
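
A corresponding user-space sketch for the connectionless Ultra path, exercising the msg_name branch above; it assumes IRDAPROTO_ULTRA and struct sockaddr_irda from <linux/irda.h>, and the PID 0x42 is only an example (the extension bit 0x80 must stay clear, as the code checks):

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/types.h>
	#include <linux/irda.h>

	/* Send one connectionless Ultra frame to a given PID. */
	static int ultra_send(const void *buf, size_t len)
	{
		struct sockaddr_irda to;
		int fd = socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_ULTRA);
		ssize_t ret;

		if (fd < 0)
			return -1;

		memset(&to, 0, sizeof(to));
		to.sir_family = AF_IRDA;
		to.sir_lsap_sel = 0x42;		/* Ultra PID, extension bit clear */

		ret = sendto(fd, buf, len, 0, (struct sockaddr *)&to, sizeof(to));
		close(fd);
		return ret < 0 ? -1 : 0;
	}

Without a destination address in sendto(), the socket must instead be bound to an Ultra port so the PID can be taken from the socket itself, as the else branch above shows.
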
+
+/*
+ * Function irda_shutdown (sk, how)
+ */
+static int irda_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ lock_sock(sk);
+
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
+
+ if (self->iriap) {
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
+
+ if (self->tsap) {
+ irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+
+	/* A bit of cleanup so the socket looks as good as new... */
+ self->rx_flow = self->tx_flow = FLOW_START; /* needed ??? */
+ self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */
+ self->saddr = 0x0; /* so IrLMP assign us any link */
+
+ release_sock(sk);
+
+ return 0;
+}
+
+/*
+ * Function irda_poll (file, sock, wait)
+ */
+static unsigned int irda_poll(struct file * file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+ unsigned int mask;
+
+ poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ /* Exceptional events? */
+ if (sk->sk_err)
+ mask |= POLLERR;
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ pr_debug("%s(), POLLHUP\n", __func__);
+ mask |= POLLHUP;
+ }
+
+ /* Readable? */
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
+ pr_debug("Socket is readable\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+	/* Connection-based sockets need to check for termination and startup */
+ switch (sk->sk_type) {
+ case SOCK_STREAM:
+ if (sk->sk_state == TCP_CLOSE) {
+ pr_debug("%s(), POLLHUP\n", __func__);
+ mask |= POLLHUP;
+ }
+
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ if ((self->tx_flow == FLOW_START) &&
+ sock_writeable(sk))
+ {
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ }
+ }
+ break;
+ case SOCK_SEQPACKET:
+ if ((self->tx_flow == FLOW_START) &&
+ sock_writeable(sk))
+ {
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ }
+ break;
+ case SOCK_DGRAM:
+ if (sock_writeable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ break;
+ default:
+ break;
+ }
+
+ return mask;
+}
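
Since POLLOUT is only reported here when IrTTP flow control is open (tx_flow == FLOW_START) and the send buffer has room, a writer can block in poll(2) instead of busy-retrying send(). A minimal sketch, with nothing IrDA-specific beyond relying on the semantics implemented above:

	#include <poll.h>
	#include <unistd.h>

	static ssize_t write_when_writable(int fd, const void *buf, size_t len)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		/* Sleeps until irda_poll() reports the socket writable */
		if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLOUT))
			return -1;

		return write(fd, buf, len);
	}
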
+
+/*
+ * Function irda_ioctl (sock, cmd, arg)
+ */
+static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ pr_debug("%s(), cmd=%#x\n", __func__, cmd);
+
+ err = -EINVAL;
+ switch (cmd) {
+ case TIOCOUTQ: {
+ long amount;
+
+ amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ if (amount < 0)
+ amount = 0;
+ err = put_user(amount, (unsigned int __user *)arg);
+ break;
+ }
+
+ case TIOCINQ: {
+ struct sk_buff *skb;
+ long amount = 0L;
+ /* These two are safe on a single CPU system as only user tasks fiddle here */
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+ amount = skb->len;
+ err = put_user(amount, (unsigned int __user *)arg);
+ break;
+ }
+
+ case SIOCGSTAMP:
+ if (sk != NULL)
+ err = sock_get_timestamp(sk, (struct timeval __user *)arg);
+ break;
+
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ break;
+ default:
+ pr_debug("%s(), doing device ioctl!\n", __func__);
+ err = -ENOIOCTLCMD;
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Function irda_compat_ioctl (sock, cmd, arg)
+ */
+static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ /*
+	 * All of IrDA's ioctls are standard ones.
+ */
+ return -ENOIOCTLCMD;
+}
+#endif
+
+/*
+ * Function irda_setsockopt (sock, level, optname, optval, optlen)
+ *
+ * Set some options for the socket
+ *
+ */
+static int irda_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+ struct irda_ias_set *ias_opt;
+ struct ias_object *ias_obj;
+ struct ias_attrib * ias_attr; /* Attribute in IAS object */
+ int opt, free_ias = 0, err = 0;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ if (level != SOL_IRLMP)
+ return -ENOPROTOOPT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case IRLMP_IAS_SET:
+		/* The user wants to add an attribute to an existing IAS object
+		 * (in the IAS database) or to create a new object with this
+		 * attribute.
+		 * We first query the IAS to know if the object exists, and then
+		 * create the right attribute...
+ */
+
+ if (optlen != sizeof(struct irda_ias_set)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Copy query to the driver. */
+ ias_opt = memdup_user(optval, optlen);
+ if (IS_ERR(ias_opt)) {
+ err = PTR_ERR(ias_opt);
+ goto out;
+ }
+
+ /* Find the object we target.
+ * If the user gives us an empty string, we use the object
+		 * associated with this socket. This works around
+		 * duplicated class names - Jean II */
+ if(ias_opt->irda_class_name[0] == '\0') {
+ if(self->ias_obj == NULL) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+ ias_obj = self->ias_obj;
+ } else
+ ias_obj = irias_find_object(ias_opt->irda_class_name);
+
+ /* Only ROOT can mess with the global IAS database.
+ * Users can only add attributes to the object associated
+ * with the socket they own - Jean II */
+ if((!capable(CAP_NET_ADMIN)) &&
+ ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
+ kfree(ias_opt);
+ err = -EPERM;
+ goto out;
+ }
+
+ /* If the object doesn't exist, create it */
+ if(ias_obj == (struct ias_object *) NULL) {
+ /* Create a new object */
+ ias_obj = irias_new_object(ias_opt->irda_class_name,
+ jiffies);
+ if (ias_obj == NULL) {
+ kfree(ias_opt);
+ err = -ENOMEM;
+ goto out;
+ }
+ free_ias = 1;
+ }
+
+ /* Do we have the attribute already ? */
+ if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) {
+ kfree(ias_opt);
+ if (free_ias) {
+ kfree(ias_obj->name);
+ kfree(ias_obj);
+ }
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Look at the type */
+ switch(ias_opt->irda_attrib_type) {
+ case IAS_INTEGER:
+ /* Add an integer attribute */
+ irias_add_integer_attrib(
+ ias_obj,
+ ias_opt->irda_attrib_name,
+ ias_opt->attribute.irda_attrib_int,
+ IAS_USER_ATTR);
+ break;
+ case IAS_OCT_SEQ:
+ /* Check length */
+ if(ias_opt->attribute.irda_attrib_octet_seq.len >
+ IAS_MAX_OCTET_STRING) {
+ kfree(ias_opt);
+ if (free_ias) {
+ kfree(ias_obj->name);
+ kfree(ias_obj);
+ }
+
+ err = -EINVAL;
+ goto out;
+ }
+ /* Add an octet sequence attribute */
+ irias_add_octseq_attrib(
+ ias_obj,
+ ias_opt->irda_attrib_name,
+ ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
+ ias_opt->attribute.irda_attrib_octet_seq.len,
+ IAS_USER_ATTR);
+ break;
+ case IAS_STRING:
+ /* Should check charset & co */
+ /* Check length */
+ /* The length is encoded in a __u8, and
+ * IAS_MAX_STRING == 256, so there is no way
+ * userspace can pass us a string too large.
+ * Jean II */
+ /* NULL terminate the string (avoid troubles) */
+ ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0';
+ /* Add a string attribute */
+ irias_add_string_attrib(
+ ias_obj,
+ ias_opt->irda_attrib_name,
+ ias_opt->attribute.irda_attrib_string.string,
+ IAS_USER_ATTR);
+ break;
+ default :
+ kfree(ias_opt);
+ if (free_ias) {
+ kfree(ias_obj->name);
+ kfree(ias_obj);
+ }
+ err = -EINVAL;
+ goto out;
+ }
+ irias_insert_object(ias_obj);
+ kfree(ias_opt);
+ break;
+ case IRLMP_IAS_DEL:
+		/* The user wants to delete an object from our local IAS
+		 * database. We just need to query the IAS, check that the
+		 * object is not owned by the kernel, and delete it.
+ */
+
+ if (optlen != sizeof(struct irda_ias_set)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Copy query to the driver. */
+ ias_opt = memdup_user(optval, optlen);
+ if (IS_ERR(ias_opt)) {
+ err = PTR_ERR(ias_opt);
+ goto out;
+ }
+
+ /* Find the object we target.
+ * If the user gives us an empty string, we use the object
+		 * associated with this socket. This works around
+		 * duplicated class names - Jean II */
+ if(ias_opt->irda_class_name[0] == '\0')
+ ias_obj = self->ias_obj;
+ else
+ ias_obj = irias_find_object(ias_opt->irda_class_name);
+ if(ias_obj == (struct ias_object *) NULL) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Only ROOT can mess with the global IAS database.
+ * Users can only del attributes from the object associated
+ * with the socket they own - Jean II */
+ if((!capable(CAP_NET_ADMIN)) &&
+ ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
+ kfree(ias_opt);
+ err = -EPERM;
+ goto out;
+ }
+
+ /* Find the attribute (in the object) we target */
+ ias_attr = irias_find_attrib(ias_obj,
+ ias_opt->irda_attrib_name);
+ if(ias_attr == (struct ias_attrib *) NULL) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+
+		/* Check that user space owns the object */
+ if(ias_attr->value->owner != IAS_USER_ATTR) {
+ pr_debug("%s(), attempting to delete a kernel attribute\n",
+ __func__);
+ kfree(ias_opt);
+ err = -EPERM;
+ goto out;
+ }
+
+ /* Remove the attribute (and maybe the object) */
+ irias_delete_attrib(ias_obj, ias_attr, 1);
+ kfree(ias_opt);
+ break;
+ case IRLMP_MAX_SDU_SIZE:
+ if (optlen < sizeof(int)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (get_user(opt, (int __user *)optval)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* Only possible for a seqpacket service (TTP with SAR) */
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ pr_debug("%s(), setting max_sdu_size = %d\n",
+ __func__, opt);
+ self->max_sdu_size_rx = opt;
+ } else {
+ net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
+ __func__);
+ err = -ENOPROTOOPT;
+ goto out;
+ }
+ break;
+ case IRLMP_HINTS_SET:
+ if (optlen < sizeof(int)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* The input is really a (__u8 hints[2]), easier as an int */
+ if (get_user(opt, (int __user *)optval)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* Unregister any old registration */
+ irlmp_unregister_service(self->skey);
+
+ self->skey = irlmp_register_service((__u16) opt);
+ break;
+ case IRLMP_HINT_MASK_SET:
+ /* As opposed to the previous case which set the hint bits
+ * that we advertise, this one set the filter we use when
+ * making a discovery (nodes which don't match any hint
+ * bit in the mask are not reported).
+ */
+ if (optlen < sizeof(int)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* The input is really a (__u8 hints[2]), easier as an int */
+ if (get_user(opt, (int __user *)optval)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* Set the new hint mask */
+ self->mask.word = (__u16) opt;
+ /* Mask out extension bits */
+ self->mask.word &= 0x7f7f;
+ /* Check if no bits */
+ if(!self->mask.word)
+ self->mask.word = 0xFFFF;
+
+ break;
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+out:
+ release_sock(sk);
+
+ return err;
+}
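
To illustrate the IRLMP_IAS_SET path above from user space, a sketch that publishes a private integer attribute in the local IAS database. It assumes the irda_ias_set layout and the IAS_* type constants from <linux/irda.h>; "MyService" and "MyParameter" are placeholder names, and an empty class name would target the object bound to this socket, as the code notes:

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/types.h>
	#include <linux/irda.h>

	/* Add MyService/MyParameter = value to the local IAS database. */
	static int ias_set_int(int fd, unsigned int value)
	{
		struct irda_ias_set set;

		memset(&set, 0, sizeof(set));
		strcpy(set.irda_class_name, "MyService");
		strcpy(set.irda_attrib_name, "MyParameter");
		set.irda_attrib_type = IAS_INTEGER;
		set.attribute.irda_attrib_int = value;

		return setsockopt(fd, SOL_IRLMP, IRLMP_IAS_SET, &set, sizeof(set));
	}

As the CAP_NET_ADMIN check above implies, unprivileged callers may only modify the object associated with their own socket.
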
+
+/*
+ * Function irda_extract_ias_value(ias_opt, ias_value)
+ *
+ * Translate internal IAS value structure to the user space representation
+ *
+ * The external representation of IAS values, as we exchange them with
+ * user space programs, is quite different from the internal representation
+ * stored in the IAS database (because we need a flat structure for
+ * crossing the kernel boundary).
+ * This function transforms the latter into the former. We also check
+ * that the value type is valid.
+ */
+static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
+ struct ias_value *ias_value)
+{
+ /* Look at the type */
+ switch (ias_value->type) {
+ case IAS_INTEGER:
+ /* Copy the integer */
+ ias_opt->attribute.irda_attrib_int = ias_value->t.integer;
+ break;
+ case IAS_OCT_SEQ:
+ /* Set length */
+ ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len;
+ /* Copy over */
+ memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
+ ias_value->t.oct_seq, ias_value->len);
+ break;
+ case IAS_STRING:
+ /* Set length */
+ ias_opt->attribute.irda_attrib_string.len = ias_value->len;
+ ias_opt->attribute.irda_attrib_string.charset = ias_value->charset;
+ /* Copy over */
+ memcpy(ias_opt->attribute.irda_attrib_string.string,
+ ias_value->t.string, ias_value->len);
+ /* NULL terminate the string (avoid troubles) */
+ ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0';
+ break;
+ case IAS_MISSING:
+ default :
+ return -EINVAL;
+ }
+
+ /* Copy type over */
+ ias_opt->irda_attrib_type = ias_value->type;
+
+ return 0;
+}
+
+/*
+ * Function irda_getsockopt (sock, level, optname, optval, optlen)
+ */
+static int irda_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+ struct irda_device_list list = { 0 };
+ struct irda_device_info *discoveries;
+ struct irda_ias_set * ias_opt; /* IAS get/query params */
+ struct ias_object * ias_obj; /* Object in IAS */
+ struct ias_attrib * ias_attr; /* Attribute in IAS object */
+ int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */
+ int val = 0;
+ int len = 0;
+ int err = 0;
+ int offset, total;
+
+ pr_debug("%s(%p)\n", __func__, self);
+
+ if (level != SOL_IRLMP)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if(len < 0)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case IRLMP_ENUMDEVICES:
+
+ /* Offset to first device entry */
+ offset = sizeof(struct irda_device_list) -
+ sizeof(struct irda_device_info);
+
+ if (len < offset) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Ask lmp for the current discovery log */
+ discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
+ self->nslots);
+		/* Check if we got some results */
+ if (discoveries == NULL) {
+ err = -EAGAIN;
+ goto out; /* Didn't find any devices */
+ }
+
+ /* Write total list length back to client */
+ if (copy_to_user(optval, &list, offset))
+ err = -EFAULT;
+
+ /* Copy the list itself - watch for overflow */
+ if (list.len > 2048) {
+ err = -EINVAL;
+ goto bed;
+ }
+ total = offset + (list.len * sizeof(struct irda_device_info));
+ if (total > len)
+ total = len;
+ if (copy_to_user(optval+offset, discoveries, total - offset))
+ err = -EFAULT;
+
+ /* Write total number of bytes used back to client */
+ if (put_user(total, optlen))
+ err = -EFAULT;
+bed:
+ /* Free up our buffer */
+ kfree(discoveries);
+ break;
+ case IRLMP_MAX_SDU_SIZE:
+ val = self->max_data_size;
+ len = sizeof(int);
+ if (put_user(len, optlen)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(optval, &val, len)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ break;
+ case IRLMP_IAS_GET:
+		/* The user wants an object from our local IAS database.
+ * We just need to query the IAS and return the value
+ * that we found */
+
+ /* Check that the user has allocated the right space for us */
+ if (len != sizeof(struct irda_ias_set)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Copy query to the driver. */
+ ias_opt = memdup_user(optval, len);
+ if (IS_ERR(ias_opt)) {
+ err = PTR_ERR(ias_opt);
+ goto out;
+ }
+
+ /* Find the object we target.
+ * If the user gives us an empty string, we use the object
+		 * associated with this socket. This works around
+		 * duplicated class names - Jean II */
+ if(ias_opt->irda_class_name[0] == '\0')
+ ias_obj = self->ias_obj;
+ else
+ ias_obj = irias_find_object(ias_opt->irda_class_name);
+ if(ias_obj == (struct ias_object *) NULL) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Find the attribute (in the object) we target */
+ ias_attr = irias_find_attrib(ias_obj,
+ ias_opt->irda_attrib_name);
+ if(ias_attr == (struct ias_attrib *) NULL) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Translate from internal to user structure */
+ err = irda_extract_ias_value(ias_opt, ias_attr->value);
+ if(err) {
+ kfree(ias_opt);
+ goto out;
+ }
+
+ /* Copy reply to the user */
+ if (copy_to_user(optval, ias_opt,
+ sizeof(struct irda_ias_set))) {
+ kfree(ias_opt);
+ err = -EFAULT;
+ goto out;
+ }
+ /* Note : don't need to put optlen, we checked it */
+ kfree(ias_opt);
+ break;
+ case IRLMP_IAS_QUERY:
+		/* The user wants an object from a remote IAS database.
+ * We need to use IAP to query the remote database and
+ * then wait for the answer to come back. */
+
+ /* Check that the user has allocated the right space for us */
+ if (len != sizeof(struct irda_ias_set)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Copy query to the driver. */
+ ias_opt = memdup_user(optval, len);
+ if (IS_ERR(ias_opt)) {
+ err = PTR_ERR(ias_opt);
+ goto out;
+ }
+
+ /* At this point, there are two cases...
+ * 1) the socket is connected - that's the easy case, we
+ * just query the device we are connected to...
+ * 2) the socket is not connected - the user doesn't want
+ * to connect and/or may not have a valid service name
+ * (so can't create a fake connection). In this case,
+		 * we assume that the user passes us a valid destination
+ * address in the requesting structure...
+ */
+ if(self->daddr != DEV_ADDR_ANY) {
+ /* We are connected - reuse known daddr */
+ daddr = self->daddr;
+ } else {
+ /* We are not connected, we must specify a valid
+ * destination address */
+ daddr = ias_opt->daddr;
+ if((!daddr) || (daddr == DEV_ADDR_ANY)) {
+ kfree(ias_opt);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Check that we can proceed with IAP */
+ if (self->iriap) {
+ net_warn_ratelimited("%s: busy with a previous query\n",
+ __func__);
+ kfree(ias_opt);
+ err = -EBUSY;
+ goto out;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irda_getvalue_confirm);
+
+ if (self->iriap == NULL) {
+ kfree(ias_opt);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Treat unexpected wakeup as disconnect */
+ self->errno = -EHOSTUNREACH;
+
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(self->iriap,
+ self->saddr, daddr,
+ ias_opt->irda_class_name,
+ ias_opt->irda_attrib_name);
+
+ /* Wait for answer, if not yet finished (or failed) */
+ if (wait_event_interruptible(self->query_wait,
+ (self->iriap == NULL))) {
+			/* The pending request uses a copy of the ias_opt
+			 * content, so we can free it regardless! */
+ kfree(ias_opt);
+ /* Treat signals as disconnect */
+ err = -EHOSTUNREACH;
+ goto out;
+ }
+
+ /* Check what happened */
+ if (self->errno)
+ {
+ kfree(ias_opt);
+ /* Requested object/attribute doesn't exist */
+ if((self->errno == IAS_CLASS_UNKNOWN) ||
+ (self->errno == IAS_ATTRIB_UNKNOWN))
+ err = -EADDRNOTAVAIL;
+ else
+ err = -EHOSTUNREACH;
+
+ goto out;
+ }
+
+ /* Translate from internal to user structure */
+ err = irda_extract_ias_value(ias_opt, self->ias_result);
+ if (self->ias_result)
+ irias_delete_value(self->ias_result);
+ if (err) {
+ kfree(ias_opt);
+ goto out;
+ }
+
+ /* Copy reply to the user */
+ if (copy_to_user(optval, ias_opt,
+ sizeof(struct irda_ias_set))) {
+ kfree(ias_opt);
+ err = -EFAULT;
+ goto out;
+ }
+ /* Note : don't need to put optlen, we checked it */
+ kfree(ias_opt);
+ break;
+ case IRLMP_WAITDEVICE:
+ /* This function is just another way of seeing life ;-)
+ * IRLMP_ENUMDEVICES assumes that you have a static network,
+ * and that you just want to pick one of the devices present.
+ * On the other hand, in here we assume that no device is
+ * present and that at some point in the future a device will
+		 * come into range. When such a device arrives, we just wake
+		 * up the caller, so that it has time to connect to it before
+		 * the device goes away...
+		 * Note : once a node has been discovered for more than a
+		 * few seconds, it won't trigger this function, unless it
+		 * goes away and comes back or changes its hint bits (so we
+		 * might call it IRLMP_WAITNEWDEVICE).
+ */
+
+ /* Check that the user is passing us an int */
+ if (len != sizeof(int)) {
+ err = -EINVAL;
+ goto out;
+ }
+ /* Get timeout in ms (max time we block the caller) */
+ if (get_user(val, (int __user *)optval)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* Tell IrLMP we want to be notified */
+ irlmp_update_client(self->ckey, self->mask.word,
+ irda_selective_discovery_indication,
+ NULL, (void *) self);
+
+ /* Do some discovery (and also return cached results) */
+ irlmp_discovery_request(self->nslots);
+
+ /* Wait until a node is discovered */
+ if (!self->cachedaddr) {
+ pr_debug("%s(), nothing discovered yet, going to sleep...\n",
+ __func__);
+
+ /* Set watchdog timer to expire in <val> ms. */
+ self->errno = 0;
+ setup_timer(&self->watchdog, irda_discovery_timeout,
+ (unsigned long)self);
+ mod_timer(&self->watchdog,
+ jiffies + msecs_to_jiffies(val));
+
+ /* Wait for IR-LMP to call us back */
+ err = __wait_event_interruptible(self->query_wait,
+ (self->cachedaddr != 0 || self->errno == -ETIME));
+
+ /* If watchdog is still activated, kill it! */
+ del_timer(&(self->watchdog));
+
+ pr_debug("%s(), ...waking up !\n", __func__);
+
+ if (err != 0)
+ goto out;
+ }
+ else
+ pr_debug("%s(), found immediately !\n",
+ __func__);
+
+ /* Tell IrLMP that we have been notified */
+ irlmp_update_client(self->ckey, self->mask.word,
+ NULL, NULL, NULL);
+
+		/* Check if we got some results */
+ if (!self->cachedaddr) {
+ err = -EAGAIN; /* Didn't find any devices */
+ goto out;
+ }
+ daddr = self->cachedaddr;
+ /* Cleanup */
+ self->cachedaddr = 0;
+
+		/* We return the daddr of the device that triggered the
+		 * wakeup. As irlmp passes us only the new devices, we
+		 * are sure that it's not an old device.
+		 * If the user wants more details, they should query
+		 * the whole discovery log and pick one device...
+ */
+ if (put_user(daddr, (int __user *)optval)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ break;
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+out:
+
+ release_sock(sk);
+
+ return err;
+}
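
And the matching consumer side for IRLMP_ENUMDEVICES: a sketch that reads the discovery log copied out by the code above. It assumes struct irda_device_list and struct irda_device_info from <linux/irda.h>; the room for 10 entries is arbitrary:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <linux/types.h>
	#include <linux/irda.h>

	/* Print the daddr and nickname of every currently discovered device. */
	static void dump_discoveries(int fd)
	{
		socklen_t len = sizeof(struct irda_device_list) +
				10 * sizeof(struct irda_device_info);
		struct irda_device_list *list = malloc(len);
		__u32 i;

		if (!list)
			return;

		if (getsockopt(fd, SOL_IRLMP, IRLMP_ENUMDEVICES, list, &len) == 0)
			for (i = 0; i < list->len; i++)
				printf("0x%08x %.22s\n",
				       list->dev[i].daddr, list->dev[i].info);

		free(list);
	}

When nothing has been discovered yet, the option fails with EAGAIN, matching the error path above.
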
+
+static const struct net_proto_family irda_family_ops = {
+ .family = PF_IRDA,
+ .create = irda_create,
+ .owner = THIS_MODULE,
+};
+
+static const struct proto_ops irda_stream_ops = {
+ .family = PF_IRDA,
+ .owner = THIS_MODULE,
+ .release = irda_release,
+ .bind = irda_bind,
+ .connect = irda_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = irda_accept,
+ .getname = irda_getname,
+ .poll = irda_poll,
+ .ioctl = irda_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = irda_compat_ioctl,
+#endif
+ .listen = irda_listen,
+ .shutdown = irda_shutdown,
+ .setsockopt = irda_setsockopt,
+ .getsockopt = irda_getsockopt,
+ .sendmsg = irda_sendmsg,
+ .recvmsg = irda_recvmsg_stream,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops irda_seqpacket_ops = {
+ .family = PF_IRDA,
+ .owner = THIS_MODULE,
+ .release = irda_release,
+ .bind = irda_bind,
+ .connect = irda_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = irda_accept,
+ .getname = irda_getname,
+ .poll = datagram_poll,
+ .ioctl = irda_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = irda_compat_ioctl,
+#endif
+ .listen = irda_listen,
+ .shutdown = irda_shutdown,
+ .setsockopt = irda_setsockopt,
+ .getsockopt = irda_getsockopt,
+ .sendmsg = irda_sendmsg,
+ .recvmsg = irda_recvmsg_dgram,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops irda_dgram_ops = {
+ .family = PF_IRDA,
+ .owner = THIS_MODULE,
+ .release = irda_release,
+ .bind = irda_bind,
+ .connect = irda_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = irda_accept,
+ .getname = irda_getname,
+ .poll = datagram_poll,
+ .ioctl = irda_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = irda_compat_ioctl,
+#endif
+ .listen = irda_listen,
+ .shutdown = irda_shutdown,
+ .setsockopt = irda_setsockopt,
+ .getsockopt = irda_getsockopt,
+ .sendmsg = irda_sendmsg_dgram,
+ .recvmsg = irda_recvmsg_dgram,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+#ifdef CONFIG_IRDA_ULTRA
+static const struct proto_ops irda_ultra_ops = {
+ .family = PF_IRDA,
+ .owner = THIS_MODULE,
+ .release = irda_release,
+ .bind = irda_bind,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = irda_getname,
+ .poll = datagram_poll,
+ .ioctl = irda_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = irda_compat_ioctl,
+#endif
+ .listen = sock_no_listen,
+ .shutdown = irda_shutdown,
+ .setsockopt = irda_setsockopt,
+ .getsockopt = irda_getsockopt,
+ .sendmsg = irda_sendmsg_ultra,
+ .recvmsg = irda_recvmsg_dgram,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irsock_init (pro)
+ *
+ * Initialize IrDA protocol
+ *
+ */
+int __init irsock_init(void)
+{
+ int rc = proto_register(&irda_proto, 0);
+
+ if (rc == 0)
+ rc = sock_register(&irda_family_ops);
+
+ return rc;
+}
+
+/*
+ * Function irsock_cleanup (void)
+ *
+ * Remove IrDA protocol
+ *
+ */
+void irsock_cleanup(void)
+{
+ sock_unregister(PF_IRDA);
+ proto_unregister(&irda_proto);
+}
diff --git a/drivers/staging/irda/net/discovery.c b/drivers/staging/irda/net/discovery.c
new file mode 100644
index 000000000000..364d70aed068
--- /dev/null
+++ b/drivers/staging/irda/net/discovery.c
@@ -0,0 +1,417 @@
+/*********************************************************************
+ *
+ * Filename: discovery.c
+ * Version: 0.1
+ * Description: Routines for handling discoveries at the IrLMP layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 6 15:33:50 1999
+ * Modified at: Sat Oct 9 17:11:31 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+
+#include <net/irda/discovery.h>
+
+#include <asm/unaligned.h>
+
+/*
+ * Function irlmp_add_discovery (cachelog, discovery)
+ *
+ * Add a new discovery to the cachelog, and remove any old discoveries
+ * from the same device
+ *
+ * Note : we try to preserve the time this device was *first* discovered
+ * (as opposed to the time of last discovery used for cleanup). This is
+ * used by clients waiting for discovery events to tell if the device
+ * discovered is "new" or just the same old one. They can't rely
+ * on a binary flag (new/old), because not all discovery events are
+ * propagated to them, and they might not always be listening, so they
+ * would miss some new devices popping up...
+ * Jean II
+ */
+void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
+{
+ discovery_t *discovery, *node;
+ unsigned long flags;
+
+ /* Set time of first discovery if node is new (see below) */
+ new->firststamp = new->timestamp;
+
+ spin_lock_irqsave(&cachelog->hb_spinlock, flags);
+
+ /*
+	 * Remove all discoveries of devices that have previously been
+ * discovered on the same link with the same name (info), or the
+ * same daddr. We do this since some devices (mostly PDAs) change
+ * their device address between every discovery.
+ */
+ discovery = (discovery_t *) hashbin_get_first(cachelog);
+ while (discovery != NULL ) {
+ node = discovery;
+
+ /* Be sure to stay one item ahead */
+ discovery = (discovery_t *) hashbin_get_next(cachelog);
+
+ if ((node->data.saddr == new->data.saddr) &&
+ ((node->data.daddr == new->data.daddr) ||
+ (strcmp(node->data.info, new->data.info) == 0)))
+ {
+ /* This discovery is a previous discovery
+ * from the same device, so just remove it
+ */
+ hashbin_remove_this(cachelog, (irda_queue_t *) node);
+ /* Check if hints bits are unchanged */
+ if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints))
+ /* Set time of first discovery for this node */
+ new->firststamp = node->firststamp;
+ kfree(node);
+ }
+ }
+
+ /* Insert the new and updated version */
+ hashbin_insert(cachelog, (irda_queue_t *) new, new->data.daddr, NULL);
+
+ spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
+}
+
+/*
+ * Function irlmp_add_discovery_log (cachelog, log)
+ *
+ *    Merge a discovery log into the cachelog.
+ *
+ */
+void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
+{
+ discovery_t *discovery;
+
+ /*
+	 * If the log is missing, it means that IrLAP was unable to perform
+	 * the discovery, so restart discovery again with just half the
+	 * normal timeout.
+ */
+ /* Well... It means that there was nobody out there - Jean II */
+ if (log == NULL) {
+ /* irlmp_start_discovery_timer(irlmp, 150); */
+ return;
+ }
+
+ /*
+ * Locking : we are the only owner of this discovery log, so
+ * no need to lock it.
+ * We just need to lock the global log in irlmp_add_discovery().
+ */
+ discovery = (discovery_t *) hashbin_remove_first(log);
+ while (discovery != NULL) {
+ irlmp_add_discovery(cachelog, discovery);
+
+ discovery = (discovery_t *) hashbin_remove_first(log);
+ }
+
+ /* Delete the now empty log */
+ hashbin_delete(log, (FREE_FUNC) kfree);
+}
+
+/*
+ * Function irlmp_expire_discoveries (log, saddr, force)
+ *
+ *    Go through all discoveries and expire all that have stayed too long
+ *
+ * Note : this assumes that IrLAP won't change its saddr, which
+ * is currently a valid assumption...
+ */
+void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
+{
+ discovery_t * discovery;
+ discovery_t * curr;
+ unsigned long flags;
+ discinfo_t * buffer = NULL;
+ int n; /* Size of the full log */
+ int i = 0; /* How many we expired */
+
+ IRDA_ASSERT(log != NULL, return;);
+ spin_lock_irqsave(&log->hb_spinlock, flags);
+
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ /* Be sure to be one item ahead */
+ curr = discovery;
+ discovery = (discovery_t *) hashbin_get_next(log);
+
+ /* Test if it's time to expire this discovery */
+ if ((curr->data.saddr == saddr) &&
+ (force ||
+ ((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT)))
+ {
+ /* Create buffer as needed.
+			 * As this function gets called a lot and most of the
+			 * time we don't have anything to put in the log (we are
+ * quite picky), we can save a lot of overhead
+ * by not calling kmalloc. Jean II */
+ if(buffer == NULL) {
+ /* Create the client specific buffer */
+ n = HASHBIN_GET_SIZE(log);
+ buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
+ if (buffer == NULL) {
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
+ return;
+ }
+
+ }
+
+ /* Copy discovery information */
+ memcpy(&(buffer[i]), &(curr->data),
+ sizeof(discinfo_t));
+ i++;
+
+ /* Remove it from the log */
+ curr = hashbin_remove_this(log, (irda_queue_t *) curr);
+ kfree(curr);
+ }
+ }
+
+ /* Drop the spinlock before calling the higher layers, as
+ * we can't guarantee they won't call us back and create a
+ * deadlock. We will work on our own private data, so we
+ * don't care to be interrupted. - Jean II */
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
+
+ if(buffer == NULL)
+ return;
+
+ /* Tell IrLMP and registered clients about it */
+ irlmp_discovery_expiry(buffer, i);
+
+ /* Free up our buffer */
+ kfree(buffer);
+}
+
+#if 0
+/*
+ * Function irlmp_dump_discoveries (log)
+ *
+ * Print out all discoveries in log
+ *
+ */
+void irlmp_dump_discoveries(hashbin_t *log)
+{
+ discovery_t *discovery;
+
+ IRDA_ASSERT(log != NULL, return;);
+
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ pr_debug("Discovery:\n");
+ pr_debug(" daddr=%08x\n", discovery->data.daddr);
+ pr_debug(" saddr=%08x\n", discovery->data.saddr);
+ pr_debug(" nickname=%s\n", discovery->data.info);
+
+ discovery = (discovery_t *) hashbin_get_next(log);
+ }
+}
+#endif
+
+/*
+ * Function irlmp_copy_discoveries (log, pn, mask)
+ *
+ * Copy all discoveries in a buffer
+ *
+ * This function implements a safe way for lmp clients to access the
+ * discovery log. The basic problem is that we don't want the log
+ * to change (add/remove) while the client is reading it. If the
+ * lmp client manipulated the hashbin directly, it would be sure to get
+ * into trouble...
+ * The idea is that we copy the whole current discovery log into a buffer
+ * which is specific to the client and pass this copy to it. As we
+ * do this operation with the spinlock grabbed, we are safe...
+ * Note : we don't want those clients to grab the spinlock, because
+ * we have no control over how long they would hold it...
+ * Note : we choose to copy the log as "struct irda_device_info" to
+ * save space...
+ * Note : the client must kfree() the log itself...
+ * Jean II
+ */
+struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
+ __u16 mask, int old_entries)
+{
+ discovery_t * discovery;
+ unsigned long flags;
+ discinfo_t * buffer = NULL;
+ int j_timeout = (sysctl_discovery_timeout * HZ);
+ int n; /* Size of the full log */
+ int i = 0; /* How many we picked */
+
+ IRDA_ASSERT(pn != NULL, return NULL;);
+ IRDA_ASSERT(log != NULL, return NULL;);
+
+ /* Save spin lock */
+ spin_lock_irqsave(&log->hb_spinlock, flags);
+
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ /* Mask out the ones we don't want :
+ * We want to match the discovery mask, and to get only
+ * the most recent one (unless we want old ones) */
+ if ((get_unaligned((__u16 *)discovery->data.hints) & mask) &&
+ ((old_entries) ||
+ ((jiffies - discovery->firststamp) < j_timeout))) {
+ /* Create buffer as needed.
+			 * As this function gets called a lot and most of the
+			 * time we don't have anything to put in the log (we are
+ * quite picky), we can save a lot of overhead
+ * by not calling kmalloc. Jean II */
+ if(buffer == NULL) {
+ /* Create the client specific buffer */
+ n = HASHBIN_GET_SIZE(log);
+ buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
+ if (buffer == NULL) {
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
+ return NULL;
+ }
+
+ }
+
+ /* Copy discovery information */
+ memcpy(&(buffer[i]), &(discovery->data),
+ sizeof(discinfo_t));
+ i++;
+ }
+ discovery = (discovery_t *) hashbin_get_next(log);
+ }
+
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
+
+	/* Get the actual number of devices in the buffer and return */
+ *pn = i;
+ return buffer;
+}
+
+#ifdef CONFIG_PROC_FS
+static inline discovery_t *discovery_seq_idx(loff_t pos)
+
+{
+ discovery_t *discovery;
+
+ for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog);
+ discovery != NULL;
+ discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) {
+ if (pos-- == 0)
+ break;
+ }
+
+ return discovery;
+}
+
+static void *discovery_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ spin_lock_irq(&irlmp->cachelog->hb_spinlock);
+ return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return (v == SEQ_START_TOKEN)
+ ? (void *) hashbin_get_first(irlmp->cachelog)
+ : (void *) hashbin_get_next(irlmp->cachelog);
+}
+
+static void discovery_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_irq(&irlmp->cachelog->hb_spinlock);
+}
+
+static int discovery_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "IrLMP: Discovery log:\n\n");
+ else {
+ const discovery_t *discovery = v;
+
+ seq_printf(seq, "nickname: %s, hint: 0x%02x%02x",
+ discovery->data.info,
+ discovery->data.hints[0],
+ discovery->data.hints[1]);
+#if 0
+ if ( discovery->data.hints[0] & HINT_PNP)
+ seq_puts(seq, "PnP Compatible ");
+ if ( discovery->data.hints[0] & HINT_PDA)
+ seq_puts(seq, "PDA/Palmtop ");
+ if ( discovery->data.hints[0] & HINT_COMPUTER)
+ seq_puts(seq, "Computer ");
+ if ( discovery->data.hints[0] & HINT_PRINTER)
+ seq_puts(seq, "Printer ");
+ if ( discovery->data.hints[0] & HINT_MODEM)
+ seq_puts(seq, "Modem ");
+ if ( discovery->data.hints[0] & HINT_FAX)
+ seq_puts(seq, "Fax ");
+ if ( discovery->data.hints[0] & HINT_LAN)
+ seq_puts(seq, "LAN Access ");
+
+ if ( discovery->data.hints[1] & HINT_TELEPHONY)
+ seq_puts(seq, "Telephony ");
+ if ( discovery->data.hints[1] & HINT_FILE_SERVER)
+ seq_puts(seq, "File Server ");
+ if ( discovery->data.hints[1] & HINT_COMM)
+ seq_puts(seq, "IrCOMM ");
+ if ( discovery->data.hints[1] & HINT_OBEX)
+ seq_puts(seq, "IrOBEX ");
+#endif
+ seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n",
+ discovery->data.saddr,
+ discovery->data.daddr);
+
+ seq_putc(seq, '\n');
+ }
+ return 0;
+}
+
+static const struct seq_operations discovery_seq_ops = {
+ .start = discovery_seq_start,
+ .next = discovery_seq_next,
+ .stop = discovery_seq_stop,
+ .show = discovery_seq_show,
+};
+
+static int discovery_seq_open(struct inode *inode, struct file *file)
+{
+ IRDA_ASSERT(irlmp != NULL, return -EINVAL;);
+
+ return seq_open(file, &discovery_seq_ops);
+}
+
+const struct file_operations discovery_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = discovery_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
diff --git a/drivers/staging/irda/net/ircomm/Kconfig b/drivers/staging/irda/net/ircomm/Kconfig
new file mode 100644
index 000000000000..19492c1707b7
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/Kconfig
@@ -0,0 +1,12 @@
+config IRCOMM
+ tristate "IrCOMM protocol"
+ depends on IRDA && TTY
+ help
+ Say Y here if you want to build support for the IrCOMM protocol.
+ To compile it as modules, choose M here: the modules will be
+ called ircomm and ircomm_tty.
+	  IrCOMM implements serial port emulation, and makes it possible to
+	  use all existing applications that understand TTYs over an
+	  infrared link.  Thus you should be able to use applications like
+	  PPP, minicom and others.
+
diff --git a/drivers/staging/irda/net/ircomm/Makefile b/drivers/staging/irda/net/ircomm/Makefile
new file mode 100644
index 000000000000..ab23b5ba7e33
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux IrDA IrCOMM protocol layer.
+#
+
+obj-$(CONFIG_IRCOMM) += ircomm.o ircomm-tty.o
+
+ircomm-y := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
+ircomm-tty-y := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
diff --git a/drivers/staging/irda/net/ircomm/ircomm_core.c b/drivers/staging/irda/net/ircomm/ircomm_core.c
new file mode 100644
index 000000000000..3af219545f6d
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_core.c
@@ -0,0 +1,563 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_core.c
+ * Version: 1.0
+ * Description: IrCOMM service interface
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 20:37:34 1999
+ * Modified at: Tue Dec 21 13:26:41 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irias_object.h>
+
+#include <net/irda/ircomm_event.h>
+#include <net/irda/ircomm_lmp.h>
+#include <net/irda/ircomm_ttp.h>
+#include <net/irda/ircomm_param.h>
+#include <net/irda/ircomm_core.h>
+
+static int __ircomm_close(struct ircomm_cb *self);
+static void ircomm_control_indication(struct ircomm_cb *self,
+ struct sk_buff *skb, int clen);
+
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *proc_irda;
+static int ircomm_seq_open(struct inode *, struct file *);
+
+static const struct file_operations ircomm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ircomm_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+hashbin_t *ircomm = NULL;
+
+static int __init ircomm_init(void)
+{
+ ircomm = hashbin_new(HB_LOCK);
+ if (ircomm == NULL) {
+ net_err_ratelimited("%s(), can't allocate hashbin!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_PROC_FS
+ { struct proc_dir_entry *ent;
+ ent = proc_create("ircomm", 0, proc_irda, &ircomm_proc_fops);
+ if (!ent) {
+ printk(KERN_ERR "ircomm_init: can't create /proc entry!\n");
+ return -ENODEV;
+ }
+ }
+#endif /* CONFIG_PROC_FS */
+
+ net_info_ratelimited("IrCOMM protocol (Dag Brattli)\n");
+
+ return 0;
+}
+
+static void __exit ircomm_cleanup(void)
+{
+ hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("ircomm", proc_irda);
+#endif /* CONFIG_PROC_FS */
+}
+
+/*
+ * Function ircomm_open (client_notify)
+ *
+ * Start a new IrCOMM instance
+ *
+ */
+struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
+{
+ struct ircomm_cb *self = NULL;
+ int ret;
+
+ pr_debug("%s(), service_type=0x%02x\n", __func__ ,
+ service_type);
+
+ IRDA_ASSERT(ircomm != NULL, return NULL;);
+
+ self = kzalloc(sizeof(struct ircomm_cb), GFP_KERNEL);
+ if (self == NULL)
+ return NULL;
+
+ self->notify = *notify;
+ self->magic = IRCOMM_MAGIC;
+
+ /* Check if we should use IrLMP or IrTTP */
+ if (service_type & IRCOMM_3_WIRE_RAW) {
+ self->flow_status = FLOW_START;
+ ret = ircomm_open_lsap(self);
+ } else
+ ret = ircomm_open_tsap(self);
+
+ if (ret < 0) {
+ kfree(self);
+ return NULL;
+ }
+
+ self->service_type = service_type;
+ self->line = line;
+
+ hashbin_insert(ircomm, (irda_queue_t *) self, line, NULL);
+
+ ircomm_next_state(self, IRCOMM_IDLE);
+
+ return self;
+}
+
+EXPORT_SYMBOL(ircomm_open);
+
+/*
+ * Function ircomm_close_instance (self)
+ *
+ * Remove IrCOMM instance
+ *
+ */
+static int __ircomm_close(struct ircomm_cb *self)
+{
+ /* Disconnect link if any */
+ ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL);
+
+ /* Remove TSAP */
+ if (self->tsap) {
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+
+ /* Remove LSAP */
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
+ self->lsap = NULL;
+ }
+ self->magic = 0;
+
+ kfree(self);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_close (self)
+ *
+ * Closes and removes the specified IrCOMM instance
+ *
+ */
+int ircomm_close(struct ircomm_cb *self)
+{
+ struct ircomm_cb *entry;
+
+ IRDA_ASSERT(self != NULL, return -EIO;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;);
+
+ entry = hashbin_remove(ircomm, self->line, NULL);
+
+ IRDA_ASSERT(entry == self, return -1;);
+
+ return __ircomm_close(self);
+}
+
+EXPORT_SYMBOL(ircomm_close);
+
+/*
+ * Function ircomm_connect_request (self, service_type)
+ *
+ *    The implementation of this function differs from the reference one.
+ *    This function does discovery as well as sending the connect request
+ *
+ */
+int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr, struct sk_buff *skb,
+ __u8 service_type)
+{
+ struct ircomm_info info;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+
+ self->service_type= service_type;
+
+ info.dlsap_sel = dlsap_sel;
+ info.saddr = saddr;
+ info.daddr = daddr;
+
+ ret = ircomm_do_event(self, IRCOMM_CONNECT_REQUEST, skb, &info);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(ircomm_connect_request);
+
+/*
+ * Function ircomm_connect_indication (self, qos, skb)
+ *
+ * Notify user layer about the incoming connection
+ *
+ */
+void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info)
+{
+ /*
+ * If there are any data hiding in the control channel, we must
+ * deliver it first. The side effect is that the control channel
+ * will be removed from the skb
+ */
+ if (self->notify.connect_indication)
+ self->notify.connect_indication(self->notify.instance, self,
+ info->qos, info->max_data_size,
+ info->max_header_size, skb);
+ else {
+ pr_debug("%s(), missing handler\n", __func__);
+ }
+}
+
+/*
+ * Function ircomm_connect_response (self, userdata, max_sdu_size)
+ *
+ * User accepts connection
+ *
+ */
+int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+
+ ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(ircomm_connect_response);
+
+/*
+ * Function connect_confirm (self, skb)
+ *
+ * Notify user layer that the link is now connected
+ *
+ */
+void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info)
+{
+ if (self->notify.connect_confirm )
+ self->notify.connect_confirm(self->notify.instance,
+ self, info->qos,
+ info->max_data_size,
+ info->max_header_size, skb);
+ else {
+ pr_debug("%s(), missing handler\n", __func__);
+ }
+}
+
+/*
+ * Function ircomm_data_request (self, userdata)
+ *
+ * Send IrCOMM data to peer device
+ *
+ */
+int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -EFAULT;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
+ IRDA_ASSERT(skb != NULL, return -EFAULT;);
+
+ ret = ircomm_do_event(self, IRCOMM_DATA_REQUEST, skb, NULL);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(ircomm_data_request);
+
+/*
+ * Function ircomm_data_indication (self, skb)
+ *
+ * Data arrived, so deliver it to user
+ *
+ */
+void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(skb->len > 0, return;);
+
+ if (self->notify.data_indication)
+ self->notify.data_indication(self->notify.instance, self, skb);
+ else {
+ pr_debug("%s(), missing handler\n", __func__);
+ }
+}
+
+/*
+ * Function ircomm_process_data (self, skb)
+ *
+ * Data arrived which may contain control channel data
+ *
+ */
+void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
+{
+ int clen;
+
+ IRDA_ASSERT(skb->len > 0, return;);
+
+ clen = skb->data[0];
+
+ /*
+	 * Input validation check: a stir4200/mcp2150 combination sometimes
+ * results in frames with clen > remaining packet size. These are
+ * illegal; if we throw away just this frame then it seems to carry on
+ * fine
+ */
+ if (unlikely(skb->len < (clen + 1))) {
+ pr_debug("%s() throwing away illegal frame\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * If there are any data hiding in the control channel, we must
+ * deliver it first. The side effect is that the control channel
+ * will be removed from the skb
+ */
+ if (clen > 0)
+ ircomm_control_indication(self, skb, clen);
+
+ /* Remove control channel from data channel */
+ skb_pull(skb, clen+1);
+
+ if (skb->len)
+ ircomm_data_indication(self, skb);
+ else {
+ pr_debug("%s(), data was control info only!\n",
+ __func__);
+ }
+}
+
+/*
+ * Function ircomm_control_request (self, params)
+ *
+ * Send control data to peer device
+ *
+ */
+int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -EFAULT;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
+ IRDA_ASSERT(skb != NULL, return -EFAULT;);
+
+ ret = ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb, NULL);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(ircomm_control_request);
+
+/*
+ * Function ircomm_control_indication (self, skb)
+ *
+ * Data has arrived on the control channel
+ *
+ */
+static void ircomm_control_indication(struct ircomm_cb *self,
+ struct sk_buff *skb, int clen)
+{
+ /* Use udata for delivering data on the control channel */
+ if (self->notify.udata_indication) {
+ struct sk_buff *ctrl_skb;
+
+ /* We don't own the skb, so clone it */
+ ctrl_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ctrl_skb)
+ return;
+
+ /* Remove data channel from control channel */
+ skb_trim(ctrl_skb, clen+1);
+
+ self->notify.udata_indication(self->notify.instance, self,
+ ctrl_skb);
+
+ /* Drop reference count -
+ * see ircomm_tty_control_indication(). */
+ dev_kfree_skb(ctrl_skb);
+ } else {
+ pr_debug("%s(), missing handler\n", __func__);
+ }
+}
+
+/*
+ * Function ircomm_disconnect_request (self, userdata, priority)
+ *
+ * User layer wants to disconnect the IrCOMM connection
+ *
+ */
+int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata)
+{
+ struct ircomm_info info;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+
+ ret = ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, userdata,
+ &info);
+ return ret;
+}
+
+EXPORT_SYMBOL(ircomm_disconnect_request);
+
+/*
+ * Function ircomm_disconnect_indication (self, skb, info)
+ *
+ * Tell user that the link has been disconnected
+ *
+ */
+void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
+ struct ircomm_info *info)
+{
+ IRDA_ASSERT(info != NULL, return;);
+
+ if (self->notify.disconnect_indication) {
+ self->notify.disconnect_indication(self->notify.instance, self,
+ info->reason, skb);
+ } else {
+ pr_debug("%s(), missing handler\n", __func__);
+ }
+}
+
+/*
+ * Function ircomm_flow_request (self, flow)
+ *
+ *
+ *
+ */
+void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ if (self->service_type == IRCOMM_3_WIRE_RAW)
+ return;
+
+ irttp_flow_request(self->tsap, flow);
+}
+
+EXPORT_SYMBOL(ircomm_flow_request);
+
+#ifdef CONFIG_PROC_FS
+static void *ircomm_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct ircomm_cb *self;
+ loff_t off = 0;
+
+ spin_lock_irq(&ircomm->hb_spinlock);
+
+ for (self = (struct ircomm_cb *) hashbin_get_first(ircomm);
+ self != NULL;
+ self = (struct ircomm_cb *) hashbin_get_next(ircomm)) {
+ if (off++ == *pos)
+ break;
+
+ }
+ return self;
+}
+
+static void *ircomm_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+
+ return (void *) hashbin_get_next(ircomm);
+}
+
+static void ircomm_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_irq(&ircomm->hb_spinlock);
+}
+
+static int ircomm_seq_show(struct seq_file *seq, void *v)
+{
+ const struct ircomm_cb *self = v;
+
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EINVAL;);
+
+ if (self->line < 0x10)
+ seq_printf(seq, "ircomm%d", self->line);
+ else
+ seq_printf(seq, "irlpt%d", self->line - 0x10);
+
+ seq_printf(seq,
+ " state: %s, slsap_sel: %#02x, dlsap_sel: %#02x, mode:",
+ ircomm_state[self->state],
+ self->slsap_sel, self->dlsap_sel);
+
+ if (self->service_type & IRCOMM_3_WIRE_RAW)
+ seq_printf(seq, " 3-wire-raw");
+ if (self->service_type & IRCOMM_3_WIRE)
+ seq_printf(seq, " 3-wire");
+ if (self->service_type & IRCOMM_9_WIRE)
+ seq_printf(seq, " 9-wire");
+ if (self->service_type & IRCOMM_CENTRONICS)
+ seq_printf(seq, " Centronics");
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations ircomm_seq_ops = {
+ .start = ircomm_seq_start,
+ .next = ircomm_seq_next,
+ .stop = ircomm_seq_stop,
+ .show = ircomm_seq_show,
+};
+
+static int ircomm_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ircomm_seq_ops);
+}
+#endif /* CONFIG_PROC_FS */
+
+MODULE_AUTHOR("Dag Brattli <dag@brattli.net>");
+MODULE_DESCRIPTION("IrCOMM protocol");
+MODULE_LICENSE("GPL");
+
+module_init(ircomm_init);
+module_exit(ircomm_cleanup);
diff --git a/drivers/staging/irda/net/ircomm/ircomm_event.c b/drivers/staging/irda/net/ircomm/ircomm_event.c
new file mode 100644
index 000000000000..b0730ac9f388
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_event.c
@@ -0,0 +1,246 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_event.c
+ * Version: 1.0
+ * Description: IrCOMM layer state machine
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 20:33:11 1999
+ * Modified at: Sun Dec 12 13:44:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irias_object.h>
+
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_event.h>
+
+static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info);
+
+const char *const ircomm_state[] = {
+ "IRCOMM_IDLE",
+ "IRCOMM_WAITI",
+ "IRCOMM_WAITR",
+ "IRCOMM_CONN",
+};
+
+static const char *const ircomm_event[] __maybe_unused = {
+ "IRCOMM_CONNECT_REQUEST",
+ "IRCOMM_CONNECT_RESPONSE",
+ "IRCOMM_TTP_CONNECT_INDICATION",
+ "IRCOMM_LMP_CONNECT_INDICATION",
+ "IRCOMM_TTP_CONNECT_CONFIRM",
+ "IRCOMM_LMP_CONNECT_CONFIRM",
+
+ "IRCOMM_LMP_DISCONNECT_INDICATION",
+ "IRCOMM_TTP_DISCONNECT_INDICATION",
+ "IRCOMM_DISCONNECT_REQUEST",
+
+ "IRCOMM_TTP_DATA_INDICATION",
+ "IRCOMM_LMP_DATA_INDICATION",
+ "IRCOMM_DATA_REQUEST",
+ "IRCOMM_CONTROL_REQUEST",
+ "IRCOMM_CONTROL_INDICATION",
+};
+
+static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info) =
+{
+ ircomm_state_idle,
+ ircomm_state_waiti,
+ ircomm_state_waitr,
+ ircomm_state_conn,
+};
+
+/*
+ * Function ircomm_state_idle (self, event, skb)
+ *
+ * IrCOMM is currently idle
+ *
+ */
+static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case IRCOMM_CONNECT_REQUEST:
+ ircomm_next_state(self, IRCOMM_WAITI);
+ ret = self->issue.connect_request(self, skb, info);
+ break;
+ case IRCOMM_TTP_CONNECT_INDICATION:
+ case IRCOMM_LMP_CONNECT_INDICATION:
+ ircomm_next_state(self, IRCOMM_WAITR);
+ ircomm_connect_indication(self, skb, info);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_state_waiti (self, event, skb)
+ *
+ * The IrCOMM user has requested an IrCOMM connection to the remote
+ * device and is awaiting confirmation
+ */
+static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case IRCOMM_TTP_CONNECT_CONFIRM:
+ case IRCOMM_LMP_CONNECT_CONFIRM:
+ ircomm_next_state(self, IRCOMM_CONN);
+ ircomm_connect_confirm(self, skb, info);
+ break;
+ case IRCOMM_TTP_DISCONNECT_INDICATION:
+ case IRCOMM_LMP_DISCONNECT_INDICATION:
+ ircomm_next_state(self, IRCOMM_IDLE);
+ ircomm_disconnect_indication(self, skb, info);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_state_waitr (self, event, skb)
+ *
+ * IrCOMM has received an incoming connection request and is awaiting
+ * response from the user
+ */
+static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case IRCOMM_CONNECT_RESPONSE:
+ ircomm_next_state(self, IRCOMM_CONN);
+ ret = self->issue.connect_response(self, skb);
+ break;
+ case IRCOMM_DISCONNECT_REQUEST:
+ ircomm_next_state(self, IRCOMM_IDLE);
+ ret = self->issue.disconnect_request(self, skb, info);
+ break;
+ case IRCOMM_TTP_DISCONNECT_INDICATION:
+ case IRCOMM_LMP_DISCONNECT_INDICATION:
+ ircomm_next_state(self, IRCOMM_IDLE);
+ ircomm_disconnect_indication(self, skb, info);
+ break;
+ default:
+ pr_debug("%s(), unknown event = %s\n", __func__ ,
+ ircomm_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_state_conn (self, event, skb)
+ *
+ * IrCOMM is connected to the peer IrCOMM device
+ *
+ */
+static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case IRCOMM_DATA_REQUEST:
+ ret = self->issue.data_request(self, skb, 0);
+ break;
+ case IRCOMM_TTP_DATA_INDICATION:
+ ircomm_process_data(self, skb);
+ break;
+ case IRCOMM_LMP_DATA_INDICATION:
+ ircomm_data_indication(self, skb);
+ break;
+ case IRCOMM_CONTROL_REQUEST:
+ /* Just send a separate frame for now */
+ ret = self->issue.data_request(self, skb, skb->len);
+ break;
+ case IRCOMM_TTP_DISCONNECT_INDICATION:
+ case IRCOMM_LMP_DISCONNECT_INDICATION:
+ ircomm_next_state(self, IRCOMM_IDLE);
+ ircomm_disconnect_indication(self, skb, info);
+ break;
+ case IRCOMM_DISCONNECT_REQUEST:
+ ircomm_next_state(self, IRCOMM_IDLE);
+ ret = self->issue.disconnect_request(self, skb, info);
+ break;
+ default:
+ pr_debug("%s(), unknown event = %s\n", __func__ ,
+ ircomm_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_do_event (self, event, skb)
+ *
+ * Process event
+ *
+ */
+int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb, struct ircomm_info *info)
+{
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_state[self->state], ircomm_event[event]);
+
+ return (*state[self->state])(self, event, skb, info);
+}
+
+/*
+ * Function ircomm_next_state (self, state)
+ *
+ * Switch state
+ *
+ */
+void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state)
+{
+ self->state = state;
+
+ pr_debug("%s: next state=%s, service type=%d\n", __func__ ,
+ ircomm_state[self->state], self->service_type);
+}
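The dispatch above is a classic table-driven state machine: one handler per state, indexed by the current state, with each handler switching on the event and calling ircomm_next_state() to move on. The following is a standalone sketch of the same pattern in plain C, using made-up toy states and events rather than the real IRCOMM_* ones.

#include <stdio.h>

typedef enum { ST_IDLE, ST_CONN, ST_MAX } toy_state_t;
typedef enum { EV_CONNECT, EV_DISCONNECT } toy_event_t;

struct toy_cb { toy_state_t state; };

static int toy_idle(struct toy_cb *self, toy_event_t ev)
{
        if (ev == EV_CONNECT) {
                self->state = ST_CONN;  /* like ircomm_next_state() */
                return 0;
        }
        return -1;                      /* unknown event in this state */
}

static int toy_conn(struct toy_cb *self, toy_event_t ev)
{
        if (ev == EV_DISCONNECT) {
                self->state = ST_IDLE;
                return 0;
        }
        return -1;
}

/* One handler per state, indexed by the state value, as in state[] above */
static int (*toy_handlers[ST_MAX])(struct toy_cb *, toy_event_t) = {
        [ST_IDLE] = toy_idle,
        [ST_CONN] = toy_conn,
};

static int toy_do_event(struct toy_cb *self, toy_event_t ev)
{
        return toy_handlers[self->state](self, ev);
}

int main(void)
{
        struct toy_cb cb = { .state = ST_IDLE };

        toy_do_event(&cb, EV_CONNECT);
        printf("state after connect: %d\n", cb.state);
        return 0;
}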
diff --git a/drivers/staging/irda/net/ircomm/ircomm_lmp.c b/drivers/staging/irda/net/ircomm/ircomm_lmp.c
new file mode 100644
index 000000000000..e4cc847bb933
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_lmp.c
@@ -0,0 +1,350 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_lmp.c
+ * Version: 1.0
+ * Description: Interface between IrCOMM and IrLMP
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 20:48:27 1999
+ * Modified at: Sun Dec 12 13:44:17 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: Previous IrLPT work by Thomas Davis
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/init.h>
+#include <linux/gfp.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irda_device.h> /* struct irda_skb_cb */
+
+#include <net/irda/ircomm_event.h>
+#include <net/irda/ircomm_lmp.h>
+
+
+/*
+ * Function ircomm_lmp_connect_request (self, userdata)
+ *
+ *
+ *
+ */
+static int ircomm_lmp_connect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info)
+{
+ int ret = 0;
+
+ /* Don't forget to refcount it - should be NULL anyway */
+ if (userdata)
+ skb_get(userdata);
+
+ ret = irlmp_connect_request(self->lsap, info->dlsap_sel,
+ info->saddr, info->daddr, NULL, userdata);
+ return ret;
+}
+
+/*
+ * Function ircomm_lmp_connect_response (self, skb)
+ *
+ *
+ *
+ */
+static int ircomm_lmp_connect_response(struct ircomm_cb *self,
+ struct sk_buff *userdata)
+{
+ struct sk_buff *tx_skb;
+
+ /* Any userdata supplied? */
+ if (userdata == NULL) {
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+ } else {
+ /*
+ * Check that the client has reserved enough space for
+ * headers
+ */
+ IRDA_ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER,
+ return -1;);
+
+ /* Don't forget to refcount it - should be NULL anyway */
+ skb_get(userdata);
+ tx_skb = userdata;
+ }
+
+ return irlmp_connect_response(self->lsap, tx_skb);
+}
+
+static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info)
+{
+ struct sk_buff *tx_skb;
+ int ret;
+
+ if (!userdata) {
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+ userdata = tx_skb;
+ } else {
+ /* Don't forget to refcount it - should be NULL anyway */
+ skb_get(userdata);
+ }
+
+ ret = irlmp_disconnect_request(self->lsap, userdata);
+
+ return ret;
+}
+
+/*
+ * Function ircomm_lmp_flow_control (skb)
+ *
+ * This function is called when a data frame we have sent to IrLAP has
+ * been deallocated. We do this to make sure we don't flood IrLAP with
+ * frames, since we are not using the IrTTP flow control mechanism
+ */
+static void ircomm_lmp_flow_control(struct sk_buff *skb)
+{
+ struct irda_skb_cb *cb;
+ struct ircomm_cb *self;
+ int line;
+
+ IRDA_ASSERT(skb != NULL, return;);
+
+ cb = (struct irda_skb_cb *) skb->cb;
+
+ line = cb->line;
+
+ self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
+ if (!self) {
+ pr_debug("%s(), didn't find myself\n", __func__);
+ return;
+ }
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ self->pkt_count--;
+
+ if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) {
+ pr_debug("%s(), asking TTY to start again!\n", __func__);
+ self->flow_status = FLOW_START;
+ if (self->notify.flow_indication)
+ self->notify.flow_indication(self->notify.instance,
+ self, FLOW_START);
+ }
+}
+
+/*
+ * Function ircomm_lmp_data_request (self, userdata)
+ *
+ * Send data frame to peer device
+ *
+ */
+static int ircomm_lmp_data_request(struct ircomm_cb *self,
+ struct sk_buff *skb,
+ int not_used)
+{
+ struct irda_skb_cb *cb;
+ int ret;
+
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ cb = (struct irda_skb_cb *) skb->cb;
+
+ cb->line = self->line;
+
+ pr_debug("%s(), sending frame\n", __func__);
+
+ /* Don't forget to refcount it - see ircomm_tty_do_softint() */
+ skb_get(skb);
+
+ skb_orphan(skb);
+ skb->destructor = ircomm_lmp_flow_control;
+
+ if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
+ pr_debug("%s(), asking TTY to slow down!\n", __func__);
+ self->flow_status = FLOW_STOP;
+ if (self->notify.flow_indication)
+ self->notify.flow_indication(self->notify.instance,
+ self, FLOW_STOP);
+ }
+ ret = irlmp_data_request(self->lsap, skb);
+ if (ret) {
+ net_err_ratelimited("%s(), failed\n", __func__);
+ /* irlmp_data_request already freed the packet */
+ }
+
+ return ret;
+}
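Taken together, ircomm_lmp_data_request() and ircomm_lmp_flow_control() implement a small hysteresis on the number of frames outstanding towards IrLAP: the TTY is asked to stop once more than 7 packets are in flight and to start again once the count drops below 2. Here is a standalone sketch of just that counting policy in plain C, with made-up names; the skb destructor plumbing is left out.

#include <stdio.h>

enum flow { FLOW_START, FLOW_STOP };

struct flow_ctx {
        int pkt_count;
        enum flow flow_status;
};

/* Called when a frame is handed to the lower layer (cf. ircomm_lmp_data_request) */
static void on_tx(struct flow_ctx *c)
{
        if (c->pkt_count++ > 7 && c->flow_status == FLOW_START) {
                c->flow_status = FLOW_STOP;
                printf("asking TTY to slow down (in flight=%d)\n", c->pkt_count);
        }
}

/* Called when a frame has been freed by the lower layer (cf. ircomm_lmp_flow_control) */
static void on_tx_done(struct flow_ctx *c)
{
        c->pkt_count--;
        if (c->pkt_count < 2 && c->flow_status == FLOW_STOP) {
                c->flow_status = FLOW_START;
                printf("asking TTY to start again (in flight=%d)\n", c->pkt_count);
        }
}

int main(void)
{
        struct flow_ctx c = { 0, FLOW_START };
        int i;

        for (i = 0; i < 10; i++)
                on_tx(&c);
        for (i = 0; i < 10; i++)
                on_tx_done(&c);
        return 0;
}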
+
+/*
+ * Function ircomm_lmp_data_indication (instance, sap, skb)
+ *
+ * Incoming data which we must deliver to the state machine, to check
+ * we are still connected.
+ */
+static int ircomm_lmp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ ircomm_do_event(self, IRCOMM_LMP_DATA_INDICATION, skb, NULL);
+
+ /* Drop reference count - see ircomm_tty_data_indication(). */
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_lmp_connect_confirm (instance, sap, qos, max_sdu_size,
+ * max_header_size, skb)
+ *
+ * Connection has been confirmed by peer device
+ *
+ */
+static void ircomm_lmp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(qos != NULL, return;);
+
+ info.max_data_size = max_seg_size;
+ info.max_header_size = max_header_size;
+ info.qos = qos;
+
+ ircomm_do_event(self, IRCOMM_LMP_CONNECT_CONFIRM, skb, &info);
+
+ /* Drop reference count - see ircomm_tty_connect_confirm(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function ircomm_lmp_connect_indication (instance, sap, qos, max_sdu_size,
+ * max_header_size, skb)
+ *
+ * Peer device wants to make a connection with us
+ *
+ */
+static void ircomm_lmp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *)instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(qos != NULL, return;);
+
+ info.max_data_size = max_seg_size;
+ info.max_header_size = max_header_size;
+ info.qos = qos;
+
+ ircomm_do_event(self, IRCOMM_LMP_CONNECT_INDICATION, skb, &info);
+
+ /* Drop reference count - see ircomm_tty_connect_indication(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function ircomm_lmp_disconnect_indication (instance, sap, reason, skb)
+ *
+ * Peer device has closed the connection, or the link went down for some
+ * other reason
+ */
+static void ircomm_lmp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ info.reason = reason;
+
+ ircomm_do_event(self, IRCOMM_LMP_DISCONNECT_INDICATION, skb, &info);
+
+ /* Drop reference count - see ircomm_tty_disconnect_indication(). */
+ if (skb)
+ dev_kfree_skb(skb);
+}
+/*
+ * Function ircomm_open_lsap (self)
+ *
+ * Open LSAP. This function will only be used when using "raw" services
+ *
+ */
+int ircomm_open_lsap(struct ircomm_cb *self)
+{
+ notify_t notify;
+
+ /* Register callbacks */
+ irda_notify_init(&notify);
+ notify.data_indication = ircomm_lmp_data_indication;
+ notify.connect_confirm = ircomm_lmp_connect_confirm;
+ notify.connect_indication = ircomm_lmp_connect_indication;
+ notify.disconnect_indication = ircomm_lmp_disconnect_indication;
+ notify.instance = self;
+ strlcpy(notify.name, "IrCOMM", sizeof(notify.name));
+
+ self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0);
+ if (!self->lsap) {
+ pr_debug("%sfailed to allocate tsap\n", __func__);
+ return -1;
+ }
+ self->slsap_sel = self->lsap->slsap_sel;
+
+ /*
+ * Initialize the call-table for issuing commands
+ */
+ self->issue.data_request = ircomm_lmp_data_request;
+ self->issue.connect_request = ircomm_lmp_connect_request;
+ self->issue.connect_response = ircomm_lmp_connect_response;
+ self->issue.disconnect_request = ircomm_lmp_disconnect_request;
+
+ return 0;
+}
diff --git a/drivers/staging/irda/net/ircomm/ircomm_param.c b/drivers/staging/irda/net/ircomm/ircomm_param.c
new file mode 100644
index 000000000000..5728e76ca6d5
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_param.c
@@ -0,0 +1,501 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_param.c
+ * Version: 1.0
+ * Description: Parameter handling for the IrCOMM protocol
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 10:25:11 1999
+ * Modified at: Sun Jan 30 14:32:03 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/parameters.h>
+
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_tty_attach.h>
+#include <net/irda/ircomm_tty.h>
+
+#include <net/irda/ircomm_param.h>
+
+static int ircomm_param_service_type(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_port_type(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_port_name(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_service_type(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_data_rate(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_data_format(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_flow_control(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get);
+static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get);
+static int ircomm_param_line_status(void *instance, irda_param_t *param,
+ int get);
+static int ircomm_param_dte(void *instance, irda_param_t *param, int get);
+static int ircomm_param_dce(void *instance, irda_param_t *param, int get);
+static int ircomm_param_poll(void *instance, irda_param_t *param, int get);
+
+static const pi_minor_info_t pi_minor_call_table_common[] = {
+ { ircomm_param_service_type, PV_INT_8_BITS },
+ { ircomm_param_port_type, PV_INT_8_BITS },
+ { ircomm_param_port_name, PV_STRING }
+};
+static const pi_minor_info_t pi_minor_call_table_non_raw[] = {
+ { ircomm_param_data_rate, PV_INT_32_BITS | PV_BIG_ENDIAN },
+ { ircomm_param_data_format, PV_INT_8_BITS },
+ { ircomm_param_flow_control, PV_INT_8_BITS },
+ { ircomm_param_xon_xoff, PV_INT_16_BITS },
+ { ircomm_param_enq_ack, PV_INT_16_BITS },
+ { ircomm_param_line_status, PV_INT_8_BITS }
+};
+static const pi_minor_info_t pi_minor_call_table_9_wire[] = {
+ { ircomm_param_dte, PV_INT_8_BITS },
+ { ircomm_param_dce, PV_INT_8_BITS },
+ { ircomm_param_poll, PV_NO_VALUE },
+};
+
+static const pi_major_info_t pi_major_call_table[] = {
+ { pi_minor_call_table_common, 3 },
+ { pi_minor_call_table_non_raw, 6 },
+ { pi_minor_call_table_9_wire, 3 }
+/* { pi_minor_call_table_centronics } */
+};
+
+pi_param_info_t ircomm_param_info = { pi_major_call_table, 3, 0x0f, 4 };
+
+/*
+ * Function ircomm_param_request (self, pi, flush)
+ *
+ * Queue a parameter for the control channel
+ *
+ */
+int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int count;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ /* Make sure we don't send parameters for raw mode */
+ if (self->service_type == IRCOMM_3_WIRE_RAW)
+ return 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ skb = self->ctrl_skb;
+ if (!skb) {
+ skb = alloc_skb(256, GFP_ATOMIC);
+ if (!skb) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, self->max_header_size);
+ self->ctrl_skb = skb;
+ }
+ /*
+ * Inserting is a little bit tricky since we don't know how much
+ * room we will need. But this should hopefully work OK
+ */
+ count = irda_param_insert(self, pi, skb_tail_pointer(skb),
+ skb_tailroom(skb), &ircomm_param_info);
+ if (count < 0) {
+ net_warn_ratelimited("%s(), no room for parameter!\n",
+ __func__);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -1;
+ }
+ skb_put(skb, count);
+ pr_debug("%s(), skb->len=%d\n", __func__, skb->len);
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ if (flush) {
+ /* ircomm_tty_do_softint will take care of the rest */
+ schedule_work(&self->tqueue);
+ }
+
+ return count;
+}
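ircomm_param_request() appends one parameter at a time to the pending control skb; irda_param_insert() encodes it into the buffer, which for IrCOMM parameters is a PI/PL/PV triple (identifier, length, value). Below is a standalone sketch of that append step under that assumption, using a flat buffer instead of an skb and an illustrative PI value; the names here are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one PI/PL/PV-encoded parameter; returns bytes used or -1 if no room */
static int param_insert(uint8_t *buf, size_t room, uint8_t pi,
                        const uint8_t *pv, uint8_t pl)
{
        if (room < (size_t)pl + 2)
                return -1;              /* the "no room for parameter" case above */
        buf[0] = pi;                    /* parameter identifier */
        buf[1] = pl;                    /* parameter length */
        memcpy(&buf[2], pv, pl);        /* parameter value */
        return pl + 2;
}

int main(void)
{
        uint8_t ctrl[64];
        size_t used = 0;
        const uint8_t value = 0x03;     /* one-byte parameter value */
        int n;

        n = param_insert(ctrl + used, sizeof(ctrl) - used,
                         0x20 /* illustrative PI */, &value, 1);
        if (n > 0)
                used += n;
        printf("control channel now holds %zu byte(s)\n", used);
        return 0;
}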
+
+/*
+ * Function ircomm_param_service_type (self, buf, len)
+ *
+ * Handle the service type. This function is called both after the LM-IAS
+ * query and when the remote device sends its initial parameters
+ *
+ */
+static int ircomm_param_service_type(void *instance, irda_param_t *param,
+ int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ __u8 service_type = (__u8) param->pv.i;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get) {
+ param->pv.i = self->settings.service_type;
+ return 0;
+ }
+
+ /* Find all common service types */
+ service_type &= self->service_type;
+ if (!service_type) {
+ pr_debug("%s(), No common service type to use!\n", __func__);
+ return -1;
+ }
+ pr_debug("%s(), services in common=%02x\n", __func__ ,
+ service_type);
+
+ /*
+ * Now choose a preferred service type of those available
+ */
+ if (service_type & IRCOMM_CENTRONICS)
+ self->settings.service_type = IRCOMM_CENTRONICS;
+ else if (service_type & IRCOMM_9_WIRE)
+ self->settings.service_type = IRCOMM_9_WIRE;
+ else if (service_type & IRCOMM_3_WIRE)
+ self->settings.service_type = IRCOMM_3_WIRE;
+ else if (service_type & IRCOMM_3_WIRE_RAW)
+ self->settings.service_type = IRCOMM_3_WIRE_RAW;
+
+ pr_debug("%s(), resulting service type=0x%02x\n", __func__ ,
+ self->settings.service_type);
+
+ /*
+ * Now the line is ready for some communication. Check if we are a
+ * server, and send over some initial parameters.
+ * The client does it in ircomm_tty_state_setup().
+ * Note: we may get called from ircomm_tty_getvalue_confirm(), and
+ * therefore before we have even opened any socket. And self->client
+ * is initialised to TRUE only later. So, we check that the link is
+ * really initialised. - Jean II
+ */
+ if ((self->max_header_size != IRCOMM_TTY_HDR_UNINITIALISED) &&
+ (!self->client) &&
+ (self->settings.service_type != IRCOMM_3_WIRE_RAW))
+ {
+ /* Init connection */
+ ircomm_tty_send_initial_parameters(self);
+ ircomm_tty_link_established(self);
+ }
+
+ return 0;
+}
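The negotiation above intersects the peer's advertised service types with our own and then keeps the richest of what remains, preferring Centronics over 9-wire over 3-wire over 3-wire raw. The following is a standalone sketch of that preference order in plain C; the bit values here are made up, the real IRCOMM_* masks live in the headers.

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit masks; the kernel headers define the real IRCOMM_* values */
#define SVC_3_WIRE_RAW  0x01
#define SVC_3_WIRE      0x02
#define SVC_9_WIRE      0x04
#define SVC_CENTRONICS  0x08

static uint8_t pick_service_type(uint8_t ours, uint8_t theirs)
{
        uint8_t common = ours & theirs;         /* service types in common */

        if (common & SVC_CENTRONICS)
                return SVC_CENTRONICS;
        if (common & SVC_9_WIRE)
                return SVC_9_WIRE;
        if (common & SVC_3_WIRE)
                return SVC_3_WIRE;
        if (common & SVC_3_WIRE_RAW)
                return SVC_3_WIRE_RAW;
        return 0;                               /* no common service type */
}

int main(void)
{
        uint8_t chosen = pick_service_type(SVC_3_WIRE | SVC_9_WIRE,
                                           SVC_3_WIRE | SVC_CENTRONICS);

        printf("chosen service type: 0x%02x\n", (unsigned)chosen);     /* 3-wire */
        return 0;
}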
+
+/*
+ * Function ircomm_param_port_type (self, param)
+ *
+ * The port type parameter tells if the devices are serial or parallel.
+ * Since we only advertise serial service, this parameter should only
+ * be equal to IRCOMM_SERIAL.
+ */
+static int ircomm_param_port_type(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = IRCOMM_SERIAL;
+ else {
+ self->settings.port_type = (__u8) param->pv.i;
+
+ pr_debug("%s(), port type=%d\n", __func__ ,
+ self->settings.port_type);
+ }
+ return 0;
+}
+
+/*
+ * Function ircomm_param_port_name (self, param)
+ *
+ * Exchange port name
+ *
+ */
+static int ircomm_param_port_name(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get) {
+ pr_debug("%s(), not imp!\n", __func__);
+ } else {
+ pr_debug("%s(), port-name=%s\n", __func__ , param->pv.c);
+ strncpy(self->settings.port_name, param->pv.c, 32);
+ }
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_data_rate (self, param)
+ *
+ * Exchange the data rate to be used for this connection
+ *
+ */
+static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->settings.data_rate;
+ else
+ self->settings.data_rate = param->pv.i;
+
+ pr_debug("%s(), data rate = %d\n", __func__ , param->pv.i);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_data_format (self, param)
+ *
+ * Exchange the data format to be used for this connection
+ *
+ */
+static int ircomm_param_data_format(void *instance, irda_param_t *param,
+ int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->settings.data_format;
+ else
+ self->settings.data_format = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_flow_control (self, param)
+ *
+ * Exchange the flow control settings to be used for this connection
+ *
+ */
+static int ircomm_param_flow_control(void *instance, irda_param_t *param,
+ int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->settings.flow_control;
+ else
+ self->settings.flow_control = (__u8) param->pv.i;
+
+ pr_debug("%s(), flow control = 0x%02x\n", __func__ , (__u8)param->pv.i);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_xon_xoff (self, param)
+ *
+ * Exchange XON/XOFF characters
+ *
+ */
+static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get) {
+ param->pv.i = self->settings.xonxoff[0];
+ param->pv.i |= self->settings.xonxoff[1] << 8;
+ } else {
+ self->settings.xonxoff[0] = (__u16) param->pv.i & 0xff;
+ self->settings.xonxoff[1] = (__u16) param->pv.i >> 8;
+ }
+
+ pr_debug("%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ ,
+ param->pv.i & 0xff, param->pv.i >> 8);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_enq_ack (self, param)
+ *
+ * Exchange ENQ/ACK characters
+ *
+ */
+static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get) {
+ param->pv.i = self->settings.enqack[0];
+ param->pv.i |= self->settings.enqack[1] << 8;
+ } else {
+ self->settings.enqack[0] = (__u16) param->pv.i & 0xff;
+ self->settings.enqack[1] = (__u16) param->pv.i >> 8;
+ }
+
+ pr_debug("%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ ,
+ param->pv.i & 0xff, param->pv.i >> 8);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_line_status (self, param)
+ *
+ *
+ *
+ */
+static int ircomm_param_line_status(void *instance, irda_param_t *param,
+ int get)
+{
+ pr_debug("%s(), not impl.\n", __func__);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_dte (instance, param)
+ *
+ * If we get here, there must be some sort of null-modem connection, and
+ * we are probably working in server mode as well.
+ */
+static int ircomm_param_dte(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ __u8 dte;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->settings.dte;
+ else {
+ dte = (__u8) param->pv.i;
+
+ self->settings.dce = 0;
+
+ if (dte & IRCOMM_DELTA_DTR)
+ self->settings.dce |= (IRCOMM_DELTA_DSR|
+ IRCOMM_DELTA_RI |
+ IRCOMM_DELTA_CD);
+ if (dte & IRCOMM_DTR)
+ self->settings.dce |= (IRCOMM_DSR|
+ IRCOMM_RI |
+ IRCOMM_CD);
+
+ if (dte & IRCOMM_DELTA_RTS)
+ self->settings.dce |= IRCOMM_DELTA_CTS;
+ if (dte & IRCOMM_RTS)
+ self->settings.dce |= IRCOMM_CTS;
+
+ /* Take appropriate actions */
+ ircomm_tty_check_modem_status(self);
+
+ /* Null modem cable emulator */
+ self->settings.null_modem = TRUE;
+ }
+
+ return 0;
+}
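The DTE handler above emulates a null-modem cable: the peer's DTR is reflected back as our DSR/RI/CD, and its RTS as our CTS (and likewise for the delta bits). Below is a standalone sketch of that mapping for the non-delta signals in plain C; the bit values are made up, the real IRCOMM_* constants come from ircomm_param.h.

#include <stdio.h>
#include <stdint.h>

/* Illustrative line-state bits; the real IRCOMM_* constants live in ircomm_param.h */
#define DTE_DTR 0x01
#define DTE_RTS 0x02
#define DCE_CTS 0x01
#define DCE_DSR 0x02
#define DCE_RI  0x04
#define DCE_CD  0x08

/* Null-modem emulation: derive our DCE view from the peer's DTE signals */
static uint8_t dte_to_dce(uint8_t dte)
{
        uint8_t dce = 0;

        if (dte & DTE_DTR)
                dce |= DCE_DSR | DCE_RI | DCE_CD;
        if (dte & DTE_RTS)
                dce |= DCE_CTS;
        return dce;
}

int main(void)
{
        printf("peer raised DTR+RTS -> dce=0x%02x\n",
               (unsigned)dte_to_dce(DTE_DTR | DTE_RTS));
        printf("peer raised RTS only -> dce=0x%02x\n",
               (unsigned)dte_to_dce(DTE_RTS));
        return 0;
}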
+
+/*
+ * Function ircomm_param_dce (instance, param)
+ *
+ *
+ *
+ */
+static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ __u8 dce;
+
+ pr_debug("%s(), dce = 0x%02x\n", __func__ , (__u8)param->pv.i);
+
+ dce = (__u8) param->pv.i;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ self->settings.dce = dce;
+
+ /* Check if any of the settings have changed */
+ if (dce & 0x0f) {
+ if (dce & IRCOMM_DELTA_CTS) {
+ pr_debug("%s(), CTS\n", __func__);
+ }
+ }
+
+ ircomm_tty_check_modem_status(self);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_param_poll (instance, param)
+ *
+ * Called when the peer device is polling for the line settings
+ *
+ */
+static int ircomm_param_poll(void *instance, irda_param_t *param, int get)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ /* Poll parameters are always of length 0 (just a signal) */
+ if (!get) {
+ /* Respond with DTE line settings */
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+ }
+ return 0;
+}
+
+
+
+
+
diff --git a/drivers/staging/irda/net/ircomm/ircomm_ttp.c b/drivers/staging/irda/net/ircomm/ircomm_ttp.c
new file mode 100644
index 000000000000..4b81e0934770
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_ttp.c
@@ -0,0 +1,350 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_ttp.c
+ * Version: 1.0
+ * Description: Interface between IrCOMM and IrTTP
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 20:48:27 1999
+ * Modified at: Mon Dec 13 11:35:13 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irttp.h>
+
+#include <net/irda/ircomm_event.h>
+#include <net/irda/ircomm_ttp.h>
+
+static int ircomm_ttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void ircomm_ttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+static void ircomm_ttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+static void ircomm_ttp_flow_indication(void *instance, void *sap,
+ LOCAL_FLOW cmd);
+static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb);
+static int ircomm_ttp_data_request(struct ircomm_cb *self,
+ struct sk_buff *skb,
+ int clen);
+static int ircomm_ttp_connect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info);
+static int ircomm_ttp_connect_response(struct ircomm_cb *self,
+ struct sk_buff *userdata);
+static int ircomm_ttp_disconnect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info);
+
+/*
+ * Function ircomm_open_tsap (self)
+ *
+ *
+ *
+ */
+int ircomm_open_tsap(struct ircomm_cb *self)
+{
+ notify_t notify;
+
+ /* Register callbacks */
+ irda_notify_init(&notify);
+ notify.data_indication = ircomm_ttp_data_indication;
+ notify.connect_confirm = ircomm_ttp_connect_confirm;
+ notify.connect_indication = ircomm_ttp_connect_indication;
+ notify.flow_indication = ircomm_ttp_flow_indication;
+ notify.disconnect_indication = ircomm_ttp_disconnect_indication;
+ notify.instance = self;
+ strlcpy(notify.name, "IrCOMM", sizeof(notify.name));
+
+ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
+ &notify);
+ if (!self->tsap) {
+ pr_debug("%sfailed to allocate tsap\n", __func__);
+ return -1;
+ }
+ self->slsap_sel = self->tsap->stsap_sel;
+
+ /*
+ * Initialize the call-table for issuing commands
+ */
+ self->issue.data_request = ircomm_ttp_data_request;
+ self->issue.connect_request = ircomm_ttp_connect_request;
+ self->issue.connect_response = ircomm_ttp_connect_response;
+ self->issue.disconnect_request = ircomm_ttp_disconnect_request;
+
+ return 0;
+}
+
+/*
+ * Function ircomm_ttp_connect_request (self, userdata)
+ *
+ *
+ *
+ */
+static int ircomm_ttp_connect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info)
+{
+ int ret = 0;
+
+ /* Don't forget to refcount it - should be NULL anyway */
+ if (userdata)
+ skb_get(userdata);
+
+ ret = irttp_connect_request(self->tsap, info->dlsap_sel,
+ info->saddr, info->daddr, NULL,
+ TTP_SAR_DISABLE, userdata);
+
+ return ret;
+}
+
+/*
+ * Function ircomm_ttp_connect_response (self, skb)
+ *
+ *
+ *
+ */
+static int ircomm_ttp_connect_response(struct ircomm_cb *self,
+ struct sk_buff *userdata)
+{
+ int ret;
+
+ /* Don't forget to refcount it - should be NULL anyway */
+ if (userdata)
+ skb_get(userdata);
+
+ ret = irttp_connect_response(self->tsap, TTP_SAR_DISABLE, userdata);
+
+ return ret;
+}
+
+/*
+ * Function ircomm_ttp_data_request (self, userdata)
+ *
+ * Send IrCOMM data to IrTTP layer. Currently we do not try to combine
+ * control data with pure data, so they will be sent as separate frames.
+ * Should not be a big problem though, since control frames are rare. But
+ * some of them are sent after connection establishment, so this can
+ * increase the latency a bit.
+ */
+static int ircomm_ttp_data_request(struct ircomm_cb *self,
+ struct sk_buff *skb,
+ int clen)
+{
+ int ret;
+
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ pr_debug("%s(), clen=%d\n", __func__ , clen);
+
+ /*
+ * Insert clen field, currently we either send data only, or control
+ * only frames, to make things easier and avoid queueing
+ */
+ IRDA_ASSERT(skb_headroom(skb) >= IRCOMM_HEADER_SIZE, return -1;);
+
+ /* Don't forget to refcount it - see ircomm_tty_do_softint() */
+ skb_get(skb);
+
+ skb_push(skb, IRCOMM_HEADER_SIZE);
+
+ skb->data[0] = clen;
+
+ ret = irttp_data_request(self->tsap, skb);
+ if (ret) {
+ net_err_ratelimited("%s(), failed\n", __func__);
+ /* irttp_data_request already freed the packet */
+ }
+
+ return ret;
+}
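On the sending side, ircomm_ttp_data_request() prepends a single length byte before handing the frame to IrTTP: the byte is clen for a control-only frame and 0 for a pure data frame, and the receiver splits it again in ircomm_process_data(). A standalone sketch of building such a frame follows (plain C, flat buffer instead of an skb; the names are made up).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build an IrCOMM frame as [clen][payload]; clen is 0 for pure data frames */
static size_t build_ircomm_frame(uint8_t *out, size_t room,
                                 const uint8_t *payload, size_t len, uint8_t clen)
{
        if (room < len + 1)
                return 0;
        out[0] = clen;
        memcpy(out + 1, payload, len);
        return len + 1;
}

int main(void)
{
        uint8_t frame[32];
        const uint8_t data[] = { 'h', 'i' };
        size_t n = build_ircomm_frame(frame, sizeof(frame), data, sizeof(data), 0);

        printf("frame length: %zu (clen=%u)\n", n, (unsigned)frame[0]);
        return 0;
}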
+
+/*
+ * Function ircomm_ttp_data_indication (instance, sap, skb)
+ *
+ * Incoming data
+ *
+ */
+static int ircomm_ttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ ircomm_do_event(self, IRCOMM_TTP_DATA_INDICATION, skb, NULL);
+
+ /* Drop reference count - see ircomm_tty_data_indication(). */
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void ircomm_ttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(qos != NULL, goto out;);
+
+ if (max_sdu_size != TTP_SAR_DISABLE) {
+ net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n",
+ __func__);
+ goto out;
+ }
+
+ info.max_data_size = irttp_get_max_seg_size(self->tsap)
+ - IRCOMM_HEADER_SIZE;
+ info.max_header_size = max_header_size + IRCOMM_HEADER_SIZE;
+ info.qos = qos;
+
+ ircomm_do_event(self, IRCOMM_TTP_CONNECT_CONFIRM, skb, &info);
+
+out:
+ /* Drop reference count - see ircomm_tty_connect_confirm(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function ircomm_ttp_connect_indication (instance, sap, qos, max_sdu_size,
+ * max_header_size, skb)
+ *
+ *
+ *
+ */
+static void ircomm_ttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *)instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(qos != NULL, goto out;);
+
+ if (max_sdu_size != TTP_SAR_DISABLE) {
+ net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n",
+ __func__);
+ goto out;
+ }
+
+ info.max_data_size = irttp_get_max_seg_size(self->tsap)
+ - IRCOMM_HEADER_SIZE;
+ info.max_header_size = max_header_size + IRCOMM_HEADER_SIZE;
+ info.qos = qos;
+
+ ircomm_do_event(self, IRCOMM_TTP_CONNECT_INDICATION, skb, &info);
+
+out:
+ /* Drop reference count - see ircomm_tty_connect_indication(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function ircomm_ttp_disconnect_request (self, userdata, info)
+ *
+ *
+ *
+ */
+static int ircomm_ttp_disconnect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ struct ircomm_info *info)
+{
+ int ret;
+
+ /* Don't forget to refcount it - should be NULL anyway */
+ if (userdata)
+ skb_get(userdata);
+
+ ret = irttp_disconnect_request(self->tsap, userdata, P_NORMAL);
+
+ return ret;
+}
+
+/*
+ * Function ircomm_ttp_disconnect_indication (instance, sap, reason, skb)
+ *
+ *
+ *
+ */
+static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+ struct ircomm_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ info.reason = reason;
+
+ ircomm_do_event(self, IRCOMM_TTP_DISCONNECT_INDICATION, skb, &info);
+
+ /* Drop reference count - see ircomm_tty_disconnect_indication(). */
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function ircomm_ttp_flow_indication (instance, sap, cmd)
+ *
+ * Layer below is telling us to start or stop the flow of data
+ *
+ */
+static void ircomm_ttp_flow_indication(void *instance, void *sap,
+ LOCAL_FLOW cmd)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ if (self->notify.flow_indication)
+ self->notify.flow_indication(self->notify.instance, self, cmd);
+}
+
+
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty.c b/drivers/staging/irda/net/ircomm/ircomm_tty.c
new file mode 100644
index 000000000000..ec157c3419b5
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty.c
@@ -0,0 +1,1329 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty.c
+ * Version: 1.0
+ * Description: IrCOMM serial TTY driver
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Jun 6 21:00:56 1999
+ * Modified at: Wed Feb 23 00:09:02 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: serial.c and previous IrCOMM work by Takahide Higuchi
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/interrupt.h>
+#include <linux/device.h> /* for MODULE_ALIAS_CHARDEV_MAJOR */
+
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_param.h>
+#include <net/irda/ircomm_tty_attach.h>
+#include <net/irda/ircomm_tty.h>
+
+static int ircomm_tty_install(struct tty_driver *driver,
+ struct tty_struct *tty);
+static int ircomm_tty_open(struct tty_struct *tty, struct file *filp);
+static void ircomm_tty_close(struct tty_struct * tty, struct file *filp);
+static int ircomm_tty_write(struct tty_struct * tty,
+ const unsigned char *buf, int count);
+static int ircomm_tty_write_room(struct tty_struct *tty);
+static void ircomm_tty_throttle(struct tty_struct *tty);
+static void ircomm_tty_unthrottle(struct tty_struct *tty);
+static int ircomm_tty_chars_in_buffer(struct tty_struct *tty);
+static void ircomm_tty_flush_buffer(struct tty_struct *tty);
+static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
+static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static void ircomm_tty_hangup(struct tty_struct *tty);
+static void ircomm_tty_do_softint(struct work_struct *work);
+static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
+static void ircomm_tty_stop(struct tty_struct *tty);
+
+static int ircomm_tty_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static int ircomm_tty_control_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void ircomm_tty_flow_indication(void *instance, void *sap,
+ LOCAL_FLOW cmd);
+#ifdef CONFIG_PROC_FS
+static const struct file_operations ircomm_tty_proc_fops;
+#endif /* CONFIG_PROC_FS */
+static struct tty_driver *driver;
+
+static hashbin_t *ircomm_tty = NULL;
+
+static const struct tty_operations ops = {
+ .install = ircomm_tty_install,
+ .open = ircomm_tty_open,
+ .close = ircomm_tty_close,
+ .write = ircomm_tty_write,
+ .write_room = ircomm_tty_write_room,
+ .chars_in_buffer = ircomm_tty_chars_in_buffer,
+ .flush_buffer = ircomm_tty_flush_buffer,
+ .ioctl = ircomm_tty_ioctl, /* ircomm_tty_ioctl.c */
+ .tiocmget = ircomm_tty_tiocmget, /* ircomm_tty_ioctl.c */
+ .tiocmset = ircomm_tty_tiocmset, /* ircomm_tty_ioctl.c */
+ .throttle = ircomm_tty_throttle,
+ .unthrottle = ircomm_tty_unthrottle,
+ .send_xchar = ircomm_tty_send_xchar,
+ .set_termios = ircomm_tty_set_termios,
+ .stop = ircomm_tty_stop,
+ .start = ircomm_tty_start,
+ .hangup = ircomm_tty_hangup,
+ .wait_until_sent = ircomm_tty_wait_until_sent,
+#ifdef CONFIG_PROC_FS
+ .proc_fops = &ircomm_tty_proc_fops,
+#endif /* CONFIG_PROC_FS */
+};
+
+static void ircomm_port_raise_dtr_rts(struct tty_port *port, int raise)
+{
+ struct ircomm_tty_cb *self = container_of(port, struct ircomm_tty_cb,
+ port);
+ /*
+ * Here, we used to lock these two operations, but as ircomm_param_request()
+ * does it itself, I don't see the point (and I see the deadlock).
+ * Jean II
+ */
+ if (raise)
+ self->settings.dte |= IRCOMM_RTS | IRCOMM_DTR;
+ else
+ self->settings.dte &= ~(IRCOMM_RTS | IRCOMM_DTR);
+
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+}
+
+static int ircomm_port_carrier_raised(struct tty_port *port)
+{
+ struct ircomm_tty_cb *self = container_of(port, struct ircomm_tty_cb,
+ port);
+ return self->settings.dce & IRCOMM_CD;
+}
+
+static const struct tty_port_operations ircomm_port_ops = {
+ .dtr_rts = ircomm_port_raise_dtr_rts,
+ .carrier_raised = ircomm_port_carrier_raised,
+};
+
+/*
+ * Function ircomm_tty_init()
+ *
+ * Init IrCOMM TTY layer/driver
+ *
+ */
+static int __init ircomm_tty_init(void)
+{
+ driver = alloc_tty_driver(IRCOMM_TTY_PORTS);
+ if (!driver)
+ return -ENOMEM;
+ ircomm_tty = hashbin_new(HB_LOCK);
+ if (ircomm_tty == NULL) {
+ net_err_ratelimited("%s(), can't allocate hashbin!\n",
+ __func__);
+ put_tty_driver(driver);
+ return -ENOMEM;
+ }
+
+ driver->driver_name = "ircomm";
+ driver->name = "ircomm";
+ driver->major = IRCOMM_TTY_MAJOR;
+ driver->minor_start = IRCOMM_TTY_MINOR;
+ driver->type = TTY_DRIVER_TYPE_SERIAL;
+ driver->subtype = SERIAL_TYPE_NORMAL;
+ driver->init_termios = tty_std_termios;
+ driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(driver, &ops);
+ if (tty_register_driver(driver)) {
+ net_err_ratelimited("%s(): Couldn't register serial driver\n",
+ __func__);
+ put_tty_driver(driver);
+ return -1;
+ }
+ return 0;
+}
+
+static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ ircomm_tty_shutdown(self);
+
+ self->magic = 0;
+ tty_port_destroy(&self->port);
+ kfree(self);
+}
+
+/*
+ * Function ircomm_tty_cleanup ()
+ *
+ * Remove IrCOMM TTY layer/driver
+ *
+ */
+static void __exit ircomm_tty_cleanup(void)
+{
+ int ret;
+
+ ret = tty_unregister_driver(driver);
+ if (ret) {
+ net_err_ratelimited("%s(), failed to unregister driver\n",
+ __func__);
+ return;
+ }
+
+ hashbin_delete(ircomm_tty, (FREE_FUNC) __ircomm_tty_cleanup);
+ put_tty_driver(driver);
+}
+
+/*
+ * Function ircomm_startup (self)
+ *
+ *
+ *
+ */
+static int ircomm_tty_startup(struct ircomm_tty_cb *self)
+{
+ notify_t notify;
+ int ret = -ENODEV;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ /* Check if already open */
+ if (tty_port_initialized(&self->port)) {
+ pr_debug("%s(), already open so break out!\n", __func__);
+ return 0;
+ }
+ tty_port_set_initialized(&self->port, 1);
+
+ /* Register with IrCOMM */
+ irda_notify_init(&notify);
+ /* These callbacks we must handle ourselves */
+ notify.data_indication = ircomm_tty_data_indication;
+ notify.udata_indication = ircomm_tty_control_indication;
+ notify.flow_indication = ircomm_tty_flow_indication;
+
+ /* Use the ircomm_tty interface for these ones */
+ notify.disconnect_indication = ircomm_tty_disconnect_indication;
+ notify.connect_confirm = ircomm_tty_connect_confirm;
+ notify.connect_indication = ircomm_tty_connect_indication;
+ strlcpy(notify.name, "ircomm_tty", sizeof(notify.name));
+ notify.instance = self;
+
+ if (!self->ircomm) {
+ self->ircomm = ircomm_open(&notify, self->service_type,
+ self->line);
+ }
+ if (!self->ircomm)
+ goto err;
+
+ self->slsap_sel = self->ircomm->slsap_sel;
+
+ /* Connect IrCOMM link with remote device */
+ ret = ircomm_tty_attach_cable(self);
+ if (ret < 0) {
+ net_err_ratelimited("%s(), error attaching cable!\n", __func__);
+ goto err;
+ }
+
+ return 0;
+err:
+ tty_port_set_initialized(&self->port, 0);
+ return ret;
+}
+
+/*
+ * Function ircomm_block_til_ready (self, filp)
+ *
+ *
+ *
+ */
+static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+ struct tty_struct *tty, struct file *filp)
+{
+ struct tty_port *port = &self->port;
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ int do_clocal = 0;
+ unsigned long flags;
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if (tty_io_error(tty)) {
+ tty_port_set_active(port, 1);
+ return 0;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ /* nonblock mode is set */
+ if (C_BAUD(tty))
+ tty_port_raise_dtr_rts(port);
+ tty_port_set_active(port, 1);
+ pr_debug("%s(), O_NONBLOCK requested!\n", __func__);
+ return 0;
+ }
+
+ if (C_CLOCAL(tty)) {
+ pr_debug("%s(), doing CLOCAL!\n", __func__);
+ do_clocal = 1;
+ }
+
+ /* Wait for carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * the close routine knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
+
+ pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->count--;
+ port->blocked_open++;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ while (1) {
+ if (C_BAUD(tty) && tty_port_initialized(port))
+ tty_port_raise_dtr_rts(port);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
+ retval = (port->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
+ break;
+ }
+
+ /*
+ * Check if link is ready now. Even if CLOCAL is
+ * specified, we cannot return before the IrCOMM link is
+ * ready
+ */
+ if ((do_clocal || tty_port_carrier_raised(port)) &&
+ self->state == IRCOMM_TTY_READY)
+ {
+ break;
+ }
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
+
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (!tty_hung_up_p(filp))
+ port->count++;
+ port->blocked_open--;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
+
+ if (!retval)
+ tty_port_set_active(port, 1);
+
+ return retval;
+}
+
+
+static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self;
+ unsigned int line = tty->index;
+
+ /* Check if instance already exists */
+ self = hashbin_lock_find(ircomm_tty, line, NULL);
+ if (!self) {
+ /* No, so make new instance */
+ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
+ if (self == NULL)
+ return -ENOMEM;
+
+ tty_port_init(&self->port);
+ self->port.ops = &ircomm_port_ops;
+ self->magic = IRCOMM_TTY_MAGIC;
+ self->flow = FLOW_STOP;
+
+ self->line = line;
+ INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
+ self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
+ self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
+
+ /* Init some important stuff */
+ init_timer(&self->watchdog_timer);
+ spin_lock_init(&self->spinlock);
+
+ /*
+ * Force TTY into raw mode by default which is usually what
+ * we want for IrCOMM and IrLPT. This way applications will
+ * not have to twiddle with printcap etc.
+ *
+ * Note this is completely unsafe and doesn't work properly
+ */
+ tty->termios.c_iflag = 0;
+ tty->termios.c_oflag = 0;
+
+ /* Insert into hash */
+ hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL);
+ }
+
+ tty->driver_data = self;
+
+ return tty_port_install(&self->port, driver, tty);
+}
+
+/*
+ * Function ircomm_tty_open (tty, filp)
+ *
+ * This routine is called when a particular tty device is opened. This
+ * routine is mandatory; if this routine is not filled in, the attempted
+ * open will fail with ENODEV.
+ */
+static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct ircomm_tty_cb *self = tty->driver_data;
+ unsigned long flags;
+ int ret;
+
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->port.lock, flags);
+ self->port.count++;
+ spin_unlock_irqrestore(&self->port.lock, flags);
+ tty_port_tty_set(&self->port, tty);
+
+ pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+ self->line, self->port.count);
+
+	/* Not really used by us, but let's do it anyway */
+ self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ /* Check if this is a "normal" ircomm device, or an irlpt device */
+ if (self->line < 0x10) {
+ self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE;
+ self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */
+ /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */
+ self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */
+ pr_debug("%s(), IrCOMM device\n", __func__);
+ } else {
+ pr_debug("%s(), IrLPT device\n", __func__);
+ self->service_type = IRCOMM_3_WIRE_RAW;
+ self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */
+ }
+
+ ret = ircomm_tty_startup(self);
+ if (ret)
+ return ret;
+
+ ret = ircomm_tty_block_til_ready(self, tty, filp);
+ if (ret) {
+ pr_debug("%s(), returning after block_til_ready with %d\n",
+ __func__, ret);
+
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_close (tty, filp)
+ *
+ * This routine is called when a particular tty device is closed.
+ *
+ */
+static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ struct tty_port *port = &self->port;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ if (tty_port_close_start(port, tty, filp) == 0)
+ return;
+
+ ircomm_tty_shutdown(self);
+
+ tty_driver_flush_buffer(tty);
+
+ tty_port_close_end(port, tty);
+ tty_port_tty_set(port, NULL);
+}
+
+/*
+ * Function ircomm_tty_flush_buffer (tty)
+ *
+ *
+ *
+ */
+static void ircomm_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /*
+ * Let do_softint() do this to avoid race condition with
+ * do_softint() ;-)
+ */
+ schedule_work(&self->tqueue);
+}
+
+/*
+ * Function ircomm_tty_do_softint (work)
+ *
+ *    We use this routine to give the write wakeup to the user at a
+ *    safe time (as fast as possible after writes have completed). This
+ * can be compared to the Tx interrupt.
+ */
+static void ircomm_tty_do_softint(struct work_struct *work)
+{
+ struct ircomm_tty_cb *self =
+ container_of(work, struct ircomm_tty_cb, tqueue);
+ struct tty_struct *tty;
+ unsigned long flags;
+ struct sk_buff *skb, *ctrl_skb;
+
+ if (!self || self->magic != IRCOMM_TTY_MAGIC)
+ return;
+
+ tty = tty_port_tty_get(&self->port);
+ if (!tty)
+ return;
+
+ /* Unlink control buffer */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ ctrl_skb = self->ctrl_skb;
+ self->ctrl_skb = NULL;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Flush control buffer if any */
+ if(ctrl_skb) {
+ if(self->flow == FLOW_START)
+ ircomm_control_request(self->ircomm, ctrl_skb);
+ /* Drop reference count - see ircomm_ttp_data_request(). */
+ dev_kfree_skb(ctrl_skb);
+ }
+
+ if (tty->hw_stopped)
+ goto put;
+
+ /* Unlink transmit buffer */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ skb = self->tx_skb;
+ self->tx_skb = NULL;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Flush transmit buffer if any */
+ if (skb) {
+ ircomm_tty_do_event(self, IRCOMM_TTY_DATA_REQUEST, skb, NULL);
+ /* Drop reference count - see ircomm_ttp_data_request(). */
+ dev_kfree_skb(skb);
+ }
+
+	/* Check if user (still) wants to be woken up */
+ tty_wakeup(tty);
+put:
+ tty_kref_put(tty);
+}
+
+/*
+ * Function ircomm_tty_write (tty, buf, count)
+ *
+ * This routine is called by the kernel to write a series of characters
+ * to the tty device. The characters may come from user space or kernel
+ * space. This routine will return the number of characters actually
+ * accepted for writing. This routine is mandatory.
+ */
+static int ircomm_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned long flags;
+ struct sk_buff *skb;
+ int tailroom = 0;
+ int len = 0;
+ int size;
+
+ pr_debug("%s(), count=%d, hw_stopped=%d\n", __func__ , count,
+ tty->hw_stopped);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ /* We may receive packets from the TTY even before we have finished
+ * our setup. Not cool.
+ * The problem is that we don't know the final header and data size
+ * to create the proper skb, so any skb we would create would have
+	 * bogus header and data size, so care is needed.
+ * We use a bogus header size to safely detect this condition.
+ * Another problem is that hw_stopped was set to 0 way before it
+ * should be, so we would drop this skb. It should now be fixed.
+ * One option is to not accept data until we are properly setup.
+ * But, I suspect that when it happens, the ppp line discipline
+ * just "drops" the data, which might screw up connect scripts.
+ * The second option is to create a "safe skb", with large header
+ * and small size (see ircomm_tty_open() for values).
+ * We just need to make sure that when the real values get filled,
+ * we don't mess up the original "safe skb" (see tx_data_size).
+ * Jean II */
+ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) {
+ pr_debug("%s() : not initialised\n", __func__);
+#ifdef IRCOMM_NO_TX_BEFORE_INIT
+ /* We didn't consume anything, TTY will retry */
+ return 0;
+#endif
+ }
+
+ if (count < 1)
+ return 0;
+
+ /* Protect our manipulation of self->tx_skb and related */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ /* Fetch current transmit buffer */
+ skb = self->tx_skb;
+
+ /*
+ * Send out all the data we get, possibly as multiple fragmented
+ * frames, but this will only happen if the data is larger than the
+	 * max data size. The normal case, however, is just the opposite:
+	 * this function may be called multiple times, and will then
+	 * defragment the data and send it out as one packet as soon as
+	 * possible, but at a safe point in time.
+ */
+ while (count) {
+ size = count;
+
+ /* Adjust data size to the max data size */
+ if (size > self->max_data_size)
+ size = self->max_data_size;
+
+ /*
+ * Do we already have a buffer ready for transmit, or do
+ * we need to allocate a new frame
+ */
+ if (skb) {
+ /*
+ * Any room for more data at the end of the current
+ * transmit buffer? Cannot use skb_tailroom, since
+ * dev_alloc_skb gives us a larger skb than we
+ * requested
+ * Note : use tx_data_size, because max_data_size
+ * may have changed and we don't want to overwrite
+ * the skb. - Jean II
+ */
+ if ((tailroom = (self->tx_data_size - skb->len)) > 0) {
+ /* Adjust data to tailroom */
+ if (size > tailroom)
+ size = tailroom;
+ } else {
+ /*
+ * Current transmit frame is full, so break
+ * out, so we can send it as soon as possible
+ */
+ break;
+ }
+ } else {
+ /* Prepare a full sized frame */
+ skb = alloc_skb(self->max_data_size+
+ self->max_header_size,
+ GFP_ATOMIC);
+ if (!skb) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -ENOBUFS;
+ }
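+			/* Leave headroom for the IrCOMM and lower-layer headers */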
+ skb_reserve(skb, self->max_header_size);
+ self->tx_skb = skb;
+ /* Remember skb size because max_data_size may
+ * change later on - Jean II */
+ self->tx_data_size = self->max_data_size;
+ }
+
+ /* Copy data */
+ skb_put_data(skb, buf + len, size);
+
+ count -= size;
+ len += size;
+ }
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /*
+ * Schedule a new thread which will transmit the frame as soon
+ * as possible, but at a safe point in time. We do this so the
+ * "user" can give us data multiple times, as PPP does (because of
+ * its 256 byte tx buffer). We will then defragment and send out
+ * all this data as one single packet.
+ */
+ schedule_work(&self->tqueue);
+
+ return len;
+}
+
+/*
+ * Function ircomm_tty_write_room (tty)
+ *
+ *    This routine returns the number of characters the tty driver will
+ *    accept for queuing to be written. This number is subject to change as
+ *    output buffers get emptied, or if output flow control is asserted.
+ */
+static int ircomm_tty_write_room(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned long flags;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+#ifdef IRCOMM_NO_TX_BEFORE_INIT
+ /* max_header_size tells us if the channel is initialised or not. */
+ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED)
+ /* Don't bother us yet */
+ return 0;
+#endif
+
+ /* Check if we are allowed to transmit any data.
+ * hw_stopped is the regular flow control.
+ * Jean II */
+ if (tty->hw_stopped)
+ ret = 0;
+ else {
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (self->tx_skb)
+ ret = self->tx_data_size - self->tx_skb->len;
+ else
+ ret = self->max_data_size;
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+ pr_debug("%s(), ret=%d\n", __func__ , ret);
+
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_wait_until_sent (tty, timeout)
+ *
+ * This routine waits until the device has written out all of the
+ * characters in its transmitter FIFO.
+ */
+static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned long orig_jiffies, poll_time;
+ unsigned long flags;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ orig_jiffies = jiffies;
+
+ /* Set poll time to 200 ms */
+ poll_time = msecs_to_jiffies(200);
+ if (timeout)
+ poll_time = min_t(unsigned long, timeout, poll_time);
+
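+	/* Poll until the pending tx skb drains, a signal arrives or we time out */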
+ spin_lock_irqsave(&self->spinlock, flags);
+ while (self->tx_skb && self->tx_skb->len) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ schedule_timeout_interruptible(poll_time);
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ __set_current_state(TASK_RUNNING);
+}
+
+/*
+ * Function ircomm_tty_throttle (tty)
+ *
+ * This routine notifies the tty driver that input buffers for the line
+ * discipline are close to full, and it should somehow signal that no
+ * more characters should be sent to the tty.
+ */
+static void ircomm_tty_throttle(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /* Software flow control? */
+ if (I_IXOFF(tty))
+ ircomm_tty_send_xchar(tty, STOP_CHAR(tty));
+
+ /* Hardware flow control? */
+ if (C_CRTSCTS(tty)) {
+ self->settings.dte &= ~IRCOMM_RTS;
+ self->settings.dte |= IRCOMM_DELTA_RTS;
+
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+ }
+
+ ircomm_flow_request(self->ircomm, FLOW_STOP);
+}
+
+/*
+ * Function ircomm_tty_unthrottle (tty)
+ *
+ *    This routine notifies the tty driver that it should signal that
+ *    characters can now be sent to the tty without fear of overrunning the
+ *    input buffers of the line discipline.
+ */
+static void ircomm_tty_unthrottle(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /* Using software flow control? */
+ if (I_IXOFF(tty))
+ ircomm_tty_send_xchar(tty, START_CHAR(tty));
+
+ /* Using hardware flow control? */
+ if (C_CRTSCTS(tty)) {
+ self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
+
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+ pr_debug("%s(), FLOW_START\n", __func__);
+ }
+ ircomm_flow_request(self->ircomm, FLOW_START);
+}
+
+/*
+ * Function ircomm_tty_chars_in_buffer (tty)
+ *
+ *    Returns the number of characters remaining in the transmit buffer.
+ *
+ */
+static int ircomm_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned long flags;
+ int len = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->tx_skb)
+ len = self->tx_skb->len;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ return len;
+}
+
+static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
+{
+ unsigned long flags;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ if (!tty_port_initialized(&self->port))
+ return;
+ tty_port_set_initialized(&self->port, 0);
+
+ ircomm_tty_detach_cable(self);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ del_timer(&self->watchdog_timer);
+
+ /* Free parameter buffer */
+ if (self->ctrl_skb) {
+ dev_kfree_skb(self->ctrl_skb);
+ self->ctrl_skb = NULL;
+ }
+
+ /* Free transmit buffer */
+ if (self->tx_skb) {
+ dev_kfree_skb(self->tx_skb);
+ self->tx_skb = NULL;
+ }
+
+ if (self->ircomm) {
+ ircomm_close(self->ircomm);
+ self->ircomm = NULL;
+ }
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+}
+
+/*
+ * Function ircomm_tty_hangup (tty)
+ *
+ * This routine notifies the tty driver that it should hangup the tty
+ * device.
+ *
+ */
+static void ircomm_tty_hangup(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ struct tty_port *port = &self->port;
+ unsigned long flags;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /* ircomm_tty_flush_buffer(tty); */
+ ircomm_tty_shutdown(self);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->tty) {
+ set_bit(TTY_IO_ERROR, &port->tty->flags);
+ tty_kref_put(port->tty);
+ }
+ port->tty = NULL;
+ port->count = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_port_set_active(port, 0);
+
+ wake_up_interruptible(&port->open_wait);
+}
+
+/*
+ * Function ircomm_tty_send_xchar (tty, ch)
+ *
+ * This routine is used to send a high-priority XON/XOFF character to
+ * the device.
+ */
+static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch)
+{
+ pr_debug("%s(), not impl\n", __func__);
+}
+
+/*
+ * Function ircomm_tty_start (tty)
+ *
+ *    This routine notifies the tty driver that it should resume sending
+ * characters to the tty device.
+ */
+void ircomm_tty_start(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ ircomm_flow_request(self->ircomm, FLOW_START);
+}
+
+/*
+ * Function ircomm_tty_stop (tty)
+ *
+ * This routine notifies the tty driver that it should stop outputting
+ * characters to the tty device.
+ */
+static void ircomm_tty_stop(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ ircomm_flow_request(self->ircomm, FLOW_STOP);
+}
+
+/*
+ * Function ircomm_tty_check_modem_status (self)
+ *
+ *    Check for any changes in the DCE's line settings. This function should
+ *    be called whenever the dce parameter settings change, to update the
+ *    flow control settings and other things.
+ */
+void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
+{
+ struct tty_struct *tty;
+ int status;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ tty = tty_port_tty_get(&self->port);
+
+ status = self->settings.dce;
+
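+	/* settings.dce holds the DCE line state plus delta bits marking changes */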
+ if (status & IRCOMM_DCE_DELTA_ANY) {
+ /*wake_up_interruptible(&self->delta_msr_wait);*/
+ }
+ if (tty_port_check_carrier(&self->port) && (status & IRCOMM_DELTA_CD)) {
+ pr_debug("%s(), ircomm%d CD now %s...\n", __func__ , self->line,
+ (status & IRCOMM_CD) ? "on" : "off");
+
+ if (status & IRCOMM_CD) {
+ wake_up_interruptible(&self->port.open_wait);
+ } else {
+ pr_debug("%s(), Doing serial hangup..\n", __func__);
+ if (tty)
+ tty_hangup(tty);
+
+			/* Hangup will remove the tty, so better break out */
+ goto put;
+ }
+ }
+ if (tty && tty_port_cts_enabled(&self->port)) {
+ if (tty->hw_stopped) {
+ if (status & IRCOMM_CTS) {
+ pr_debug("%s(), CTS tx start...\n", __func__);
+ tty->hw_stopped = 0;
+
+ /* Wake up processes blocked on open */
+ wake_up_interruptible(&self->port.open_wait);
+
+ schedule_work(&self->tqueue);
+ goto put;
+ }
+ } else {
+ if (!(status & IRCOMM_CTS)) {
+ pr_debug("%s(), CTS tx stop...\n", __func__);
+ tty->hw_stopped = 1;
+ }
+ }
+ }
+put:
+ tty_kref_put(tty);
+}
+
+/*
+ * Function ircomm_tty_data_indication (instance, sap, skb)
+ *
+ * Handle incoming data, and deliver it to the line discipline
+ *
+ */
+static int ircomm_tty_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ tty = tty_port_tty_get(&self->port);
+ if (!tty) {
+ pr_debug("%s(), no tty!\n", __func__);
+ return 0;
+ }
+
+ /*
+ * If we receive data when hardware is stopped then something is wrong.
+	 * We try to poll the peer's line settings to check if we are up to date.
+	 * Devices like WinCE can do this, and since they don't send any
+	 * params, we can just as well declare the hardware as running.
+ */
+ if (tty->hw_stopped && (self->flow == FLOW_START)) {
+ pr_debug("%s(), polling for line settings!\n", __func__);
+ ircomm_param_request(self, IRCOMM_POLL, TRUE);
+
+		/* We can just as well declare the hardware as running */
+ ircomm_tty_send_initial_parameters(self);
+ ircomm_tty_link_established(self);
+ }
+ tty_kref_put(tty);
+
+ /*
+ * Use flip buffer functions since the code may be called from interrupt
+ * context
+ */
+ tty_insert_flip_string(&self->port, skb->data, skb->len);
+ tty_flip_buffer_push(&self->port);
+
+ /* No need to kfree_skb - see ircomm_ttp_data_indication() */
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_control_indication (instance, sap, skb)
+ *
+ * Parse all incoming parameters (easy!)
+ *
+ */
+static int ircomm_tty_control_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ int clen;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
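+	/* The first byte gives the length of the parameter block that follows */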
+ clen = skb->data[0];
+
+ irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len-1, clen),
+ &ircomm_param_info);
+
+ /* No need to kfree_skb - see ircomm_control_indication() */
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_flow_indication (instance, sap, cmd)
+ *
+ * This function is called by IrTTP when it wants us to slow down the
+ * transmission of data. We just mark the hardware as stopped, and wait
+ * for IrTTP to notify us that things are OK again.
+ */
+static void ircomm_tty_flow_indication(void *instance, void *sap,
+ LOCAL_FLOW cmd)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ tty = tty_port_tty_get(&self->port);
+
+ switch (cmd) {
+ case FLOW_START:
+ pr_debug("%s(), hw start!\n", __func__);
+ if (tty)
+ tty->hw_stopped = 0;
+
+ /* ircomm_tty_do_softint will take care of the rest */
+ schedule_work(&self->tqueue);
+ break;
+ default: /* If we get here, something is very wrong, better stop */
+ case FLOW_STOP:
+ pr_debug("%s(), hw stopped!\n", __func__);
+ if (tty)
+ tty->hw_stopped = 1;
+ break;
+ }
+
+ tty_kref_put(tty);
+ self->flow = cmd;
+}
+
+#ifdef CONFIG_PROC_FS
+static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
+{
+ struct tty_struct *tty;
+ char sep;
+
+ seq_printf(m, "State: %s\n", ircomm_tty_state[self->state]);
+
+ seq_puts(m, "Service type: ");
+ if (self->service_type & IRCOMM_9_WIRE)
+ seq_puts(m, "9_WIRE");
+ else if (self->service_type & IRCOMM_3_WIRE)
+ seq_puts(m, "3_WIRE");
+ else if (self->service_type & IRCOMM_3_WIRE_RAW)
+ seq_puts(m, "3_WIRE_RAW");
+ else
+ seq_puts(m, "No common service type!\n");
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Port name: %s\n", self->settings.port_name);
+
+ seq_printf(m, "DTE status:");
+ sep = ' ';
+ if (self->settings.dte & IRCOMM_RTS) {
+ seq_printf(m, "%cRTS", sep);
+ sep = '|';
+ }
+ if (self->settings.dte & IRCOMM_DTR) {
+ seq_printf(m, "%cDTR", sep);
+ sep = '|';
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, "DCE status:");
+ sep = ' ';
+ if (self->settings.dce & IRCOMM_CTS) {
+ seq_printf(m, "%cCTS", sep);
+ sep = '|';
+ }
+ if (self->settings.dce & IRCOMM_DSR) {
+ seq_printf(m, "%cDSR", sep);
+ sep = '|';
+ }
+ if (self->settings.dce & IRCOMM_CD) {
+ seq_printf(m, "%cCD", sep);
+ sep = '|';
+ }
+ if (self->settings.dce & IRCOMM_RI) {
+ seq_printf(m, "%cRI", sep);
+ sep = '|';
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Configuration: ");
+ if (!self->settings.null_modem)
+ seq_puts(m, "DTE <-> DCE\n");
+ else
+ seq_puts(m, "DTE <-> DTE (null modem emulation)\n");
+
+ seq_printf(m, "Data rate: %d\n", self->settings.data_rate);
+
+ seq_puts(m, "Flow control:");
+ sep = ' ';
+ if (self->settings.flow_control & IRCOMM_XON_XOFF_IN) {
+ seq_printf(m, "%cXON_XOFF_IN", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_XON_XOFF_OUT) {
+ seq_printf(m, "%cXON_XOFF_OUT", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_RTS_CTS_IN) {
+ seq_printf(m, "%cRTS_CTS_IN", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_RTS_CTS_OUT) {
+ seq_printf(m, "%cRTS_CTS_OUT", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_DSR_DTR_IN) {
+ seq_printf(m, "%cDSR_DTR_IN", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_DSR_DTR_OUT) {
+ seq_printf(m, "%cDSR_DTR_OUT", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_ENQ_ACK_IN) {
+ seq_printf(m, "%cENQ_ACK_IN", sep);
+ sep = '|';
+ }
+ if (self->settings.flow_control & IRCOMM_ENQ_ACK_OUT) {
+ seq_printf(m, "%cENQ_ACK_OUT", sep);
+ sep = '|';
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Flags:");
+ sep = ' ';
+ if (tty_port_cts_enabled(&self->port)) {
+ seq_printf(m, "%cASYNC_CTS_FLOW", sep);
+ sep = '|';
+ }
+ if (tty_port_check_carrier(&self->port)) {
+ seq_printf(m, "%cASYNC_CHECK_CD", sep);
+ sep = '|';
+ }
+ if (tty_port_initialized(&self->port)) {
+ seq_printf(m, "%cASYNC_INITIALIZED", sep);
+ sep = '|';
+ }
+ if (self->port.flags & ASYNC_LOW_LATENCY) {
+ seq_printf(m, "%cASYNC_LOW_LATENCY", sep);
+ sep = '|';
+ }
+ if (tty_port_active(&self->port)) {
+ seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep);
+ sep = '|';
+ }
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+ seq_printf(m, "Open count: %d\n", self->port.count);
+ seq_printf(m, "Max data size: %d\n", self->max_data_size);
+ seq_printf(m, "Max header size: %d\n", self->max_header_size);
+
+ tty = tty_port_tty_get(&self->port);
+ if (tty) {
+ seq_printf(m, "Hardware: %s\n",
+ tty->hw_stopped ? "Stopped" : "Running");
+ tty_kref_put(tty);
+ }
+}
+
+static int ircomm_tty_proc_show(struct seq_file *m, void *v)
+{
+ struct ircomm_tty_cb *self;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags);
+
+ self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty);
+ while (self != NULL) {
+ if (self->magic != IRCOMM_TTY_MAGIC)
+ break;
+
+ ircomm_tty_line_info(self, m);
+ self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty);
+ }
+ spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags);
+ return 0;
+}
+
+static int ircomm_tty_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ircomm_tty_proc_show, NULL);
+}
+
+static const struct file_operations ircomm_tty_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ircomm_tty_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("IrCOMM serial TTY driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IRCOMM_TTY_MAJOR);
+
+module_init(ircomm_tty_init);
+module_exit(ircomm_tty_cleanup);
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
new file mode 100644
index 000000000000..0a411019c098
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
@@ -0,0 +1,987 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty_attach.c
+ * Version:
+ * Description: Code for attaching the serial driver to IrCOMM
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Jun 5 17:42:00 1999
+ * Modified at: Tue Jan 4 14:20:49 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/parameters.h>
+
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_param.h>
+#include <net/irda/ircomm_event.h>
+
+#include <net/irda/ircomm_tty.h>
+#include <net/irda/ircomm_tty_attach.h>
+
+static void ircomm_tty_ias_register(struct ircomm_tty_cb *self);
+static void ircomm_tty_discovery_indication(discinfo_t *discovery,
+ DISCOVERY_MODE mode,
+ void *priv);
+static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
+ int timeout);
+static void ircomm_tty_watchdog_timer_expired(void *data);
+
+static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info);
+
+const char *const ircomm_tty_state[] = {
+ "IRCOMM_TTY_IDLE",
+ "IRCOMM_TTY_SEARCH",
+ "IRCOMM_TTY_QUERY_PARAMETERS",
+ "IRCOMM_TTY_QUERY_LSAP_SEL",
+ "IRCOMM_TTY_SETUP",
+ "IRCOMM_TTY_READY",
+ "*** ERROR *** ",
+};
+
+static const char *const ircomm_tty_event[] __maybe_unused = {
+ "IRCOMM_TTY_ATTACH_CABLE",
+ "IRCOMM_TTY_DETACH_CABLE",
+ "IRCOMM_TTY_DATA_REQUEST",
+ "IRCOMM_TTY_DATA_INDICATION",
+ "IRCOMM_TTY_DISCOVERY_REQUEST",
+ "IRCOMM_TTY_DISCOVERY_INDICATION",
+ "IRCOMM_TTY_CONNECT_CONFIRM",
+ "IRCOMM_TTY_CONNECT_INDICATION",
+ "IRCOMM_TTY_DISCONNECT_REQUEST",
+ "IRCOMM_TTY_DISCONNECT_INDICATION",
+ "IRCOMM_TTY_WD_TIMER_EXPIRED",
+ "IRCOMM_TTY_GOT_PARAMETERS",
+ "IRCOMM_TTY_GOT_LSAPSEL",
+ "*** ERROR ****",
+};
+
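+/* State dispatch table; order must match the IRCOMM_TTY_STATE values */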
+static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb, struct ircomm_tty_info *info) =
+{
+ ircomm_tty_state_idle,
+ ircomm_tty_state_search,
+ ircomm_tty_state_query_parameters,
+ ircomm_tty_state_query_lsap_sel,
+ ircomm_tty_state_setup,
+ ircomm_tty_state_ready,
+};
+
+/*
+ * Function ircomm_tty_attach_cable (driver)
+ *
+ *    Try to attach the cable (IrCOMM link). This starts the discovery and
+ *    connection procedure via the state machine and returns immediately;
+ *    0 is returned on success.
+ */
+int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
+{
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ /* Check if somebody has already connected to us */
+ if (ircomm_is_connected(self->ircomm)) {
+ pr_debug("%s(), already connected!\n", __func__);
+ return 0;
+ }
+
+ /* Make sure nobody tries to write before the link is up */
+ tty = tty_port_tty_get(&self->port);
+ if (tty) {
+ tty->hw_stopped = 1;
+ tty_kref_put(tty);
+ }
+
+ ircomm_tty_ias_register(self);
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_ATTACH_CABLE, NULL, NULL);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_detach_cable (self)
+ *
+ * Detach cable, or cable has been detached by peer
+ *
+ */
+void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ del_timer(&self->watchdog_timer);
+
+ /* Remove discovery handler */
+ if (self->ckey) {
+ irlmp_unregister_client(self->ckey);
+ self->ckey = NULL;
+ }
+ /* Remove IrCOMM hint bits */
+ if (self->skey) {
+ irlmp_unregister_service(self->skey);
+ self->skey = NULL;
+ }
+
+ if (self->iriap) {
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
+
+ /* Remove LM-IAS object */
+ if (self->obj) {
+ irias_delete_object(self->obj);
+ self->obj = NULL;
+ }
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_DETACH_CABLE, NULL, NULL);
+
+ /* Reset some values */
+ self->daddr = self->saddr = 0;
+ self->dlsap_sel = self->slsap_sel = 0;
+
+ memset(&self->settings, 0, sizeof(struct ircomm_params));
+}
+
+/*
+ * Function ircomm_tty_ias_register (self)
+ *
+ * Register with LM-IAS depending on which service type we are
+ *
+ */
+static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
+{
+ __u8 oct_seq[6];
+ __u16 hints;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /* Compute hint bits based on service */
+ hints = irlmp_service_to_hint(S_COMM);
+ if (self->service_type & IRCOMM_3_WIRE_RAW)
+ hints |= irlmp_service_to_hint(S_PRINTER);
+
+ /* Advertise IrCOMM hint bit in discovery */
+ if (!self->skey)
+ self->skey = irlmp_register_service(hints);
+ /* Set up a discovery handler */
+ if (!self->ckey)
+ self->ckey = irlmp_register_client(hints,
+ ircomm_tty_discovery_indication,
+ NULL, (void *) self);
+
+ /* If already done, no need to do it again */
+ if (self->obj)
+ return;
+
+ if (self->service_type & IRCOMM_3_WIRE_RAW) {
+ /* Register IrLPT with LM-IAS */
+ self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID);
+ irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel",
+ self->slsap_sel, IAS_KERNEL_ATTR);
+ } else {
+ /* Register IrCOMM with LM-IAS */
+ self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID);
+ irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel",
+ self->slsap_sel, IAS_KERNEL_ATTR);
+
+ /* Code the parameters into the buffer */
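+		/* Each parameter is a PI/PL/PV triplet: identifier, length, value */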
+ irda_param_pack(oct_seq, "bbbbbb",
+ IRCOMM_SERVICE_TYPE, 1, self->service_type,
+ IRCOMM_PORT_TYPE, 1, IRCOMM_SERIAL);
+
+ /* Register parameters with LM-IAS */
+ irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6,
+ IAS_KERNEL_ATTR);
+ }
+ irias_insert_object(self->obj);
+}
+
+/*
+ * Function ircomm_tty_ias_unregister (self)
+ *
+ * Remove our IAS object and client hook while connected.
+ *
+ */
+static void ircomm_tty_ias_unregister(struct ircomm_tty_cb *self)
+{
+ /* Remove LM-IAS object now so it is not reused.
+ * IrCOMM deals very poorly with multiple incoming connections.
+	 * It should look a lot more like IrNET, and "dup" a server TSAP
+ * to the application TSAP (based on various rules).
+ * This is a cheap workaround allowing multiple clients to
+ * connect to us. It will not always work.
+	 * Each IrCOMM socket has an IAS entry. Incoming connections will
+ * pick the first one found. So, when we are fully connected,
+ * we remove our IAS entries so that the next IAS entry is used.
+ * We do that for *both* client and server, because a server
+ * can also create client instances.
+ * Jean II */
+ if (self->obj) {
+ irias_delete_object(self->obj);
+ self->obj = NULL;
+ }
+
+#if 0
+ /* Remove discovery handler.
+ * While we are connected, we no longer need to receive
+ * discovery events. This would be the case if there is
+ * multiple IrLAP interfaces. Jean II */
+ if (self->ckey) {
+ irlmp_unregister_client(self->ckey);
+ self->ckey = NULL;
+ }
+#endif
+}
+
+/*
+ * Function ircomm_tty_send_initial_parameters (self)
+ *
+ * Send initial parameters to the remote IrCOMM device. These parameters
+ * must be sent before any data.
+ */
+int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (self->service_type & IRCOMM_3_WIRE_RAW)
+ return 0;
+
+ /*
+ * Set default values, but only if the application for some reason
+	 * hasn't set them already
+ */
+ pr_debug("%s(), data-rate = %d\n", __func__ ,
+ self->settings.data_rate);
+ if (!self->settings.data_rate)
+ self->settings.data_rate = 9600;
+ pr_debug("%s(), data-format = %d\n", __func__ ,
+ self->settings.data_format);
+ if (!self->settings.data_format)
+ self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
+
+ pr_debug("%s(), flow-control = %d\n", __func__ ,
+ self->settings.flow_control);
+ /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
+
+ /* Do not set delta values for the initial parameters */
+ self->settings.dte = IRCOMM_DTR | IRCOMM_RTS;
+
+ /* Only send service type parameter when we are the client */
+ if (self->client)
+ ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
+ ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
+ ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
+
+ /* For a 3 wire service, we just flush the last parameter and return */
+ if (self->settings.service_type == IRCOMM_3_WIRE) {
+ ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
+ return 0;
+ }
+
+ /* Only 9-wire service types continue here */
+ ircomm_param_request(self, IRCOMM_FLOW_CONTROL, FALSE);
+#if 0
+ ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE);
+ ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE);
+#endif
+ /* Notify peer that we are ready to receive data */
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_discovery_indication (discovery)
+ *
+ *    Remote device is discovered, try to query the remote IAS to see which
+ * device it is, and which services it has.
+ *
+ */
+static void ircomm_tty_discovery_indication(discinfo_t *discovery,
+ DISCOVERY_MODE mode,
+ void *priv)
+{
+ struct ircomm_tty_cb *self;
+ struct ircomm_tty_info info;
+
+ /* Important note :
+ * We need to drop all passive discoveries.
+ * The LSAP management of IrComm is deficient and doesn't deal
+ * with the case of two instance connecting to each other
+ * simultaneously (it will deadlock in LMP).
+ * The proper fix would be to use the same technique as in IrNET,
+ * to have one server socket and separate instances for the
+ * connecting/connected socket.
+ * The workaround is to drop passive discovery, which drastically
+ * reduce the probability of this happening.
+ * Jean II */
+ if(mode == DISCOVERY_PASSIVE)
+ return;
+
+ info.daddr = discovery->daddr;
+ info.saddr = discovery->saddr;
+
+ self = priv;
+ ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
+ NULL, &info);
+}
+
+/*
+ * Function ircomm_tty_disconnect_indication (instance, sap, reason, skb)
+ *
+ * Link disconnected
+ *
+ */
+void ircomm_tty_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ tty = tty_port_tty_get(&self->port);
+ if (!tty)
+ return;
+
+ /* This will stop control data transfers */
+ self->flow = FLOW_STOP;
+
+ /* Stop data transfers */
+ tty->hw_stopped = 1;
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL,
+ NULL);
+ tty_kref_put(tty);
+}
+
+/*
+ * Function ircomm_tty_getvalue_confirm (result, obj_id, value, priv)
+ *
+ * Got result from the IAS query we make
+ *
+ */
+static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value,
+ void *priv)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ pr_debug("%s(), got NULL value!\n", __func__);
+ return;
+ }
+
+ switch (value->type) {
+ case IAS_OCT_SEQ:
+ pr_debug("%s(), got octet sequence\n", __func__);
+
+ irda_param_extract_all(self, value->t.oct_seq, value->len,
+ &ircomm_param_info);
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL,
+ NULL);
+ break;
+ case IAS_INTEGER:
+ /* Got LSAP selector */
+ pr_debug("%s(), got lsapsel = %d\n", __func__ ,
+ value->t.integer);
+
+ if (value->t.integer == -1) {
+ pr_debug("%s(), invalid value!\n", __func__);
+ } else
+ self->dlsap_sel = value->t.integer;
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL);
+ break;
+ case IAS_MISSING:
+ pr_debug("%s(), got IAS_MISSING\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), got unknown type!\n", __func__);
+ break;
+ }
+ irias_delete_value(value);
+}
+
+/*
+ * Function ircomm_tty_connect_confirm (instance, sap, qos, max_sdu_size, skb)
+ *
+ * Connection confirmed
+ *
+ */
+void ircomm_tty_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_data_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ self->client = TRUE;
+ self->max_data_size = max_data_size;
+ self->max_header_size = max_header_size;
+ self->flow = FLOW_START;
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_CONFIRM, NULL, NULL);
+
+ /* No need to kfree_skb - see ircomm_ttp_connect_confirm() */
+}
+
+/*
+ * Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size,
+ * skb)
+ *
+ *    We have been discovered and the remote device is requesting a connection.
+ *
+ */
+void ircomm_tty_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_data_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
+ int clen;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ self->client = FALSE;
+ self->max_data_size = max_data_size;
+ self->max_header_size = max_header_size;
+ self->flow = FLOW_START;
+
+ clen = skb->data[0];
+ if (clen)
+ irda_param_extract_all(self, skb->data+1,
+ IRDA_MIN(skb->len, clen),
+ &ircomm_param_info);
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL);
+
+ /* No need to kfree_skb - see ircomm_ttp_connect_indication() */
+}
+
+/*
+ * Function ircomm_tty_link_established (self)
+ *
+ * Called when the IrCOMM link is established
+ *
+ */
+void ircomm_tty_link_established(struct ircomm_tty_cb *self)
+{
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ tty = tty_port_tty_get(&self->port);
+ if (!tty)
+ return;
+
+ del_timer(&self->watchdog_timer);
+
+ /*
+ * IrCOMM link is now up, and if we are not using hardware
+ * flow-control, then declare the hardware as running. Otherwise we
+ * will have to wait for the peer device (DCE) to raise the CTS
+ * line.
+ */
+ if (tty_port_cts_enabled(&self->port) &&
+ ((self->settings.dce & IRCOMM_CTS) == 0)) {
+ pr_debug("%s(), waiting for CTS ...\n", __func__);
+ goto put;
+ } else {
+ pr_debug("%s(), starting hardware!\n", __func__);
+
+ tty->hw_stopped = 0;
+
+ /* Wake up processes blocked on open */
+ wake_up_interruptible(&self->port.open_wait);
+ }
+
+ schedule_work(&self->tqueue);
+put:
+ tty_kref_put(tty);
+}
+
+/*
+ * Function ircomm_tty_start_watchdog_timer (self, timeout)
+ *
+ *    Start the watchdog timer. This timer is used to make sure that the
+ *    connection attempt succeeds, and if it does not, we will retry after
+ *    the timeout.
+ */
+static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
+ int timeout)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ ircomm_tty_watchdog_timer_expired);
+}
+
+/*
+ * Function ircomm_tty_watchdog_timer_expired (data)
+ *
+ *    Called when the connect procedure has taken too much time.
+ *
+ */
+static void ircomm_tty_watchdog_timer_expired(void *data)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ ircomm_tty_do_event(self, IRCOMM_TTY_WD_TIMER_EXPIRED, NULL, NULL);
+}
+
+
+/*
+ * Function ircomm_tty_do_event (self, event, skb)
+ *
+ * Process event
+ *
+ */
+int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb, struct ircomm_tty_info *info)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+
+ return (*state[self->state])(self, event, skb, info);
+}
+
+/*
+ * Function ircomm_tty_next_state (self, state)
+ *
+ * Switch state
+ *
+ */
+static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_STATE state)
+{
+ /*
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ pr_debug("%s: next state=%s, service type=%d\n", __func__ ,
+ ircomm_tty_state[self->state], self->service_type);
+ */
+ self->state = state;
+}
+
+/*
+ * Function ircomm_tty_state_idle (self, event, skb, info)
+ *
+ * Just hanging around
+ *
+ */
+static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ switch (event) {
+ case IRCOMM_TTY_ATTACH_CABLE:
+ /* Try to discover any remote devices */
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
+
+ irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
+ break;
+ case IRCOMM_TTY_DISCOVERY_INDICATION:
+ self->daddr = info->daddr;
+ self->saddr = info->saddr;
+
+ if (self->iriap) {
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ ircomm_tty_getvalue_confirm);
+
+ iriap_getvaluebyclass_request(self->iriap,
+ self->saddr, self->daddr,
+ "IrDA:IrCOMM", "Parameters");
+
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
+ break;
+ case IRCOMM_TTY_CONNECT_INDICATION:
+ del_timer(&self->watchdog_timer);
+
+ /* Accept connection */
+ ircomm_connect_response(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_WD_TIMER_EXPIRED:
+ /* Just stay idle */
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_state_search (self, event, skb, info)
+ *
+ * Trying to discover an IrCOMM device
+ *
+ */
+static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+
+ switch (event) {
+ case IRCOMM_TTY_DISCOVERY_INDICATION:
+ self->daddr = info->daddr;
+ self->saddr = info->saddr;
+
+ if (self->iriap) {
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ ircomm_tty_getvalue_confirm);
+
+ if (self->service_type == IRCOMM_3_WIRE_RAW) {
+ iriap_getvaluebyclass_request(self->iriap, self->saddr,
+ self->daddr, "IrLPT",
+ "IrDA:IrLMP:LsapSel");
+ ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
+ } else {
+ iriap_getvaluebyclass_request(self->iriap, self->saddr,
+ self->daddr,
+ "IrDA:IrCOMM",
+ "Parameters");
+
+ ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
+ }
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ break;
+ case IRCOMM_TTY_CONNECT_INDICATION:
+ del_timer(&self->watchdog_timer);
+ ircomm_tty_ias_unregister(self);
+
+ /* Accept connection */
+ ircomm_connect_response(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_WD_TIMER_EXPIRED:
+#if 1
+ /* Give up */
+#else
+ /* Try to discover any remote devices */
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
+#endif
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_state_query_parameters (self, event, skb, info)
+ *
+ * Querying the remote LM-IAS for IrCOMM parameters
+ *
+ */
+static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+
+ switch (event) {
+ case IRCOMM_TTY_GOT_PARAMETERS:
+ if (self->iriap) {
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ ircomm_tty_getvalue_confirm);
+
+ iriap_getvaluebyclass_request(self->iriap, self->saddr,
+ self->daddr, "IrDA:IrCOMM",
+ "IrDA:TinyTP:LsapSel");
+
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
+ break;
+ case IRCOMM_TTY_WD_TIMER_EXPIRED:
+ /* Go back to search mode */
+ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ break;
+ case IRCOMM_TTY_CONNECT_INDICATION:
+ del_timer(&self->watchdog_timer);
+ ircomm_tty_ias_unregister(self);
+
+ /* Accept connection */
+ ircomm_connect_response(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_state_query_lsap_sel (self, event, skb, info)
+ *
+ * Query remote LM-IAS for the LSAP selector which we can connect to
+ *
+ */
+static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+
+ switch (event) {
+ case IRCOMM_TTY_GOT_LSAPSEL:
+ /* Connect to remote device */
+ ret = ircomm_connect_request(self->ircomm, self->dlsap_sel,
+ self->saddr, self->daddr,
+ NULL, self->service_type);
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ ircomm_tty_next_state(self, IRCOMM_TTY_SETUP);
+ break;
+ case IRCOMM_TTY_WD_TIMER_EXPIRED:
+ /* Go back to search mode */
+ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ break;
+ case IRCOMM_TTY_CONNECT_INDICATION:
+ del_timer(&self->watchdog_timer);
+ ircomm_tty_ias_unregister(self);
+
+ /* Accept connection */
+ ircomm_connect_response(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_state_setup (self, event, skb, info)
+ *
+ * Trying to connect
+ *
+ */
+static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
+
+ switch (event) {
+ case IRCOMM_TTY_CONNECT_CONFIRM:
+ del_timer(&self->watchdog_timer);
+ ircomm_tty_ias_unregister(self);
+
+ /*
+ * Send initial parameters. This will also send out queued
+ * parameters waiting for the connection to come up
+ */
+ ircomm_tty_send_initial_parameters(self);
+ ircomm_tty_link_established(self);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_CONNECT_INDICATION:
+ del_timer(&self->watchdog_timer);
+ ircomm_tty_ias_unregister(self);
+
+ /* Accept connection */
+ ircomm_connect_response(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_READY);
+ break;
+ case IRCOMM_TTY_WD_TIMER_EXPIRED:
+ /* Go back to search mode */
+ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ /* ircomm_disconnect_request(self->ircomm, NULL); */
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Function ircomm_tty_state_ready (self, event, skb, info)
+ *
+ * IrCOMM is now connected
+ *
+ */
+static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
+ IRCOMM_TTY_EVENT event,
+ struct sk_buff *skb,
+ struct ircomm_tty_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case IRCOMM_TTY_DATA_REQUEST:
+ ret = ircomm_data_request(self->ircomm, skb);
+ break;
+ case IRCOMM_TTY_DETACH_CABLE:
+ ircomm_disconnect_request(self->ircomm, NULL);
+ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
+ break;
+ case IRCOMM_TTY_DISCONNECT_INDICATION:
+ ircomm_tty_ias_register(self);
+ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
+ ircomm_tty_start_watchdog_timer(self, 3*HZ);
+
+ if (tty_port_check_carrier(&self->port)) {
+ /* Drop carrier */
+ self->settings.dce = IRCOMM_DELTA_CD;
+ ircomm_tty_check_modem_status(self);
+ } else {
+ pr_debug("%s(), hanging up!\n", __func__);
+ tty_port_tty_hangup(&self->port, false);
+ }
+ break;
+ default:
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c b/drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c
new file mode 100644
index 000000000000..171c3dee760e
--- /dev/null
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c
@@ -0,0 +1,291 @@
+/*********************************************************************
+ *
+ * Filename: ircomm_tty_ioctl.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Jun 10 14:39:09 1999
+ * Modified at: Wed Jan 5 14:45:43 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+
+#include <net/irda/ircomm_core.h>
+#include <net/irda/ircomm_param.h>
+#include <net/irda/ircomm_tty_attach.h>
+#include <net/irda/ircomm_tty.h>
+
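+/* Only these input flags matter when deciding whether termios changed */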
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+/*
+ * Function ircomm_tty_change_speed (driver)
+ *
+ * Change speed of the driver. If the remote device is a DCE, then this
+ * should make it change the speed of its serial port
+ */
+static void ircomm_tty_change_speed(struct ircomm_tty_cb *self,
+ struct tty_struct *tty)
+{
+ unsigned int cflag, cval;
+ int baud;
+
+ if (!self->ircomm)
+ return;
+
+ cflag = tty->termios.c_cflag;
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5: cval = IRCOMM_WSIZE_5; break;
+ case CS6: cval = IRCOMM_WSIZE_6; break;
+ case CS7: cval = IRCOMM_WSIZE_7; break;
+ case CS8: cval = IRCOMM_WSIZE_8; break;
+ default: cval = IRCOMM_WSIZE_5; break;
+ }
+ if (cflag & CSTOPB)
+ cval |= IRCOMM_2_STOP_BIT;
+
+ if (cflag & PARENB)
+ cval |= IRCOMM_PARITY_ENABLE;
+ if (!(cflag & PARODD))
+ cval |= IRCOMM_PARITY_EVEN;
+
+ /* Determine divisor based on baud rate */
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ baud = 9600; /* B0 transition handled in rs_set_termios */
+
+ self->settings.data_rate = baud;
+ ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
+
+ /* CTS flow control flag and modem status interrupts */
+ tty_port_set_cts_flow(&self->port, cflag & CRTSCTS);
+ if (cflag & CRTSCTS) {
+ self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
+ /* This got me. Bummer. Jean II */
+ if (self->service_type == IRCOMM_3_WIRE_RAW)
+ net_warn_ratelimited("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n",
+ __func__);
+ } else {
+ self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
+ }
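+	/* Carrier checking follows CLOCAL: ignore CD when CLOCAL is set */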
+ tty_port_set_check_carrier(&self->port, ~cflag & CLOCAL);
+
+ self->settings.data_format = cval;
+
+ ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
+ ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
+}
+
+/*
+ * Function ircomm_tty_set_termios (tty, old_termios)
+ *
+ * This routine allows the tty driver to be notified when device's
+ * termios settings have changed. Note that a well-designed tty driver
+ * should be prepared to accept the case where old == NULL, and try to
+ * do something rational.
+ */
+void ircomm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned int cflag = tty->termios.c_cflag;
+
+ if ((cflag == old_termios->c_cflag) &&
+ (RELEVANT_IFLAG(tty->termios.c_iflag) ==
+ RELEVANT_IFLAG(old_termios->c_iflag)))
+ {
+ return;
+ }
+
+ ircomm_tty_change_speed(self, tty);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
+ self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+ self->settings.dte |= IRCOMM_DTR;
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
+ self->settings.dte |= IRCOMM_RTS;
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
+ {
+ tty->hw_stopped = 0;
+ ircomm_tty_start(tty);
+ }
+}
+
+/*
+ * Function ircomm_tty_tiocmget (tty)
+ *
+ *    Return the current state of the modem control and status lines, as
+ *    reported to user space through the TIOCMGET ioctl.
+ *
+ */
+int ircomm_tty_tiocmget(struct tty_struct *tty)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ unsigned int result;
+
+ if (tty_io_error(tty))
+ return -EIO;
+
+ result = ((self->settings.dte & IRCOMM_RTS) ? TIOCM_RTS : 0)
+ | ((self->settings.dte & IRCOMM_DTR) ? TIOCM_DTR : 0)
+ | ((self->settings.dce & IRCOMM_CD) ? TIOCM_CAR : 0)
+ | ((self->settings.dce & IRCOMM_RI) ? TIOCM_RNG : 0)
+ | ((self->settings.dce & IRCOMM_DSR) ? TIOCM_DSR : 0)
+ | ((self->settings.dce & IRCOMM_CTS) ? TIOCM_CTS : 0);
+ return result;
+}
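
The bit mask built here is exactly what user space reads back with the standard
TIOCMGET ioctl; a minimal sketch on an already-open tty descriptor (illustration only):

#include <stdio.h>
#include <sys/ioctl.h>

/* Sketch: print the DTR/RTS/CD state reported by ircomm_tty_tiocmget(). */
static void show_modem_bits(int fd)
{
	int bits;

	if (ioctl(fd, TIOCMGET, &bits) == 0)
		printf("DTR=%d RTS=%d CD=%d\n",
		       !!(bits & TIOCM_DTR),
		       !!(bits & TIOCM_RTS),
		       !!(bits & TIOCM_CAR));
}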
+
+/*
+ * Function ircomm_tty_tiocmset (tty, set, clear)
+ *
+ *    Set and/or clear the DTR and RTS lines as requested through the
+ *    TIOCMSET/TIOCMBIS/TIOCMBIC ioctls.
+ *
+ */
+int ircomm_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+
+ if (tty_io_error(tty))
+ return -EIO;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
+ if (set & TIOCM_RTS)
+ self->settings.dte |= IRCOMM_RTS;
+ if (set & TIOCM_DTR)
+ self->settings.dte |= IRCOMM_DTR;
+
+ if (clear & TIOCM_RTS)
+ self->settings.dte &= ~IRCOMM_RTS;
+ if (clear & TIOCM_DTR)
+ self->settings.dte &= ~IRCOMM_DTR;
+
+ if ((set|clear) & TIOCM_RTS)
+ self->settings.dte |= IRCOMM_DELTA_RTS;
+ if ((set|clear) & TIOCM_DTR)
+ self->settings.dte |= IRCOMM_DELTA_DTR;
+
+ ircomm_param_request(self, IRCOMM_DTE, TRUE);
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_get_serial_info (self, retinfo)
+ *
+ *    Return the current driver settings (TIOCGSERIAL) to user space.
+ *
+ */
+static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct info;
+
+ memset(&info, 0, sizeof(info));
+ info.line = self->line;
+ info.flags = self->port.flags;
+ info.baud_base = self->settings.data_rate;
+ info.close_delay = self->port.close_delay;
+ info.closing_wait = self->port.closing_wait;
+
+ /* For compatibility */
+ info.type = PORT_16550A;
+
+ if (copy_to_user(retinfo, &info, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_set_serial_info (self, new_info)
+ *
+ *    Accept TIOCSSERIAL from user space; changing these settings is not
+ *    supported, so this is currently a no-op.
+ *
+ */
+static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
+ struct serial_struct __user *new_info)
+{
+ return 0;
+}
+
+/*
+ * Function ircomm_tty_ioctl (tty, cmd, arg)
+ *
+ *    Handle the subset of serial ioctls that make sense for IrCOMM.
+ *
+ */
+int ircomm_tty_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
+ int ret = 0;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
+ (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
+ if (tty_io_error(tty))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ ret = ircomm_tty_get_serial_info(self, (struct serial_struct __user *) arg);
+ break;
+ case TIOCSSERIAL:
+ ret = ircomm_tty_set_serial_info(self, (struct serial_struct __user *) arg);
+ break;
+ case TIOCMIWAIT:
+		pr_debug("%s(), TIOCMIWAIT, not impl!\n", __func__);
+ break;
+
+ case TIOCGICOUNT:
+ pr_debug("%s(), TIOCGICOUNT not impl!\n", __func__);
+ return 0;
+ default:
+ ret = -ENOIOCTLCMD; /* ioctls which we must ignore */
+ }
+ return ret;
+}
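
Of the ioctls accepted above, only TIOCGSERIAL returns real data; a caller sees the
negotiated IrCOMM data rate in baud_base, roughly as in this sketch (not part of the
patch):

#include <sys/ioctl.h>
#include <linux/serial.h>

/* Sketch: fetch the serial_struct filled in by ircomm_tty_get_serial_info(). */
static int ircomm_baud_base(int fd)
{
	struct serial_struct ss;

	if (ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return -1;
	return ss.baud_base;	/* mirrors self->settings.data_rate */
}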
+
+
+
diff --git a/drivers/staging/irda/net/irda_device.c b/drivers/staging/irda/net/irda_device.c
new file mode 100644
index 000000000000..890b90d055d5
--- /dev/null
+++ b/drivers/staging/irda/net/irda_device.c
@@ -0,0 +1,316 @@
+/*********************************************************************
+ *
+ * Filename: irda_device.c
+ * Version: 0.9
+ * Description: Utility functions used by the device drivers
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Oct 9 09:22:27 1999
+ * Modified at: Sun Jan 23 17:41:24 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/capability.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/ioctls.h>
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <net/irda/irda_device.h>
+#include <net/irda/irlap.h>
+#include <net/irda/timer.h>
+#include <net/irda/wrapper.h>
+
+static void __irda_task_delete(struct irda_task *task);
+
+static hashbin_t *dongles = NULL;
+static hashbin_t *tasks = NULL;
+
+static void irda_task_timer_expired(void *data);
+
+int __init irda_device_init( void)
+{
+ dongles = hashbin_new(HB_NOLOCK);
+ if (dongles == NULL) {
+ net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&dongles->hb_spinlock);
+
+ tasks = hashbin_new(HB_LOCK);
+ if (tasks == NULL) {
+ net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n");
+ hashbin_delete(dongles, NULL);
+ return -ENOMEM;
+ }
+
+ /* We no longer initialise the driver ourselves here, we let
+ * the system do it for us... - Jean II */
+
+ return 0;
+}
+
+static void leftover_dongle(void *arg)
+{
+ struct dongle_reg *reg = arg;
+ net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n",
+ reg->type);
+}
+
+void irda_device_cleanup(void)
+{
+ hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete);
+
+ hashbin_delete(dongles, leftover_dongle);
+}
+
+/*
+ * Function irda_device_set_media_busy (dev, status)
+ *
+ * Called when we have detected that another station is transmitting
+ * in contention mode.
+ */
+void irda_device_set_media_busy(struct net_device *dev, int status)
+{
+ struct irlap_cb *self;
+
+ pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
+
+ self = (struct irlap_cb *) dev->atalk_ptr;
+
+ /* Some drivers may enable the receive interrupt before calling
+ * irlap_open(), or they may disable the receive interrupt
+ * after calling irlap_close().
+ * The IrDA stack is protected from this in irlap_driver_rcv().
+ * However, the driver calls directly the wrapper, that calls
+ * us directly. Make sure we protect ourselves.
+ * Jean II */
+ if (!self || self->magic != LAP_MAGIC)
+ return;
+
+ if (status) {
+ self->media_busy = TRUE;
+ if (status == SMALL)
+ irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT);
+ else
+ irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT);
+ pr_debug("Media busy!\n");
+ } else {
+ self->media_busy = FALSE;
+ irlap_stop_mbusy_timer(self);
+ }
+}
+EXPORT_SYMBOL(irda_device_set_media_busy);
+
+
+/*
+ * Function irda_device_is_receiving (dev)
+ *
+ * Check if the device driver is currently receiving data
+ *
+ */
+int irda_device_is_receiving(struct net_device *dev)
+{
+ struct if_irda_req req;
+ int ret;
+
+ if (!dev->netdev_ops->ndo_do_ioctl) {
+ net_err_ratelimited("%s: do_ioctl not impl. by device driver\n",
+ __func__);
+ return -1;
+ }
+
+ ret = (dev->netdev_ops->ndo_do_ioctl)(dev, (struct ifreq *) &req,
+ SIOCGRECEIVING);
+ if (ret < 0)
+ return ret;
+
+ return req.ifr_receiving;
+}
+
+static void __irda_task_delete(struct irda_task *task)
+{
+ del_timer(&task->timer);
+
+ kfree(task);
+}
+
+static void irda_task_delete(struct irda_task *task)
+{
+ /* Unregister task */
+ hashbin_remove(tasks, (long) task, NULL);
+
+ __irda_task_delete(task);
+}
+
+/*
+ * Function irda_task_kick (task)
+ *
+ *    Tries to execute a task, possibly multiple times, until the task is
+ *    either finished or asks for a timeout. When a task is finished, we do
+ *    post processing and notify the parent task that is waiting for this
+ *    task to complete.
+ */
+static int irda_task_kick(struct irda_task *task)
+{
+ int finished = TRUE;
+ int count = 0;
+ int timeout;
+
+ IRDA_ASSERT(task != NULL, return -1;);
+ IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;);
+
+	/* Execute task until it's finished, or asks for a timeout */
+ do {
+ timeout = task->function(task);
+ if (count++ > 100) {
+ net_err_ratelimited("%s: error in task handler!\n",
+ __func__);
+ irda_task_delete(task);
+ return TRUE;
+ }
+ } while ((timeout == 0) && (task->state != IRDA_TASK_DONE));
+
+ if (timeout < 0) {
+ net_err_ratelimited("%s: Error executing task!\n", __func__);
+ irda_task_delete(task);
+ return TRUE;
+ }
+
+ /* Check if we are finished */
+ if (task->state == IRDA_TASK_DONE) {
+ del_timer(&task->timer);
+
+ /* Do post processing */
+ if (task->finished)
+ task->finished(task);
+
+ /* Notify parent */
+ if (task->parent) {
+ /* Check if parent is waiting for us to complete */
+ if (task->parent->state == IRDA_TASK_CHILD_WAIT) {
+ task->parent->state = IRDA_TASK_CHILD_DONE;
+
+ /* Stop timer now that we are here */
+ del_timer(&task->parent->timer);
+
+ /* Kick parent task */
+ irda_task_kick(task->parent);
+ }
+ }
+ irda_task_delete(task);
+ } else if (timeout > 0) {
+ irda_start_timer(&task->timer, timeout, (void *) task,
+ irda_task_timer_expired);
+ finished = FALSE;
+ } else {
+ pr_debug("%s(), not finished, and no timeout!\n",
+ __func__);
+ finished = FALSE;
+ }
+
+ return finished;
+}
+
+/*
+ * Function irda_task_timer_expired (data)
+ *
+ *    The task timer has expired. We now try to execute the task (again), and
+ *    restart the timer if the task has not finished yet
+ */
+static void irda_task_timer_expired(void *data)
+{
+ struct irda_task *task;
+
+ task = data;
+
+ irda_task_kick(task);
+}
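
Reconstructed from the loop in irda_task_kick(), the contract for task->function is:
return a positive delay (in jiffies) to be re-kicked later, return 0 to be retried
immediately, return a negative value on error, and set IRDA_TASK_DONE when the job is
complete. A hypothetical task step, for illustration only (not a function from this
patch):

/* Hypothetical dongle task step showing the return-value convention. */
static int example_dongle_task(struct irda_task *task)
{
	switch (task->state) {
	case IRDA_TASK_INIT:
		/* ...program the dongle here... */
		task->state = IRDA_TASK_WAIT;
		return msecs_to_jiffies(20);	/* re-kick after the dongle settles */
	case IRDA_TASK_WAIT:
		task->state = IRDA_TASK_DONE;	/* irda_task_kick() finishes us */
		return 0;
	default:
		return -1;			/* error: the task gets deleted */
	}
}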
+
+/*
+ * Function irda_device_setup (dev)
+ *
+ * This function should be used by low level device drivers in a similar way
+ * as ether_setup() is used by normal network device drivers
+ */
+static void irda_device_setup(struct net_device *dev)
+{
+ dev->hard_header_len = 0;
+ dev->addr_len = LAP_ALEN;
+
+ dev->type = ARPHRD_IRDA;
+ dev->tx_queue_len = 8; /* Window size + 1 s-frame */
+
+ memset(dev->broadcast, 0xff, LAP_ALEN);
+
+ dev->mtu = 2048;
+ dev->flags = IFF_NOARP;
+}
+
+/*
+ * Function alloc_irdadev (sizeof_priv)
+ * Allocates and sets up an IRDA device in a manner similar to
+ * alloc_etherdev.
+ */
+struct net_device *alloc_irdadev(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "irda%d", NET_NAME_UNKNOWN,
+ irda_device_setup);
+}
+EXPORT_SYMBOL(alloc_irdadev);
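
A low-level driver pairs alloc_irdadev() with register_netdev() much like an Ethernet
driver pairs alloc_etherdev(); a simplified sketch follows (my_irda_priv and
my_netdev_ops are placeholders, error handling trimmed):

#include <linux/netdevice.h>
#include <net/irda/irda_device.h>

struct my_irda_priv {
	struct qos_info qos;		/* per-device QoS, as most IrDA drivers keep */
};

static const struct net_device_ops my_netdev_ops = {
	/* .ndo_open, .ndo_stop, .ndo_start_xmit, ... */
};

static int my_irda_probe(void)
{
	struct net_device *dev = alloc_irdadev(sizeof(struct my_irda_priv));

	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &my_netdev_ops;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return -ENODEV;
	}
	return 0;
}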
+
+#ifdef CONFIG_ISA_DMA_API
+/*
+ * Function irda_setup_dma (channel, buffer, count, mode)
+ *
+ *    Set up the DMA channel. Commonly used by LPC FIR drivers
+ *
+ */
+void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode)
+{
+ unsigned long flags;
+
+ flags = claim_dma_lock();
+
+ disable_dma(channel);
+ clear_dma_ff(channel);
+ set_dma_mode(channel, mode);
+ set_dma_addr(channel, buffer);
+ set_dma_count(channel, count);
+ enable_dma(channel);
+
+ release_dma_lock(flags);
+}
+EXPORT_SYMBOL(irda_setup_dma);
+#endif
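
A FIR driver built on ISA-style DMA would typically hand irda_setup_dma() a bus address
it mapped earlier; a hedged sketch (names illustrative, buffer assumed to come from
dma_alloc_coherent() at probe time):

/* Sketch: start a DMA transmit of 'len' bytes on an ISA DMA channel. */
static void my_fir_dma_tx(int channel, dma_addr_t buf_dma, int len)
{
	irda_setup_dma(channel, buf_dma, len, DMA_MODE_WRITE);
	/* ...then program the FIR chip to start draining the channel... */
}
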
diff --git a/drivers/staging/irda/net/iriap.c b/drivers/staging/irda/net/iriap.c
new file mode 100644
index 000000000000..1138eaf5c682
--- /dev/null
+++ b/drivers/staging/irda/net/iriap.c
@@ -0,0 +1,1085 @@
+/*********************************************************************
+ *
+ * Filename: iriap.c
+ * Version: 0.8
+ * Description: Information Access Protocol (IAP)
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Aug 21 00:02:07 1997
+ * Modified at: Sat Dec 25 16:42:42 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/iriap_event.h>
+#include <net/irda/iriap.h>
+
+/* FIXME: This one should go in irlmp.c */
+static const char *const ias_charset_types[] __maybe_unused = {
+ "CS_ASCII",
+ "CS_ISO_8859_1",
+ "CS_ISO_8859_2",
+ "CS_ISO_8859_3",
+ "CS_ISO_8859_4",
+ "CS_ISO_8859_5",
+ "CS_ISO_8859_6",
+ "CS_ISO_8859_7",
+ "CS_ISO_8859_8",
+ "CS_ISO_8859_9",
+ "CS_UNICODE"
+};
+
+static hashbin_t *iriap = NULL;
+static void *service_handle;
+
+static void __iriap_close(struct iriap_cb *self);
+static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode);
+static void iriap_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *skb);
+static void iriap_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+static void iriap_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb);
+static int iriap_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+
+static void iriap_watchdog_timer_expired(void *data);
+
+static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
+ int timeout)
+{
+ irda_start_timer(&self->watchdog_timer, timeout, self,
+ iriap_watchdog_timer_expired);
+}
+
+static struct lock_class_key irias_objects_key;
+
+/*
+ * Function iriap_init (void)
+ *
+ * Initializes the IrIAP layer, called by the module initialization code
+ * in irmod.c
+ */
+int __init iriap_init(void)
+{
+ struct ias_object *obj;
+ struct iriap_cb *server;
+ __u8 oct_seq[6];
+ __u16 hints;
+
+ /* Allocate master array */
+ iriap = hashbin_new(HB_LOCK);
+ if (!iriap)
+ return -ENOMEM;
+
+ /* Object repository - defined in irias_object.c */
+ irias_objects = hashbin_new(HB_LOCK);
+ if (!irias_objects) {
+ net_warn_ratelimited("%s: Can't allocate irias_objects hashbin!\n",
+ __func__);
+ hashbin_delete(iriap, NULL);
+ return -ENOMEM;
+ }
+
+ lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
+ "irias_objects");
+
+ /*
+ * Register some default services for IrLMP
+ */
+ hints = irlmp_service_to_hint(S_COMPUTER);
+ service_handle = irlmp_register_service(hints);
+
+ /* Register the Device object with LM-IAS */
+ obj = irias_new_object("Device", IAS_DEVICE_ID);
+ irias_add_string_attrib(obj, "DeviceName", "Linux", IAS_KERNEL_ATTR);
+
+ oct_seq[0] = 0x01; /* Version 1 */
+ oct_seq[1] = 0x00; /* IAS support bits */
+ oct_seq[2] = 0x00; /* LM-MUX support bits */
+#ifdef CONFIG_IRDA_ULTRA
+ oct_seq[2] |= 0x04; /* Connectionless Data support */
+#endif
+ irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3,
+ IAS_KERNEL_ATTR);
+ irias_insert_object(obj);
+
+ /*
+ * Register server support with IrLMP so we can accept incoming
+ * connections
+ */
+ server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
+ if (!server) {
+ pr_debug("%s(), unable to open server\n", __func__);
+ return -1;
+ }
+ iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
+
+ return 0;
+}
+
+/*
+ * Function iriap_cleanup (void)
+ *
+ *    Cleans up the IrIAP layer, called by the module cleanup code in
+ *    irmod.c
+ */
+void iriap_cleanup(void)
+{
+ irlmp_unregister_service(service_handle);
+
+ hashbin_delete(iriap, (FREE_FUNC) __iriap_close);
+ hashbin_delete(irias_objects, (FREE_FUNC) __irias_delete_object);
+}
+
+/*
+ * Function iriap_open (slsap_sel, mode, priv, callback)
+ *
+ * Opens an instance of the IrIAP layer, and registers with IrLMP
+ */
+struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+ CONFIRM_CALLBACK callback)
+{
+ struct iriap_cb *self;
+
+ self = kzalloc(sizeof(*self), GFP_ATOMIC);
+ if (!self)
+ return NULL;
+
+ /*
+ * Initialize instance
+ */
+
+ self->magic = IAS_MAGIC;
+ self->mode = mode;
+ if (mode == IAS_CLIENT) {
+ if (iriap_register_lsap(self, slsap_sel, mode)) {
+ kfree(self);
+ return NULL;
+ }
+ }
+
+ self->confirm = callback;
+ self->priv = priv;
+
+ /* iriap_getvaluebyclass_request() will construct packets before
+ * we connect, so this must have a sane value... Jean II */
+ self->max_header_size = LMP_MAX_HEADER;
+
+ init_timer(&self->watchdog_timer);
+
+ hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
+
+ /* Initialize state machines */
+ iriap_next_client_state(self, S_DISCONNECT);
+ iriap_next_call_state(self, S_MAKE_CALL);
+ iriap_next_server_state(self, R_DISCONNECT);
+ iriap_next_r_connect_state(self, R_WAITING);
+
+ return self;
+}
+EXPORT_SYMBOL(iriap_open);
+
+/*
+ * Function __iriap_close (self)
+ *
+ * Removes (deallocates) the IrIAP instance
+ *
+ */
+static void __iriap_close(struct iriap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ del_timer(&self->watchdog_timer);
+
+ if (self->request_skb)
+ dev_kfree_skb(self->request_skb);
+
+ self->magic = 0;
+
+ kfree(self);
+}
+
+/*
+ * Function iriap_close (self)
+ *
+ * Closes IrIAP and deregisters with IrLMP
+ */
+void iriap_close(struct iriap_cb *self)
+{
+ struct iriap_cb *entry;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
+ self->lsap = NULL;
+ }
+
+ entry = (struct iriap_cb *) hashbin_remove(iriap, (long) self, NULL);
+ IRDA_ASSERT(entry == self, return;);
+
+ __iriap_close(self);
+}
+EXPORT_SYMBOL(iriap_close);
+
+static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
+{
+ notify_t notify;
+
+ irda_notify_init(&notify);
+ notify.connect_confirm = iriap_connect_confirm;
+ notify.connect_indication = iriap_connect_indication;
+ notify.disconnect_indication = iriap_disconnect_indication;
+ notify.data_indication = iriap_data_indication;
+ notify.instance = self;
+ if (mode == IAS_CLIENT)
+ strcpy(notify.name, "IrIAS cli");
+ else
+ strcpy(notify.name, "IrIAS srv");
+
+ self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
+ if (self->lsap == NULL) {
+ net_err_ratelimited("%s: Unable to allocated LSAP!\n",
+ __func__);
+ return -1;
+ }
+ self->slsap_sel = self->lsap->slsap_sel;
+
+ return 0;
+}
+
+/*
+ * Function iriap_disconnect_indication (handle, reason)
+ *
+ * Got disconnect, so clean up everything associated with this connection
+ *
+ */
+static void iriap_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
+ struct iriap_cb *self;
+
+ pr_debug("%s(), reason=%s [%d]\n", __func__,
+ irlmp_reason_str(reason), reason);
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ IRDA_ASSERT(iriap != NULL, return;);
+
+ del_timer(&self->watchdog_timer);
+
+ /* Not needed */
+ if (skb)
+ dev_kfree_skb(skb);
+
+ if (self->mode == IAS_CLIENT) {
+ pr_debug("%s(), disconnect as client\n", __func__);
+
+
+ iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION,
+ NULL);
+ /*
+ * Inform service user that the request failed by sending
+ * it a NULL value. Warning, the client might close us, so
+		 * remember not to use self anymore after calling confirm
+ */
+ if (self->confirm)
+ self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
+ } else {
+ pr_debug("%s(), disconnect as server\n", __func__);
+ iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
+ NULL);
+ iriap_close(self);
+ }
+}
+
+/*
+ * Function iriap_disconnect_request (handle)
+ */
+static void iriap_disconnect_request(struct iriap_cb *self)
+{
+ struct sk_buff *tx_skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ pr_debug("%s(), Could not allocate an sk_buff of length %d\n",
+ __func__, LMP_MAX_HEADER);
+ return;
+ }
+
+ /*
+ * Reserve space for MUX control and LAP header
+ */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+
+ irlmp_disconnect_request(self->lsap, tx_skb);
+}
+
+/*
+ * Function iriap_getvaluebyclass_request (self, saddr, daddr, name, attr)
+ *
+ *    Retrieve the value of the given attribute from all objects of the
+ *    given class name
+ */
+int iriap_getvaluebyclass_request(struct iriap_cb *self,
+ __u32 saddr, __u32 daddr,
+ char *name, char *attr)
+{
+ struct sk_buff *tx_skb;
+ int name_len, attr_len, skb_len;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return -1;);
+
+ /* Client must supply the destination device address */
+ if (!daddr)
+ return -1;
+
+ self->daddr = daddr;
+ self->saddr = saddr;
+
+ /*
+ * Save operation, so we know what the later indication is about
+ */
+ self->operation = GET_VALUE_BY_CLASS;
+
+ /* Give ourselves 10 secs to finish this operation */
+ iriap_start_watchdog_timer(self, 10*HZ);
+
+ name_len = strlen(name); /* Up to IAS_MAX_CLASSNAME = 60 */
+ attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
+
+ skb_len = self->max_header_size+2+name_len+1+attr_len+4;
+ tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(tx_skb, self->max_header_size);
+ skb_put(tx_skb, 3+name_len+attr_len);
+ frame = tx_skb->data;
+
+ /* Build frame */
+ frame[0] = IAP_LST | GET_VALUE_BY_CLASS;
+ frame[1] = name_len; /* Insert length of name */
+ memcpy(frame+2, name, name_len); /* Insert name */
+ frame[2+name_len] = attr_len; /* Insert length of attr */
+ memcpy(frame+3+name_len, attr, attr_len); /* Insert attr */
+
+ iriap_do_client_event(self, IAP_CALL_REQUEST_GVBC, tx_skb);
+
+ /* Drop reference count - see state_s_disconnect(). */
+ dev_kfree_skb(tx_skb);
+
+ return 0;
+}
+EXPORT_SYMBOL(iriap_getvaluebyclass_request);
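
Putting iriap_open() and this request together, a client query looks roughly like the
sketch below. The confirm callback signature is inferred from the self->confirm() calls
in this file, the attribute names are the ones IrCOMM uses, and the addresses would
normally come from discovery; treat it as an illustration, not a reference:

/* Sketch: called back when the remote LM-IAS answers (or the link drops). */
static void my_ias_confirm(int result, __u16 obj_id,
			   struct ias_value *value, void *priv)
{
	if (result == IAS_SUCCESS && value) {
		/* consume value->t.integer / value->t.string here... */
		irias_delete_value(value);	/* ...then free it */
	}
	/* on IAS_DISCONNECT and friends, value is NULL */
}

static int my_query_peer(__u32 saddr, __u32 daddr)
{
	struct iriap_cb *iriap;

	iriap = iriap_open(LSAP_ANY, IAS_CLIENT, NULL, my_ias_confirm);
	if (!iriap)
		return -ENOMEM;

	/* the iriap handle should be iriap_close()d once the query is done */
	return iriap_getvaluebyclass_request(iriap, saddr, daddr,
					     "IrDA:IrCOMM",
					     "IrDA:TinyTP:LsapSel");
}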
+
+/*
+ * Function iriap_getvaluebyclass_confirm (self, skb)
+ *
+ * Got result from GetValueByClass command. Parse it and return result
+ * to service user.
+ *
+ */
+static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
+ struct sk_buff *skb)
+{
+ struct ias_value *value;
+ int charset;
+ __u32 value_len;
+ __u32 tmp_cpu32;
+ __u16 obj_id;
+ __u16 len;
+ __u8 type;
+ __u8 *fp;
+ int n;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Initialize variables */
+ fp = skb->data;
+ n = 2;
+
+ /* Get length, MSB first */
+ len = get_unaligned_be16(fp + n);
+ n += 2;
+
+ pr_debug("%s(), len=%d\n", __func__, len);
+
+ /* Get object ID, MSB first */
+ obj_id = get_unaligned_be16(fp + n);
+ n += 2;
+
+ type = fp[n++];
+ pr_debug("%s(), Value type = %d\n", __func__, type);
+
+ switch (type) {
+ case IAS_INTEGER:
+ memcpy(&tmp_cpu32, fp+n, 4); n += 4;
+ be32_to_cpus(&tmp_cpu32);
+ value = irias_new_integer_value(tmp_cpu32);
+
+ /* Legal values restricted to 0x01-0x6f, page 15 irttp */
+ pr_debug("%s(), lsap=%d\n", __func__, value->t.integer);
+ break;
+ case IAS_STRING:
+ charset = fp[n++];
+
+ switch (charset) {
+ case CS_ASCII:
+ break;
+/* case CS_ISO_8859_1: */
+/* case CS_ISO_8859_2: */
+/* case CS_ISO_8859_3: */
+/* case CS_ISO_8859_4: */
+/* case CS_ISO_8859_5: */
+/* case CS_ISO_8859_6: */
+/* case CS_ISO_8859_7: */
+/* case CS_ISO_8859_8: */
+/* case CS_ISO_8859_9: */
+/* case CS_UNICODE: */
+ default:
+ pr_debug("%s(), charset [%d] %s, not supported\n",
+ __func__, charset,
+ charset < ARRAY_SIZE(ias_charset_types) ?
+ ias_charset_types[charset] :
+ "(unknown)");
+
+ /* Aborting, close connection! */
+ iriap_disconnect_request(self);
+ return;
+ /* break; */
+ }
+ value_len = fp[n++];
+ pr_debug("%s(), strlen=%d\n", __func__, value_len);
+
+ /* Make sure the string is null-terminated */
+ if (n + value_len < skb->len)
+ fp[n + value_len] = 0x00;
+ pr_debug("Got string %s\n", fp+n);
+
+ /* Will truncate to IAS_MAX_STRING bytes */
+ value = irias_new_string_value(fp+n);
+ break;
+ case IAS_OCT_SEQ:
+ value_len = get_unaligned_be16(fp + n);
+ n += 2;
+
+ /* Will truncate to IAS_MAX_OCTET_STRING bytes */
+ value = irias_new_octseq_value(fp+n, value_len);
+ break;
+ default:
+ value = irias_new_missing_value();
+ break;
+ }
+
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+	/* Warning, the client might close us, so remember not to use self
+ * anymore after calling confirm
+ */
+ if (self->confirm)
+ self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
+ else {
+ pr_debug("%s(), missing handler!\n", __func__);
+ irias_delete_value(value);
+ }
+}
+
+/*
+ * Function iriap_getvaluebyclass_response ()
+ *
+ * Send answer back to remote LM-IAS
+ *
+ */
+static void iriap_getvaluebyclass_response(struct iriap_cb *self,
+ __u16 obj_id,
+ __u8 ret_code,
+ struct ias_value *value)
+{
+ struct sk_buff *tx_skb;
+ int n;
+ __be32 tmp_be32;
+ __be16 tmp_be16;
+ __u8 *fp;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+ IRDA_ASSERT(value != NULL, return;);
+ IRDA_ASSERT(value->len <= 1024, return;);
+
+ /* Initialize variables */
+ n = 0;
+
+ /*
+ * We must adjust the size of the response after the length of the
+ * value. We add 32 bytes because of the 6 bytes for the frame and
+ * max 5 bytes for the value coding.
+ */
+ tx_skb = alloc_skb(value->len + self->max_header_size + 32,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(tx_skb, self->max_header_size);
+ skb_put(tx_skb, 6);
+
+ fp = tx_skb->data;
+
+ /* Build frame */
+ fp[n++] = GET_VALUE_BY_CLASS | IAP_LST;
+ fp[n++] = ret_code;
+
+ /* Insert list length (MSB first) */
+ tmp_be16 = htons(0x0001);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
+
+ /* Insert object identifier ( MSB first) */
+ tmp_be16 = cpu_to_be16(obj_id);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
+
+ switch (value->type) {
+ case IAS_STRING:
+ skb_put(tx_skb, 3 + value->len);
+ fp[n++] = value->type;
+ fp[n++] = 0; /* ASCII */
+ fp[n++] = (__u8) value->len;
+ memcpy(fp+n, value->t.string, value->len); n+=value->len;
+ break;
+ case IAS_INTEGER:
+ skb_put(tx_skb, 5);
+ fp[n++] = value->type;
+
+ tmp_be32 = cpu_to_be32(value->t.integer);
+ memcpy(fp+n, &tmp_be32, 4); n += 4;
+ break;
+ case IAS_OCT_SEQ:
+ skb_put(tx_skb, 3 + value->len);
+ fp[n++] = value->type;
+
+ tmp_be16 = cpu_to_be16(value->len);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
+ memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
+ break;
+ case IAS_MISSING:
+ pr_debug("%s: sending IAS_MISSING\n", __func__);
+ skb_put(tx_skb, 1);
+ fp[n++] = value->type;
+ break;
+ default:
+ pr_debug("%s(), type not implemented!\n", __func__);
+ break;
+ }
+ iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb);
+
+ /* Drop reference count - see state_r_execute(). */
+ dev_kfree_skb(tx_skb);
+}
+
+/*
+ * Function iriap_getvaluebyclass_indication (self, skb)
+ *
+ * getvaluebyclass is requested from peer LM-IAS
+ *
+ */
+static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
+ struct sk_buff *skb)
+{
+ struct ias_object *obj;
+ struct ias_attrib *attrib;
+ int name_len;
+ int attr_len;
+ char name[IAS_MAX_CLASSNAME + 1]; /* 60 bytes */
+ char attr[IAS_MAX_ATTRIBNAME + 1]; /* 60 bytes */
+ __u8 *fp;
+ int n;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ fp = skb->data;
+ n = 1;
+
+ name_len = fp[n++];
+
+ IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;);
+
+ memcpy(name, fp+n, name_len); n+=name_len;
+ name[name_len] = '\0';
+
+ attr_len = fp[n++];
+
+ IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;);
+
+ memcpy(attr, fp+n, attr_len); n+=attr_len;
+ attr[attr_len] = '\0';
+
+ pr_debug("LM-IAS: Looking up %s: %s\n", name, attr);
+ obj = irias_find_object(name);
+
+ if (obj == NULL) {
+ pr_debug("LM-IAS: Object %s not found\n", name);
+ iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
+ &irias_missing);
+ return;
+ }
+ pr_debug("LM-IAS: found %s, id=%d\n", obj->name, obj->id);
+
+ attrib = irias_find_attrib(obj, attr);
+ if (attrib == NULL) {
+ pr_debug("LM-IAS: Attribute %s not found\n", attr);
+ iriap_getvaluebyclass_response(self, obj->id,
+ IAS_ATTRIB_UNKNOWN,
+ &irias_missing);
+ return;
+ }
+
+ /* We have a match; send the value. */
+ iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
+ attrib->value);
+}
+
+/*
+ * Function iriap_send_ack (void)
+ *
+ * Currently not used
+ *
+ */
+void iriap_send_ack(struct iriap_cb *self)
+{
+ struct sk_buff *tx_skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ tx_skb = alloc_skb(LMP_MAX_HEADER + 1, GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(tx_skb, self->max_header_size);
+ skb_put(tx_skb, 1);
+ frame = tx_skb->data;
+
+ /* Build frame */
+ frame[0] = IAP_LST | IAP_ACK | self->operation;
+
+ irlmp_data_request(self->lsap, tx_skb);
+}
+
+void iriap_connect_request(struct iriap_cb *self)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ ret = irlmp_connect_request(self->lsap, LSAP_IAS,
+ self->saddr, self->daddr,
+ NULL, NULL);
+ if (ret < 0) {
+ pr_debug("%s(), connect failed!\n", __func__);
+ self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
+ }
+}
+
+/*
+ * Function iriap_connect_confirm (handle, skb)
+ *
+ * LSAP connection confirmed!
+ *
+ */
+static void iriap_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_seg_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct iriap_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ self->max_data_size = max_seg_size;
+ self->max_header_size = max_header_size;
+
+ del_timer(&self->watchdog_timer);
+
+ iriap_do_client_event(self, IAP_LM_CONNECT_CONFIRM, skb);
+
+ /* Drop reference count - see state_s_make_call(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function iriap_connect_indication (handle, skb)
+ *
+ * Remote LM-IAS is requesting connection
+ *
+ */
+static void iriap_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_seg_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct iriap_cb *self, *new;
+
+ self = instance;
+
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(self != NULL, goto out;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
+
+ /* Start new server */
+ new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
+ if (!new) {
+ pr_debug("%s(), open failed\n", __func__);
+ goto out;
+ }
+
+ /* Now attach up the new "socket" */
+ new->lsap = irlmp_dup(self->lsap, new);
+ if (!new->lsap) {
+ pr_debug("%s(), dup failed!\n", __func__);
+ goto out;
+ }
+
+ new->max_data_size = max_seg_size;
+ new->max_header_size = max_header_size;
+
+ /* Clean up the original one to keep it in listen state */
+ irlmp_listen(self->lsap);
+
+ iriap_do_server_event(new, IAP_LM_CONNECT_INDICATION, skb);
+
+out:
+ /* Drop reference count - see state_r_disconnect(). */
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function iriap_data_indication (handle, skb)
+ *
+ * Receives data from connection identified by handle from IrLMP
+ *
+ */
+static int iriap_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct iriap_cb *self;
+ __u8 *frame;
+ __u8 opcode;
+
+ self = instance;
+
+ IRDA_ASSERT(skb != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, goto out;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
+
+ frame = skb->data;
+
+ if (self->mode == IAS_SERVER) {
+ /* Call server */
+ pr_debug("%s(), Calling server!\n", __func__);
+ iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb);
+ goto out;
+ }
+ opcode = frame[0];
+ if (~opcode & IAP_LST) {
+		net_warn_ratelimited("%s: IrIAS multiframe commands or results are not implemented yet!\n",
+ __func__);
+ goto out;
+ }
+
+ /* Check for ack frames since they don't contain any data */
+ if (opcode & IAP_ACK) {
+ pr_debug("%s() Got ack frame!\n", __func__);
+ goto out;
+ }
+
+ opcode &= ~IAP_LST; /* Mask away LST bit */
+
+ switch (opcode) {
+ case GET_INFO_BASE:
+ pr_debug("IrLMP GetInfoBaseDetails not implemented!\n");
+ break;
+ case GET_VALUE_BY_CLASS:
+ iriap_do_call_event(self, IAP_RECV_F_LST, NULL);
+
+ switch (frame[1]) {
+ case IAS_SUCCESS:
+ iriap_getvaluebyclass_confirm(self, skb);
+ break;
+ case IAS_CLASS_UNKNOWN:
+ pr_debug("%s(), No such class!\n", __func__);
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+ /*
+ * Warning, the client might close us, so remember
+			 * not to use self anymore after calling confirm
+ */
+ if (self->confirm)
+ self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
+ self->priv);
+ break;
+ case IAS_ATTRIB_UNKNOWN:
+ pr_debug("%s(), No such attribute!\n", __func__);
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+ /*
+ * Warning, the client might close us, so remember
+			 * not to use self anymore after calling confirm
+ */
+ if (self->confirm)
+ self->confirm(IAS_ATTRIB_UNKNOWN, 0, NULL,
+ self->priv);
+ break;
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown op-code: %02x\n", __func__,
+ opcode);
+ break;
+ }
+
+out:
+ /* Cleanup - sub-calls will have done skb_get() as needed. */
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Function iriap_call_indication (self, skb)
+ *
+ * Received call to server from peer LM-IAS
+ *
+ */
+void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
+{
+ __u8 *fp;
+ __u8 opcode;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ fp = skb->data;
+
+ opcode = fp[0];
+ if (~opcode & 0x80) {
+		net_warn_ratelimited("%s: IrIAS multiframe commands or results are not implemented yet!\n",
+ __func__);
+ return;
+ }
+ opcode &= 0x7f; /* Mask away LST bit */
+
+ switch (opcode) {
+ case GET_INFO_BASE:
+ net_warn_ratelimited("%s: GetInfoBaseDetails not implemented yet!\n",
+ __func__);
+ break;
+ case GET_VALUE_BY_CLASS:
+ iriap_getvaluebyclass_indication(self, skb);
+ break;
+ }
+ /* skb will be cleaned up in iriap_data_indication */
+}
+
+/*
+ * Function iriap_watchdog_timer_expired (data)
+ *
+ *    Query has taken too long, so abort
+ *
+ */
+static void iriap_watchdog_timer_expired(void *data)
+{
+ struct iriap_cb *self = (struct iriap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ /* iriap_close(self); */
+}
+
+#ifdef CONFIG_PROC_FS
+
+static const char *const ias_value_types[] = {
+ "IAS_MISSING",
+ "IAS_INTEGER",
+ "IAS_OCT_SEQ",
+ "IAS_STRING"
+};
+
+static inline struct ias_object *irias_seq_idx(loff_t pos)
+{
+ struct ias_object *obj;
+
+ for (obj = (struct ias_object *) hashbin_get_first(irias_objects);
+ obj; obj = (struct ias_object *) hashbin_get_next(irias_objects)) {
+ if (pos-- == 0)
+ break;
+ }
+
+ return obj;
+}
+
+static void *irias_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ spin_lock_irq(&irias_objects->hb_spinlock);
+
+ return *pos ? irias_seq_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *irias_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+
+ return (v == SEQ_START_TOKEN)
+ ? (void *) hashbin_get_first(irias_objects)
+ : (void *) hashbin_get_next(irias_objects);
+}
+
+static void irias_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_irq(&irias_objects->hb_spinlock);
+}
+
+static int irias_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "LM-IAS Objects:\n");
+ else {
+ struct ias_object *obj = v;
+ struct ias_attrib *attrib;
+
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -EINVAL;);
+
+ seq_printf(seq, "name: %s, id=%d\n",
+ obj->name, obj->id);
+
+ /* Careful for priority inversions here !
+ * All other uses of attrib spinlock are independent of
+ * the object spinlock, so we are safe. Jean II */
+ spin_lock(&obj->attribs->hb_spinlock);
+
+ /* List all attributes for this object */
+ for (attrib = (struct ias_attrib *) hashbin_get_first(obj->attribs);
+ attrib != NULL;
+ attrib = (struct ias_attrib *) hashbin_get_next(obj->attribs)) {
+
+ IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC,
+ goto outloop; );
+
+ seq_printf(seq, " - Attribute name: \"%s\", ",
+ attrib->name);
+ seq_printf(seq, "value[%s]: ",
+ ias_value_types[attrib->value->type]);
+
+ switch (attrib->value->type) {
+ case IAS_INTEGER:
+ seq_printf(seq, "%d\n",
+ attrib->value->t.integer);
+ break;
+ case IAS_STRING:
+ seq_printf(seq, "\"%s\"\n",
+ attrib->value->t.string);
+ break;
+ case IAS_OCT_SEQ:
+ seq_printf(seq, "octet sequence (%d bytes)\n",
+ attrib->value->len);
+ break;
+ case IAS_MISSING:
+ seq_puts(seq, "missing\n");
+ break;
+ default:
+ seq_printf(seq, "type %d?\n",
+ attrib->value->type);
+ }
+ seq_putc(seq, '\n');
+
+ }
+ IRDA_ASSERT_LABEL(outloop:)
+ spin_unlock(&obj->attribs->hb_spinlock);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations irias_seq_ops = {
+ .start = irias_seq_start,
+ .next = irias_seq_next,
+ .stop = irias_seq_stop,
+ .show = irias_seq_show,
+};
+
+static int irias_seq_open(struct inode *inode, struct file *file)
+{
+ IRDA_ASSERT( irias_objects != NULL, return -EINVAL;);
+
+ return seq_open(file, &irias_seq_ops);
+}
+
+const struct file_operations irias_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = irias_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif /* PROC_FS */
diff --git a/drivers/staging/irda/net/iriap_event.c b/drivers/staging/irda/net/iriap_event.c
new file mode 100644
index 000000000000..e6098b2e048a
--- /dev/null
+++ b/drivers/staging/irda/net/iriap_event.c
@@ -0,0 +1,496 @@
+/*********************************************************************
+ *
+ * Filename: iriap_event.c
+ * Version: 0.1
+ * Description: IAP Finite State Machine
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Aug 21 00:02:07 1997
+ * Modified at: Wed Mar 1 11:28:34 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/iriap_event.h>
+
+static void state_s_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_connecting (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_call (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+static void state_s_make_call (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_calling (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_outstanding (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_replying (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_s_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+static void state_r_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_call (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_waiting (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_receiving (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_execute (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+static void state_r_returning (struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb);
+
+static void (*iriap_state[])(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb) = {
+ /* Client FSM */
+ state_s_disconnect,
+ state_s_connecting,
+ state_s_call,
+
+ /* S-Call FSM */
+ state_s_make_call,
+ state_s_calling,
+ state_s_outstanding,
+ state_s_replying,
+ state_s_wait_for_call,
+ state_s_wait_active,
+
+ /* Server FSM */
+ state_r_disconnect,
+ state_r_call,
+
+ /* R-Connect FSM */
+ state_r_waiting,
+ state_r_wait_active,
+ state_r_receiving,
+ state_r_execute,
+ state_r_returning,
+};
+
+void iriap_next_client_state(struct iriap_cb *self, IRIAP_STATE state)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ self->client_state = state;
+}
+
+void iriap_next_call_state(struct iriap_cb *self, IRIAP_STATE state)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ self->call_state = state;
+}
+
+void iriap_next_server_state(struct iriap_cb *self, IRIAP_STATE state)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ self->server_state = state;
+}
+
+void iriap_next_r_connect_state(struct iriap_cb *self, IRIAP_STATE state)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ self->r_connect_state = state;
+}
+
+void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ (*iriap_state[ self->client_state]) (self, event, skb);
+}
+
+void iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ (*iriap_state[ self->call_state]) (self, event, skb);
+}
+
+void iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ (*iriap_state[ self->server_state]) (self, event, skb);
+}
+
+void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ (*iriap_state[ self->r_connect_state]) (self, event, skb);
+}
+
+
+/*
+ * Function state_s_disconnect (event, skb)
+ *
+ * S-Disconnect, The device has no LSAP connection to a particular
+ * remote device.
+ */
+static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ switch (event) {
+ case IAP_CALL_REQUEST_GVBC:
+ iriap_next_client_state(self, S_CONNECTING);
+ IRDA_ASSERT(self->request_skb == NULL, return;);
+ /* Don't forget to refcount it -
+ * see iriap_getvaluebyclass_request(). */
+ skb_get(skb);
+ self->request_skb = skb;
+ iriap_connect_request(self);
+ break;
+ case IAP_LM_DISCONNECT_INDICATION:
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/*
+ * Function state_s_connecting (self, event, skb)
+ *
+ * S-Connecting
+ *
+ */
+static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ switch (event) {
+ case IAP_LM_CONNECT_CONFIRM:
+ /*
+ * Jump to S-Call FSM
+ */
+ iriap_do_call_event(self, IAP_CALL_REQUEST, skb);
+ /* iriap_call_request(self, 0,0,0); */
+ iriap_next_client_state(self, S_CALL);
+ break;
+ case IAP_LM_DISCONNECT_INDICATION:
+ /* Abort calls */
+ iriap_next_call_state(self, S_MAKE_CALL);
+ iriap_next_client_state(self, S_DISCONNECT);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/*
+ * Function state_s_call (self, event, skb)
+ *
+ * S-Call, The device can process calls to a specific remote
+ * device. Whenever the LSAP connection is disconnected, this state
+ *    catches that event and cleans up
+ */
+static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+
+ switch (event) {
+ case IAP_LM_DISCONNECT_INDICATION:
+ /* Abort calls */
+ iriap_next_call_state(self, S_MAKE_CALL);
+ iriap_next_client_state(self, S_DISCONNECT);
+ break;
+ default:
+ pr_debug("state_s_call: Unknown event %d\n", event);
+ break;
+ }
+}
+
+/*
+ * Function state_s_make_call (event, skb)
+ *
+ * S-Make-Call
+ *
+ */
+static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ switch (event) {
+ case IAP_CALL_REQUEST:
+ /* Already refcounted - see state_s_disconnect() */
+ tx_skb = self->request_skb;
+ self->request_skb = NULL;
+
+ irlmp_data_request(self->lsap, tx_skb);
+ iriap_next_call_state(self, S_OUTSTANDING);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/*
+ * Function state_s_calling (event, skb)
+ *
+ * S-Calling
+ *
+ */
+static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+/*
+ * Function state_s_outstanding (event, skb)
+ *
+ * S-Outstanding, The device is waiting for a response to a command
+ *
+ */
+static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+
+ switch (event) {
+ case IAP_RECV_F_LST:
+ /*iriap_send_ack(self);*/
+ /*LM_Idle_request(idle); */
+
+ iriap_next_call_state(self, S_WAIT_FOR_CALL);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/*
+ * Function state_s_replying (event, skb)
+ *
+ * S-Replying, The device is collecting a multiple part response
+ */
+static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+/*
+ * Function state_s_wait_for_call (event, skb)
+ *
+ * S-Wait-for-Call
+ *
+ */
+static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+
+/*
+ * Function state_s_wait_active (event, skb)
+ *
+ * S-Wait-Active
+ *
+ */
+static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+/**************************************************************************
+ *
+ * Server FSM
+ *
+ **************************************************************************/
+
+/*
+ * Function state_r_disconnect (self, event, skb)
+ *
+ * LM-IAS server is disconnected (not processing any requests!)
+ *
+ */
+static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+
+ switch (event) {
+ case IAP_LM_CONNECT_INDICATION:
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (tx_skb == NULL)
+ return;
+
+ /* Reserve space for MUX_CONTROL and LAP header */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+
+ irlmp_connect_response(self->lsap, tx_skb);
+ /*LM_Idle_request(idle); */
+
+ iriap_next_server_state(self, R_CALL);
+
+ /*
+ * Jump to R-Connect FSM, we skip R-Waiting since we do not
+ * care about LM_Idle_request()!
+ */
+ iriap_next_r_connect_state(self, R_RECEIVING);
+ break;
+ default:
+ pr_debug("%s(), unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/*
+ * Function state_r_call (self, event, skb)
+ */
+static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ switch (event) {
+ case IAP_LM_DISCONNECT_INDICATION:
+ /* Abort call */
+ iriap_next_server_state(self, R_DISCONNECT);
+ iriap_next_r_connect_state(self, R_WAITING);
+ break;
+ default:
+ pr_debug("%s(), unknown event!\n", __func__);
+ break;
+ }
+}
+
+/*
+ * R-Connect FSM
+ */
+
+/*
+ * Function state_r_waiting (self, event, skb)
+ */
+static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), Not implemented\n", __func__);
+}
+
+/*
+ * Function state_r_receiving (self, event, skb)
+ *
+ * We are receiving a command
+ *
+ */
+static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ switch (event) {
+ case IAP_RECV_F_LST:
+ iriap_next_r_connect_state(self, R_EXECUTE);
+
+ iriap_call_indication(self, skb);
+ break;
+ default:
+ pr_debug("%s(), unknown event!\n", __func__);
+ break;
+ }
+}
+
+/*
+ * Function state_r_execute (self, event, skb)
+ *
+ * The server is processing the request
+ *
+ */
+static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
+
+ switch (event) {
+ case IAP_CALL_RESPONSE:
+ /*
+ * Since we don't implement the Waiting state, we return
+ * to state Receiving instead, DB.
+ */
+ iriap_next_r_connect_state(self, R_RECEIVING);
+
+ /* Don't forget to refcount it - see
+ * iriap_getvaluebyclass_response(). */
+ skb_get(skb);
+
+ irlmp_data_request(self->lsap, skb);
+ break;
+ default:
+ pr_debug("%s(), unknown event!\n", __func__);
+ break;
+ }
+}
+
+static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), event=%d\n", __func__, event);
+
+ switch (event) {
+ case IAP_RECV_F_LST:
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/staging/irda/net/irias_object.c b/drivers/staging/irda/net/irias_object.c
new file mode 100644
index 000000000000..53b86d0e1630
--- /dev/null
+++ b/drivers/staging/irda/net/irias_object.c
@@ -0,0 +1,555 @@
+/*********************************************************************
+ *
+ * Filename: irias_object.c
+ * Version: 0.3
+ * Description: IAS object database and functions
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 1 22:50:04 1998
+ * Modified at: Wed Dec 15 11:23:16 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/module.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irias_object.h>
+
+hashbin_t *irias_objects;
+
+/*
+ * Used when a missing value needs to be returned
+ */
+struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}};
+
+
+/*
+ * Function irias_new_object (name, id)
+ *
+ * Create a new IAS object
+ *
+ */
+struct ias_object *irias_new_object( char *name, int id)
+{
+ struct ias_object *obj;
+
+ obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
+ if (obj == NULL) {
+ net_warn_ratelimited("%s(), Unable to allocate object!\n",
+ __func__);
+ return NULL;
+ }
+
+ obj->magic = IAS_OBJECT_MAGIC;
+ obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
+ if (!obj->name) {
+ net_warn_ratelimited("%s(), Unable to allocate name!\n",
+ __func__);
+ kfree(obj);
+ return NULL;
+ }
+ obj->id = id;
+
+	/* Locking notes : the attrib spinlock has lower precedence
+	 * than the objects spinlock. Never grab the objects spinlock
+ * while holding any attrib spinlock (risk of deadlock). Jean II */
+ obj->attribs = hashbin_new(HB_LOCK);
+
+ if (obj->attribs == NULL) {
+ net_warn_ratelimited("%s(), Unable to allocate attribs!\n",
+ __func__);
+ kfree(obj->name);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
+}
+EXPORT_SYMBOL(irias_new_object);
+
+/*
+ * Function irias_delete_attrib (attrib)
+ *
+ * Delete given attribute and deallocate all its memory
+ *
+ */
+static void __irias_delete_attrib(struct ias_attrib *attrib)
+{
+ IRDA_ASSERT(attrib != NULL, return;);
+ IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC, return;);
+
+ kfree(attrib->name);
+
+ irias_delete_value(attrib->value);
+ attrib->magic = ~IAS_ATTRIB_MAGIC;
+
+ kfree(attrib);
+}
+
+void __irias_delete_object(struct ias_object *obj)
+{
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+
+ kfree(obj->name);
+
+ hashbin_delete(obj->attribs, (FREE_FUNC) __irias_delete_attrib);
+
+ obj->magic = ~IAS_OBJECT_MAGIC;
+
+ kfree(obj);
+}
+
+/*
+ * Function irias_delete_object (obj)
+ *
+ *    Remove object from hashbin and deallocate all attributes associated
+ *    with this object, as well as the object itself
+ *
+ */
+int irias_delete_object(struct ias_object *obj)
+{
+ struct ias_object *node;
+
+ IRDA_ASSERT(obj != NULL, return -1;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;);
+
+ /* Remove from list */
+ node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj);
+ if (!node)
+ pr_debug("%s(), object already removed!\n",
+ __func__);
+
+ /* Destroy */
+ __irias_delete_object(obj);
+
+ return 0;
+}
+EXPORT_SYMBOL(irias_delete_object);
+
+/*
+ * Function irias_delete_attrib (obj)
+ *
+ * Remove attribute from hashbin and, if it was the last attribute of
+ * the object, remove the object as well.
+ *
+ */
+int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib,
+ int cleanobject)
+{
+ struct ias_attrib *node;
+
+ IRDA_ASSERT(obj != NULL, return -1;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;);
+ IRDA_ASSERT(attrib != NULL, return -1;);
+
+ /* Remove attribute from object */
+ node = hashbin_remove_this(obj->attribs, (irda_queue_t *) attrib);
+ if (!node)
+ return 0; /* Already removed or non-existent */
+
+ /* Deallocate attribute */
+ __irias_delete_attrib(node);
+
+	/* Check if the object still has some attributes; destroy it if none.
+	 * At first glance this looks dangerous, as the kernel references
+	 * various IAS objects. However, we only use this function on
+ * user attributes, not kernel attributes, so there is no risk
+ * of deleting a kernel object this way. Jean II */
+ node = (struct ias_attrib *) hashbin_get_first(obj->attribs);
+ if (cleanobject && !node)
+ irias_delete_object(obj);
+
+ return 0;
+}
+
+/*
+ * Function irias_insert_object (obj)
+ *
+ * Insert an object into the LM-IAS database
+ *
+ */
+void irias_insert_object(struct ias_object *obj)
+{
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+
+ hashbin_insert(irias_objects, (irda_queue_t *) obj, 0, obj->name);
+}
+EXPORT_SYMBOL(irias_insert_object);
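
As a usage sketch mirroring what iriap_init() does for the "Device" object (the object
name and id below are illustrative), a service publishes its LM-IAS entry like this:

/* Sketch: advertise a service's TinyTP LSAP in the LM-IAS database. */
static void my_publish_service(__u8 lsap_sel)
{
	struct ias_object *obj;

	obj = irias_new_object("MyService", 0x1234);	/* illustrative id */
	if (!obj)
		return;

	irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel", lsap_sel,
				 IAS_KERNEL_ATTR);
	irias_insert_object(obj);
}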
+
+/*
+ * Function irias_find_object (name)
+ *
+ * Find object with given name
+ *
+ */
+struct ias_object *irias_find_object(char *name)
+{
+ IRDA_ASSERT(name != NULL, return NULL;);
+
+ /* Unsafe (locking), object might change */
+ return hashbin_lock_find(irias_objects, 0, name);
+}
+EXPORT_SYMBOL(irias_find_object);
+
+/*
+ * Function irias_find_attrib (obj, name)
+ *
+ * Find named attribute in object
+ *
+ */
+struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
+{
+ struct ias_attrib *attrib;
+
+ IRDA_ASSERT(obj != NULL, return NULL;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return NULL;);
+ IRDA_ASSERT(name != NULL, return NULL;);
+
+ attrib = hashbin_lock_find(obj->attribs, 0, name);
+ if (attrib == NULL)
+ return NULL;
+
+ /* Unsafe (locking), attrib might change */
+ return attrib;
+}
+
+/*
+ * Function irias_add_attrib (obj, attrib, owner)
+ *
+ * Add attribute to object
+ *
+ */
+static void irias_add_attrib(struct ias_object *obj, struct ias_attrib *attrib,
+ int owner)
+{
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+
+ IRDA_ASSERT(attrib != NULL, return;);
+ IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC, return;);
+
+ /* Set if attrib is owned by kernel or user space */
+ attrib->value->owner = owner;
+
+ hashbin_insert(obj->attribs, (irda_queue_t *) attrib, 0, attrib->name);
+}
+
+/*
+ * Function irias_object_change_attribute (obj_name, attrib_name, new_value)
+ *
+ * Change the value of an object's attribute.
+ *
+ */
+int irias_object_change_attribute(char *obj_name, char *attrib_name,
+ struct ias_value *new_value)
+{
+ struct ias_object *obj;
+ struct ias_attrib *attrib;
+ unsigned long flags;
+
+ /* Find object */
+ obj = hashbin_lock_find(irias_objects, 0, obj_name);
+ if (obj == NULL) {
+ net_warn_ratelimited("%s: Unable to find object: %s\n",
+ __func__, obj_name);
+ return -1;
+ }
+
+ /* Slightly unsafe (obj might get removed under us) */
+ spin_lock_irqsave(&obj->attribs->hb_spinlock, flags);
+
+ /* Find attribute */
+ attrib = hashbin_find(obj->attribs, 0, attrib_name);
+ if (attrib == NULL) {
+ net_warn_ratelimited("%s: Unable to find attribute: %s\n",
+ __func__, attrib_name);
+ spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
+ return -1;
+ }
+
+ if (attrib->value->type != new_value->type) {
+ pr_debug("%s(), changing value type not allowed!\n",
+ __func__);
+ spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
+ return -1;
+ }
+
+ /* Delete old value */
+ irias_delete_value(attrib->value);
+
+ /* Insert new value */
+ attrib->value = new_value;
+
+ /* Success */
+ spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(irias_object_change_attribute);
+
+/*
+ * Function irias_add_integer_attrib (obj, name, value, owner)
+ *
+ * Add an integer attribute to an LM-IAS object
+ *
+ */
+void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
+ int owner)
+{
+ struct ias_attrib *attrib;
+
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+ IRDA_ASSERT(name != NULL, return;);
+
+ attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
+ if (attrib == NULL) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ return;
+ }
+
+ attrib->magic = IAS_ATTRIB_MAGIC;
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
+
+ /* Insert value */
+ attrib->value = irias_new_integer_value(value);
+ if (!attrib->name || !attrib->value) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
+
+ irias_add_attrib(obj, attrib, owner);
+}
+EXPORT_SYMBOL(irias_add_integer_attrib);
+
+/*
+ * Function irias_add_octseq_attrib (obj, name, octets, len, owner)
+ *
+ * Add an octet sequence attribute to an LM-IAS object
+ *
+ */
+
+void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
+ int len, int owner)
+{
+ struct ias_attrib *attrib;
+
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+
+ IRDA_ASSERT(name != NULL, return;);
+ IRDA_ASSERT(octets != NULL, return;);
+
+ attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
+ if (attrib == NULL) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ return;
+ }
+
+ attrib->magic = IAS_ATTRIB_MAGIC;
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
+
+ attrib->value = irias_new_octseq_value(octets, len);
+ if (!attrib->name || !attrib->value) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
+
+ irias_add_attrib(obj, attrib, owner);
+}
+EXPORT_SYMBOL(irias_add_octseq_attrib);
+
+/*
+ * Function irias_add_string_attrib (obj, name, value, owner)
+ *
+ * Add a string attribute to an LM-IAS object
+ *
+ */
+void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
+ int owner)
+{
+ struct ias_attrib *attrib;
+
+ IRDA_ASSERT(obj != NULL, return;);
+ IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
+
+ IRDA_ASSERT(name != NULL, return;);
+ IRDA_ASSERT(value != NULL, return;);
+
+ attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
+ if (attrib == NULL) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ return;
+ }
+
+ attrib->magic = IAS_ATTRIB_MAGIC;
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
+
+ attrib->value = irias_new_string_value(value);
+ if (!attrib->name || !attrib->value) {
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
+
+ irias_add_attrib(obj, attrib, owner);
+}
+EXPORT_SYMBOL(irias_add_string_attrib);
+
+/*
+ * Function irias_new_integer_value (integer)
+ *
+ * Create new IAS integer value
+ *
+ */
+struct ias_value *irias_new_integer_value(int integer)
+{
+ struct ias_value *value;
+
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ if (value == NULL)
+ return NULL;
+
+ value->type = IAS_INTEGER;
+ value->len = 4;
+ value->t.integer = integer;
+
+ return value;
+}
+EXPORT_SYMBOL(irias_new_integer_value);
+
+/*
+ * Function irias_new_string_value (string)
+ *
+ * Create new IAS string value
+ *
+ * Per IrLMP 1.1, 4.3.3.2, strings are up to 256 chars - Jean II
+ */
+struct ias_value *irias_new_string_value(char *string)
+{
+ struct ias_value *value;
+
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ if (value == NULL)
+ return NULL;
+
+ value->type = IAS_STRING;
+ value->charset = CS_ASCII;
+ value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
+ if (!value->t.string) {
+ net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__);
+ kfree(value);
+ return NULL;
+ }
+
+ value->len = strlen(value->t.string);
+
+ return value;
+}
+
+/*
+ * Function irias_new_octseq_value (octets, len)
+ *
+ * Create new IAS octet-sequence value
+ *
+ * Per IrLMP 1.1, 4.3.3.2, octet-sequences are up to 1024 bytes - Jean II
+ */
+struct ias_value *irias_new_octseq_value(__u8 *octseq, int len)
+{
+ struct ias_value *value;
+
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ if (value == NULL)
+ return NULL;
+
+ value->type = IAS_OCT_SEQ;
+ /* Check length */
+ if (len > IAS_MAX_OCTET_STRING)
+ len = IAS_MAX_OCTET_STRING;
+ value->len = len;
+
+ value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC);
+ if (value->t.oct_seq == NULL) {
+ net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__);
+ kfree(value);
+ return NULL;
+ }
+ return value;
+}
+
+struct ias_value *irias_new_missing_value(void)
+{
+ struct ias_value *value;
+
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ if (value == NULL)
+ return NULL;
+
+ value->type = IAS_MISSING;
+
+ return value;
+}
+
+/*
+ * Function irias_delete_value (value)
+ *
+ * Delete IAS value
+ *
+ */
+void irias_delete_value(struct ias_value *value)
+{
+ IRDA_ASSERT(value != NULL, return;);
+
+ switch (value->type) {
+ case IAS_INTEGER: /* Fallthrough */
+ case IAS_MISSING:
+ /* No need to deallocate */
+ break;
+ case IAS_STRING:
+ /* Deallocate string */
+ kfree(value->t.string);
+ break;
+ case IAS_OCT_SEQ:
+ /* Deallocate byte stream */
+ kfree(value->t.oct_seq);
+ break;
+ default:
+ pr_debug("%s(), Unknown value type!\n", __func__);
+ break;
+ }
+ kfree(value);
+}
+EXPORT_SYMBOL(irias_delete_value);
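The exported helpers above cover the whole lifecycle of an LM-IAS entry: create an object, attach attributes, publish it in the database, and later look it up or swap in a new value. Below is a minimal sketch of how a kernel-side user might drive this API; the "Demo" object name and DEMO_IAS_ID identifier are hypothetical, invented only for illustration (irlan_ias_register() later in this patch is the real in-tree user).

/* Hedged sketch: publish a hypothetical "Demo" IAS object, or update it
 * if a previous provider already registered it. */
#include <net/irda/irias_object.h>

#define DEMO_IAS_ID 0x5342		/* hypothetical object identifier */

static void demo_ias_register(__u8 tsap_sel)
{
	struct ias_object *obj;
	struct ias_value *val;

	if (!irias_find_object("Demo")) {
		/* First registration: build the object and insert it */
		obj = irias_new_object("Demo", DEMO_IAS_ID);
		if (!obj)
			return;
		irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel",
					 tsap_sel, IAS_KERNEL_ATTR);
		irias_insert_object(obj);
	} else {
		/* Already registered: only the attribute value changes */
		val = irias_new_integer_value(tsap_sel);
		if (val)
			irias_object_change_attribute("Demo",
						      "IrDA:TinyTP:LsapSel",
						      val);
	}
}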
diff --git a/drivers/staging/irda/net/irlan/Kconfig b/drivers/staging/irda/net/irlan/Kconfig
new file mode 100644
index 000000000000..951abc2e3a7f
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/Kconfig
@@ -0,0 +1,14 @@
+config IRLAN
+ tristate "IrLAN protocol"
+ depends on IRDA
+ help
+ Say Y here if you want to build support for the IrLAN protocol.
+ To compile it as a module, choose M here: the module will be called
+ irlan. IrLAN emulates an Ethernet and makes it possible to put up
+ a wireless LAN using infrared beams.
+
+ The IrLAN protocol can be used to talk with infrared access points
+ like the HP NetbeamIR, or the ESI JetEye NET. You can also connect
+ to another Linux machine running the IrLAN protocol for ad-hoc
+ networking!
+
diff --git a/drivers/staging/irda/net/irlan/Makefile b/drivers/staging/irda/net/irlan/Makefile
new file mode 100644
index 000000000000..94eefbc8e6b9
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux IrDA IrLAN protocol layer.
+#
+
+obj-$(CONFIG_IRLAN) += irlan.o
+
+irlan-y := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
diff --git a/drivers/staging/irda/net/irlan/irlan_client.c b/drivers/staging/irda/net/irlan/irlan_client.c
new file mode 100644
index 000000000000..c5837a40c78e
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_client.c
@@ -0,0 +1,559 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client.c
+ * Version: 0.9
+ * Description: IrDA LAN Access Protocol (IrLAN) Client
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Tue Dec 14 15:47:02 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/bitops.h>
+#include <net/arp.h>
+
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/iriap.h>
+#include <net/irda/timer.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_event.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_client.h>
+
+#undef CONFIG_IRLAN_GRATUITOUS_ARP
+
+static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *);
+static int irlan_client_ctrl_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *);
+static void irlan_check_response_param(struct irlan_cb *self, char *param,
+ char *value, int val_len);
+static void irlan_client_open_ctrl_tsap(struct irlan_cb *self);
+
+static void irlan_client_kick_timer_expired(void *data)
+{
+ struct irlan_cb *self = (struct irlan_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * If we are in peer mode, the client may not have got the discovery
+ * indication it needs to make progress. If the client is still in
+ * IDLE state, we must kick it too, but only if the provider is not IDLE
+ */
+ if ((self->provider.access_type == ACCESS_PEER) &&
+ (self->client.state == IRLAN_IDLE) &&
+ (self->provider.state != IRLAN_IDLE)) {
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+ }
+}
+
+static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
+{
+ irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
+ irlan_client_kick_timer_expired);
+}
+
+/*
+ * Function irlan_client_wakeup (self, saddr, daddr)
+ *
+ * Wake up client
+ *
+ */
+void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * Check if we are already awake, or if we are a provider in direct
+ * mode (in that case we must leave the client idle)
+ */
+ if ((self->client.state != IRLAN_IDLE) ||
+ (self->provider.access_type == ACCESS_DIRECT))
+ {
+ pr_debug("%s(), already awake!\n", __func__);
+ return;
+ }
+
+ /* Addresses may have changed! */
+ self->saddr = saddr;
+ self->daddr = daddr;
+
+ if (self->disconnect_reason == LM_USER_REQUEST) {
+ pr_debug("%s(), still stopped by user\n", __func__);
+ return;
+ }
+
+ /* Open TSAPs */
+ irlan_client_open_ctrl_tsap(self);
+ irlan_open_data_tsap(self);
+
+ irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL);
+
+ /* Start kick timer */
+ irlan_client_start_kick_timer(self, 2*HZ);
+}
+
+/*
+ * Function irlan_client_discovery_indication (discovery, mode, priv)
+ *
+ * Remote device with IrLAN server support discovered
+ *
+ */
+void irlan_client_discovery_indication(discinfo_t *discovery,
+ DISCOVERY_MODE mode,
+ void *priv)
+{
+ struct irlan_cb *self;
+ __u32 saddr, daddr;
+
+ IRDA_ASSERT(discovery != NULL, return;);
+
+ /*
+ * I didn't check it, but I bet that IrLAN suffers from the same
+ * deficiency as IrComm and doesn't handle two instances
+ * simultaneously connecting to each other.
+ * Same workaround, drop passive discoveries.
+ * Jean II */
+ if (mode == DISCOVERY_PASSIVE)
+ return;
+
+ saddr = discovery->saddr;
+ daddr = discovery->daddr;
+
+ /* Find instance */
+ rcu_read_lock();
+ self = irlan_get_any();
+ if (self) {
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;);
+
+ pr_debug("%s(), Found instance (%08x)!\n", __func__ ,
+ daddr);
+
+ irlan_client_wakeup(self, saddr, daddr);
+ }
+IRDA_ASSERT_LABEL(out:)
+ rcu_read_unlock();
+}
+
+/*
+ * Function irlan_client_ctrl_data_indication (instance, sap, skb)
+ *
+ * This function gets the data that is received on the control channel
+ *
+ */
+static int irlan_client_ctrl_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
+
+ /* Ready for a new command */
+ pr_debug("%s(), clearing tx_busy\n", __func__);
+ self->client.tx_busy = FALSE;
+
+ /* Check if we have some queued commands waiting to be sent */
+ irlan_run_ctrl_tx_queue(self);
+
+ return 0;
+}
+
+static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+ struct sk_buff *skb;
+
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
+
+ self = instance;
+ tsap = sap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+ IRDA_ASSERT(tsap != NULL, return;);
+ IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ IRDA_ASSERT(tsap == self->client.tsap_ctrl, return;);
+
+ /* Remove frames queued on the control channel */
+ while ((skb = skb_dequeue(&self->client.txq)) != NULL) {
+ dev_kfree_skb(skb);
+ }
+ self->client.tx_busy = FALSE;
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+}
+
+/*
+ * Function irlan_client_open_ctrl_tsap (self)
+ *
+ * Initialize callbacks and open IrTTP TSAPs
+ *
+ */
+static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
+{
+ struct tsap_cb *tsap;
+ notify_t notify;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if already open */
+ if (self->client.tsap_ctrl)
+ return;
+
+ irda_notify_init(&notify);
+
+ /* Set up callbacks */
+ notify.data_indication = irlan_client_ctrl_data_indication;
+ notify.connect_confirm = irlan_client_ctrl_connect_confirm;
+ notify.disconnect_indication = irlan_client_ctrl_disconnect_indication;
+ notify.instance = self;
+ strlcpy(notify.name, "IrLAN ctrl (c)", sizeof(notify.name));
+
+ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
+ if (!tsap) {
+ pr_debug("%s(), Got no tsap!\n", __func__);
+ return;
+ }
+ self->client.tsap_ctrl = tsap;
+}
+
+/*
+ * Function irlan_client_ctrl_connect_confirm (instance, sap, qos, ...)
+ *
+ * Connection to peer IrLAN layer confirmed
+ *
+ */
+static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ self->client.max_sdu_size = max_sdu_size;
+ self->client.max_header_size = max_header_size;
+
+ /* TODO: we could set the MTU depending on the max_sdu_size */
+
+ irlan_do_client_event(self, IRLAN_CONNECT_COMPLETE, NULL);
+}
+
+/*
+ * Function print_ret_code (code)
+ *
+ * Print return code of request to peer IrLAN layer.
+ *
+ */
+static void print_ret_code(__u8 code)
+{
+ switch (code) {
+ case 0:
+ printk(KERN_INFO "Success\n");
+ break;
+ case 1:
+ net_warn_ratelimited("IrLAN: Insufficient resources\n");
+ break;
+ case 2:
+ net_warn_ratelimited("IrLAN: Invalid command format\n");
+ break;
+ case 3:
+ net_warn_ratelimited("IrLAN: Command not supported\n");
+ break;
+ case 4:
+ net_warn_ratelimited("IrLAN: Parameter not supported\n");
+ break;
+ case 5:
+ net_warn_ratelimited("IrLAN: Value not supported\n");
+ break;
+ case 6:
+ net_warn_ratelimited("IrLAN: Not open\n");
+ break;
+ case 7:
+ net_warn_ratelimited("IrLAN: Authentication required\n");
+ break;
+ case 8:
+ net_warn_ratelimited("IrLAN: Invalid password\n");
+ break;
+ case 9:
+ net_warn_ratelimited("IrLAN: Protocol error\n");
+ break;
+ case 255:
+ net_warn_ratelimited("IrLAN: Asynchronous status\n");
+ break;
+ }
+}
+
+/*
+ * Function irlan_client_parse_response (self, skb)
+ *
+ * Extract all parameters from received buffer, then feed them to
+ * check_params for parsing
+ */
+void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
+{
+ __u8 *frame;
+ __u8 *ptr;
+ int count;
+ int ret;
+ __u16 val_len;
+ int i;
+ char *name;
+ char *value;
+
+ IRDA_ASSERT(skb != NULL, return;);
+
+ pr_debug("%s() skb->len=%d\n", __func__ , (int)skb->len);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ if (!skb) {
+ net_err_ratelimited("%s(), Got NULL skb!\n", __func__);
+ return;
+ }
+ frame = skb->data;
+
+ /*
+ * Check return code and print it if not success
+ */
+ if (frame[0]) {
+ print_ret_code(frame[0]);
+ return;
+ }
+
+ name = kmalloc(255, GFP_ATOMIC);
+ if (!name)
+ return;
+ value = kmalloc(1016, GFP_ATOMIC);
+ if (!value) {
+ kfree(name);
+ return;
+ }
+
+ /* How many parameters? */
+ count = frame[1];
+
+ pr_debug("%s(), got %d parameters\n", __func__ , count);
+
+ ptr = frame+2;
+
+ /* For all parameters */
+ for (i = 0; i < count; i++) {
+ ret = irlan_extract_param(ptr, name, value, &val_len);
+ if (ret < 0) {
+ pr_debug("%s(), IrLAN, Error!\n", __func__);
+ break;
+ }
+ ptr += ret;
+ irlan_check_response_param(self, name, value, val_len);
+ }
+ /* Cleanup */
+ kfree(name);
+ kfree(value);
+}
+
+/*
+ * Function irlan_check_response_param (self, param, value, val_len)
+ *
+ * Check which parameter is received and update local variables
+ *
+ */
+static void irlan_check_response_param(struct irlan_cb *self, char *param,
+ char *value, int val_len)
+{
+ __u16 tmp_cpu; /* Temporary value in host order */
+ __u8 *bytes;
+ int i;
+
+ pr_debug("%s(), parm=%s\n", __func__ , param);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Media type */
+ if (strcmp(param, "MEDIA") == 0) {
+ if (strcmp(value, "802.3") == 0)
+ self->media = MEDIA_802_3;
+ else
+ self->media = MEDIA_802_5;
+ return;
+ }
+ if (strcmp(param, "FILTER_TYPE") == 0) {
+ if (strcmp(value, "DIRECTED") == 0)
+ self->client.filter_type |= IRLAN_DIRECTED;
+ else if (strcmp(value, "FUNCTIONAL") == 0)
+ self->client.filter_type |= IRLAN_FUNCTIONAL;
+ else if (strcmp(value, "GROUP") == 0)
+ self->client.filter_type |= IRLAN_GROUP;
+ else if (strcmp(value, "MAC_FRAME") == 0)
+ self->client.filter_type |= IRLAN_MAC_FRAME;
+ else if (strcmp(value, "MULTICAST") == 0)
+ self->client.filter_type |= IRLAN_MULTICAST;
+ else if (strcmp(value, "BROADCAST") == 0)
+ self->client.filter_type |= IRLAN_BROADCAST;
+ else if (strcmp(value, "IPX_SOCKET") == 0)
+ self->client.filter_type |= IRLAN_IPX_SOCKET;
+
+ }
+ if (strcmp(param, "ACCESS_TYPE") == 0) {
+ if (strcmp(value, "DIRECT") == 0)
+ self->client.access_type = ACCESS_DIRECT;
+ else if (strcmp(value, "PEER") == 0)
+ self->client.access_type = ACCESS_PEER;
+ else if (strcmp(value, "HOSTED") == 0)
+ self->client.access_type = ACCESS_HOSTED;
+ else {
+ pr_debug("%s(), unknown access type!\n", __func__);
+ }
+ }
+ /* IRLAN version */
+ if (strcmp(param, "IRLAN_VER") == 0) {
+ pr_debug("IrLAN version %d.%d\n", (__u8)value[0],
+ (__u8)value[1]);
+
+ self->version[0] = value[0];
+ self->version[1] = value[1];
+ return;
+ }
+ /* Which remote TSAP to use for data channel */
+ if (strcmp(param, "DATA_CHAN") == 0) {
+ self->dtsap_sel_data = value[0];
+ pr_debug("Data TSAP = %02x\n", self->dtsap_sel_data);
+ return;
+ }
+ if (strcmp(param, "CON_ARB") == 0) {
+ memcpy(&tmp_cpu, value, 2); /* Align value */
+ le16_to_cpus(&tmp_cpu); /* Convert to host order */
+ self->client.recv_arb_val = tmp_cpu;
+ pr_debug("%s(), receive arb val=%d\n", __func__ ,
+ self->client.recv_arb_val);
+ }
+ if (strcmp(param, "MAX_FRAME") == 0) {
+ memcpy(&tmp_cpu, value, 2); /* Align value */
+ le16_to_cpus(&tmp_cpu); /* Convert to host order */
+ self->client.max_frame = tmp_cpu;
+ pr_debug("%s(), max frame=%d\n", __func__ ,
+ self->client.max_frame);
+ }
+
+ /* RECONNECT_KEY, in case the link goes down! */
+ if (strcmp(param, "RECONNECT_KEY") == 0) {
+ pr_debug("Got reconnect key: ");
+ /* for (i = 0; i < val_len; i++) */
+ /* printk("%02x", value[i]); */
+ memcpy(self->client.reconnect_key, value, val_len);
+ self->client.key_len = val_len;
+ pr_debug("\n");
+ }
+ /* FILTER_ENTRY, have we got an ethernet address? */
+ if (strcmp(param, "FILTER_ENTRY") == 0) {
+ bytes = value;
+ pr_debug("Ethernet address = %pM\n", bytes);
+ for (i = 0; i < 6; i++)
+ self->dev->dev_addr[i] = bytes[i];
+ }
+}
+
+/*
+ * Function irlan_client_get_value_confirm (obj_id, value)
+ *
+ * Got results from remote LM-IAS
+ *
+ */
+void irlan_client_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct irlan_cb *self;
+
+ IRDA_ASSERT(priv != NULL, return;);
+
+ self = priv;
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->client.iriap);
+ self->client.iriap = NULL;
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ pr_debug("%s(), got NULL value!\n", __func__);
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL,
+ NULL);
+ return;
+ }
+
+ switch (value->type) {
+ case IAS_INTEGER:
+ self->dtsap_sel_ctrl = value->t.integer;
+
+ if (value->t.integer != -1) {
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_AVAIL,
+ NULL);
+ return;
+ }
+ irias_delete_value(value);
+ break;
+ default:
+ pr_debug("%s(), unknown type!\n", __func__);
+ break;
+ }
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL);
+}
diff --git a/drivers/staging/irda/net/irlan/irlan_client_event.c b/drivers/staging/irda/net/irlan/irlan_client_event.c
new file mode 100644
index 000000000000..cc93fabbbb19
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_client_event.c
@@ -0,0 +1,511 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client_event.c
+ * Version: 0.9
+ * Description: IrLAN client state machine
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun Dec 26 21:52:24 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/timer.h>
+#include <net/irda/irmod.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irttp.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_event.h>
+
+static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
+{
+ irlan_client_state_idle,
+ irlan_client_state_query,
+ irlan_client_state_conn,
+ irlan_client_state_info,
+ irlan_client_state_media,
+ irlan_client_state_open,
+ irlan_client_state_wait,
+ irlan_client_state_arb,
+ irlan_client_state_data,
+ irlan_client_state_close,
+ irlan_client_state_sync
+};
+
+void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ (*state[self->client.state])(self, event, skb);
+}
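irlan_do_client_event() is a classic table-driven state machine: the current state indexes the state[] array of handler functions, and every handler receives the event plus an optional sk_buff. A stripped-down, self-contained illustration of the same dispatch pattern is sketched below; the states, events and handler names are invented for illustration and carry no IrDA dependencies.

/* Hedged sketch of the table-driven dispatch used by irlan_do_client_event();
 * every name here is hypothetical. */
#include <stdio.h>

enum demo_state { DEMO_IDLE, DEMO_OPEN, DEMO_NSTATES };
enum demo_event { DEMO_EV_CONNECT, DEMO_EV_DISCONNECT };

static int demo_idle(enum demo_event event)
{
	if (event == DEMO_EV_CONNECT)
		printf("IDLE: starting connection\n");
	return 0;
}

static int demo_open(enum demo_event event)
{
	if (event == DEMO_EV_DISCONNECT)
		printf("OPEN: tearing down\n");
	return 0;
}

/* One handler per state, indexed exactly like state[] above */
static int (*demo_state_table[DEMO_NSTATES])(enum demo_event) = {
	demo_idle,
	demo_open,
};

static int demo_do_event(enum demo_state state, enum demo_event event)
{
	return (*demo_state_table[state])(event);
}

int main(void)
{
	demo_do_event(DEMO_IDLE, DEMO_EV_CONNECT);
	demo_do_event(DEMO_OPEN, DEMO_EV_DISCONNECT);
	return 0;
}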
+
+/*
+ * Function irlan_client_state_idle (event, skb, info)
+ *
+ * IDLE, We are waiting for an indication that there is a provider
+ * available.
+ */
+static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch (event) {
+ case IRLAN_DISCOVERY_INDICATION:
+ if (self->client.iriap) {
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irlan_client_get_value_confirm);
+ /* Get some values from peer IAS */
+ irlan_next_client_state(self, IRLAN_QUERY);
+ iriap_getvaluebyclass_request(self->client.iriap,
+ self->saddr, self->daddr,
+ "IrLAN", "IrDA:TinyTP:LsapSel");
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_query (event, skb, info)
+ *
+ * QUERY, We have queried the remote IAS and are ready to connect
+ * to the provider; we are just waiting for the confirm.
+ *
+ */
+static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch (event) {
+ case IRLAN_IAS_PROVIDER_AVAIL:
+ IRDA_ASSERT(self->dtsap_sel_ctrl != 0, return -1;);
+
+ self->client.open_retries = 0;
+
+ irttp_connect_request(self->client.tsap_ctrl,
+ self->dtsap_sel_ctrl,
+ self->saddr, self->daddr, NULL,
+ IRLAN_MTU, NULL);
+ irlan_next_client_state(self, IRLAN_CONN);
+ break;
+ case IRLAN_IAS_PROVIDER_NOT_AVAIL:
+ pr_debug("%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__);
+ irlan_next_client_state(self, IRLAN_IDLE);
+
+ /* Give the client a kick! */
+ if ((self->provider.access_type == ACCESS_PEER) &&
+ (self->provider.state != IRLAN_IDLE))
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_conn (event, skb, info)
+ *
+ * CONN, We have connected to a provider but have not issued any
+ * commands yet.
+ *
+ */
+static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_CONNECT_COMPLETE:
+ /* Send getinfo cmd */
+ irlan_get_provider_info(self);
+ irlan_next_client_state(self, IRLAN_INFO);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_info (self, event, skb, info)
+ *
+ * INFO, We have issued a GetInfo command and are awaiting a reply.
+ */
+static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_DATA_INDICATION:
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ irlan_client_parse_response(self, skb);
+
+ irlan_next_client_state(self, IRLAN_MEDIA);
+
+ irlan_get_media_char(self);
+ break;
+
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_media (self, event, skb, info)
+ *
+ * MEDIA, The irlan_client has issued a GetMedia command and is awaiting a
+ * reply.
+ *
+ */
+static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+ irlan_open_data_channel(self);
+ irlan_next_client_state(self, IRLAN_OPEN);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_open (self, event, skb, info)
+ *
+ * OPEN, The irlan_client has issued an OpenData command and is awaiting a
+ * reply
+ *
+ */
+static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ struct qos_info qos;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+
+ /*
+ * Check if we have got the remote TSAP for data
+ * communications
+ */
+ IRDA_ASSERT(self->dtsap_sel_data != 0, return -1;);
+
+ /* Check which access type we are dealing with */
+ switch (self->client.access_type) {
+ case ACCESS_PEER:
+ if (self->provider.state == IRLAN_OPEN) {
+
+ irlan_next_client_state(self, IRLAN_ARB);
+ irlan_do_client_event(self, IRLAN_CHECK_CON_ARB,
+ NULL);
+ } else {
+
+ irlan_next_client_state(self, IRLAN_WAIT);
+ }
+ break;
+ case ACCESS_DIRECT:
+ case ACCESS_HOSTED:
+ qos.link_disc_time.bits = 0x01; /* 3 secs */
+
+ irttp_connect_request(self->tsap_data,
+ self->dtsap_sel_data,
+ self->saddr, self->daddr, &qos,
+ IRLAN_MTU, NULL);
+
+ irlan_next_client_state(self, IRLAN_DATA);
+ break;
+ default:
+ pr_debug("%s(), unknown access type!\n", __func__);
+ break;
+ }
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_wait (self, event, skb, info)
+ *
+ * WAIT, The irlan_client is waiting for the local provider to enter the
+ * provider OPEN state.
+ *
+ */
+static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_PROVIDER_SIGNAL:
+ irlan_next_client_state(self, IRLAN_ARB);
+ irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, NULL);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ struct qos_info qos;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch (event) {
+ case IRLAN_CHECK_CON_ARB:
+ if (self->client.recv_arb_val == self->provider.send_arb_val) {
+ irlan_next_client_state(self, IRLAN_CLOSE);
+ irlan_close_data_channel(self);
+ } else if (self->client.recv_arb_val <
+ self->provider.send_arb_val)
+ {
+ qos.link_disc_time.bits = 0x01; /* 3 secs */
+
+ irlan_next_client_state(self, IRLAN_DATA);
+ irttp_connect_request(self->tsap_data,
+ self->dtsap_sel_data,
+ self->saddr, self->daddr, &qos,
+ IRLAN_MTU, NULL);
+ } else if (self->client.recv_arb_val >
+ self->provider.send_arb_val)
+ {
+ pr_debug("%s(), lost the battle :-(\n", __func__);
+ }
+ break;
+ case IRLAN_DATA_CONNECT_INDICATION:
+ irlan_next_client_state(self, IRLAN_DATA);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_data (self, event, skb, info)
+ *
+ * DATA, The data channel is connected, allowing data transfers between
+ * the local and remote machines.
+ *
+ */
+static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch (event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_close (self, event, skb, info)
+ *
+ *
+ *
+ */
+static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_sync (self, event, skb, info)
+ *
+ *
+ *
+ */
+static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/drivers/staging/irda/net/irlan/irlan_common.c b/drivers/staging/irda/net/irlan/irlan_common.c
new file mode 100644
index 000000000000..481bbc2a4349
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_common.c
@@ -0,0 +1,1176 @@
+/*********************************************************************
+ *
+ * Filename: irlan_common.c
+ * Version: 0.9
+ * Description: IrDA LAN Access Protocol Implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sun Dec 26 21:53:10 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/iriap.h>
+#include <net/irda/timer.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_filter.h>
+
+
+/* extern char sysctl_devname[]; */
+
+/*
+ * Master structure
+ */
+static LIST_HEAD(irlans);
+
+static void *ckey;
+static void *skey;
+
+/* Module parameters */
+static bool eth; /* Use "eth" or "irlan" name for devices */
+static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
+
+#ifdef CONFIG_PROC_FS
+static const char *const irlan_access[] = {
+ "UNKNOWN",
+ "DIRECT",
+ "PEER",
+ "HOSTED"
+};
+
+static const char *const irlan_media[] = {
+ "UNKNOWN",
+ "802.3",
+ "802.5"
+};
+
+extern struct proc_dir_entry *proc_irda;
+
+static int irlan_seq_open(struct inode *inode, struct file *file);
+
+static const struct file_operations irlan_fops = {
+ .owner = THIS_MODULE,
+ .open = irlan_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr);
+static void __irlan_close(struct irlan_cb *self);
+static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
+ __u8 value_byte, __u16 value_short,
+ __u8 *value_array, __u16 value_len);
+static void irlan_open_unicast_addr(struct irlan_cb *self);
+static void irlan_get_unicast_addr(struct irlan_cb *self);
+void irlan_close_tsaps(struct irlan_cb *self);
+
+/*
+ * Function irlan_init (void)
+ *
+ * Initialize IrLAN layer
+ *
+ */
+static int __init irlan_init(void)
+{
+ struct irlan_cb *new;
+ __u16 hints;
+
+#ifdef CONFIG_PROC_FS
+ { struct proc_dir_entry *proc;
+ proc = proc_create("irlan", 0, proc_irda, &irlan_fops);
+ if (!proc) {
+ printk(KERN_ERR "irlan_init: can't create /proc entry!\n");
+ return -ENODEV;
+ }
+ }
+#endif /* CONFIG_PROC_FS */
+
+ hints = irlmp_service_to_hint(S_LAN);
+
+ /* Register with IrLMP as a client */
+ ckey = irlmp_register_client(hints, &irlan_client_discovery_indication,
+ NULL, NULL);
+ if (!ckey)
+ goto err_ckey;
+
+ /* Register with IrLMP as a service */
+ skey = irlmp_register_service(hints);
+ if (!skey)
+ goto err_skey;
+
+ /* Start the master IrLAN instance (the only one for now) */
+ new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY);
+ if (!new)
+ goto err_open;
+
+ /* The master will only open its (listen) control TSAP */
+ irlan_provider_open_ctrl_tsap(new);
+
+ /* Do some fast discovery! */
+ irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
+
+ return 0;
+
+err_open:
+ irlmp_unregister_service(skey);
+err_skey:
+ irlmp_unregister_client(ckey);
+err_ckey:
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("irlan", proc_irda);
+#endif /* CONFIG_PROC_FS */
+
+ return -ENOMEM;
+}
+
+static void __exit irlan_cleanup(void)
+{
+ struct irlan_cb *self, *next;
+
+ irlmp_unregister_client(ckey);
+ irlmp_unregister_service(skey);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("irlan", proc_irda);
+#endif /* CONFIG_PROC_FS */
+
+ /* Cleanup any leftover network devices */
+ rtnl_lock();
+ list_for_each_entry_safe(self, next, &irlans, dev_list) {
+ __irlan_close(self);
+ }
+ rtnl_unlock();
+}
+
+/*
+ * Function irlan_open (void)
+ *
+ * Open a new instance of a client/provider. We should only register the
+ * network device if this instance is meant for a particular client/provider
+ */
+static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
+{
+ struct net_device *dev;
+ struct irlan_cb *self;
+
+ /* Create network device with irlan */
+ dev = alloc_irlandev(eth ? "eth%d" : "irlan%d");
+ if (!dev)
+ return NULL;
+
+ self = netdev_priv(dev);
+ self->dev = dev;
+
+ /*
+ * Initialize local device structure
+ */
+ self->magic = IRLAN_MAGIC;
+ self->saddr = saddr;
+ self->daddr = daddr;
+
+ /* Provider access can only be PEER, DIRECT, or HOSTED */
+ self->provider.access_type = access;
+ if (access == ACCESS_DIRECT) {
+ /*
+ * Since we are emulating an IrLAN server we will have to
+ * give ourselves an Ethernet address!
+ */
+ dev->dev_addr[0] = 0x40;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x00;
+ dev->dev_addr[3] = 0x00;
+ get_random_bytes(dev->dev_addr+4, 1);
+ get_random_bytes(dev->dev_addr+5, 1);
+ }
+
+ self->media = MEDIA_802_3;
+ self->disconnect_reason = LM_USER_REQUEST;
+ init_timer(&self->watchdog_timer);
+ init_timer(&self->client.kick_timer);
+ init_waitqueue_head(&self->open_wait);
+
+ skb_queue_head_init(&self->client.txq);
+
+ irlan_next_client_state(self, IRLAN_IDLE);
+ irlan_next_provider_state(self, IRLAN_IDLE);
+
+ if (register_netdev(dev)) {
+ pr_debug("%s(), register_netdev() failed!\n",
+ __func__);
+ self = NULL;
+ free_netdev(dev);
+ } else {
+ rtnl_lock();
+ list_add_rcu(&self->dev_list, &irlans);
+ rtnl_unlock();
+ }
+
+ return self;
+}
+/*
+ * Function __irlan_close (self)
+ *
+ * This function closes and deallocates an IrLAN client instance. Be
+ * aware that other functions which call __irlan_close() must
+ * remove self from the irlans list first.
+ */
+static void __irlan_close(struct irlan_cb *self)
+{
+ ASSERT_RTNL();
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ del_timer_sync(&self->watchdog_timer);
+ del_timer_sync(&self->client.kick_timer);
+
+ /* Close all open connections and remove TSAPs */
+ irlan_close_tsaps(self);
+
+ if (self->client.iriap)
+ iriap_close(self->client.iriap);
+
+ /* Remove frames queued on the control channel */
+ skb_queue_purge(&self->client.txq);
+
+ /* Unregister and free self via destructor */
+ unregister_netdevice(self->dev);
+}
+
+/* Find any instance of irlan, used for client discovery wakeup */
+struct irlan_cb *irlan_get_any(void)
+{
+ struct irlan_cb *self;
+
+ list_for_each_entry_rcu(self, &irlans, dev_list) {
+ return self;
+ }
+ return NULL;
+}
+
+/*
+ * Function irlan_connect_indication (instance, sap, qos, max_sdu_size, skb)
+ *
+ * Here we receive the connect indication for the data channel
+ *
+ */
+static void irlan_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ self = instance;
+ tsap = sap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+ IRDA_ASSERT(tsap == self->tsap_data,return;);
+
+ self->max_sdu_size = max_sdu_size;
+ self->max_header_size = max_header_size;
+
+ pr_debug("%s: We are now connected!\n", __func__);
+
+ del_timer(&self->watchdog_timer);
+
+ /* If you want to pass the skb to *both* state machines, you will
+ * need to skb_clone() it, so that you don't free it twice.
+ * As the state machines don't need it, get rid of it here...
+ * Jean II */
+ if (skb)
+ dev_kfree_skb(skb);
+
+ irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
+ irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
+
+ if (self->provider.access_type == ACCESS_PEER) {
+ /*
+ * Data channel is open, so we are now allowed to
+ * configure the remote filter
+ */
+ irlan_get_unicast_addr(self);
+ irlan_open_unicast_addr(self);
+ }
+ /* Ready to transfer Ethernet frames (at last) */
+ netif_start_queue(self->dev);
+}
+
+static void irlan_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ self->max_sdu_size = max_sdu_size;
+ self->max_header_size = max_header_size;
+
+ /* TODO: we could set the MTU depending on the max_sdu_size */
+
+ pr_debug("%s: We are now connected!\n", __func__);
+ del_timer(&self->watchdog_timer);
+
+ /*
+ * Data channel is open, so we are now allowed to configure the remote
+ * filter
+ */
+ irlan_get_unicast_addr(self);
+ irlan_open_unicast_addr(self);
+
+ /* Open broadcast and multicast filter by default */
+ irlan_set_broadcast_filter(self, TRUE);
+ irlan_set_multicast_filter(self, TRUE);
+
+ /* Ready to transfer Ethernet frames */
+ netif_start_queue(self->dev);
+ self->disconnect_reason = 0; /* Clear reason */
+ wake_up_interruptible(&self->open_wait);
+}
+
+/*
+ * Function irlan_client_disconnect_indication (handle)
+ *
+ * Callback function for the IrTTP layer. Indicates a disconnection of
+ * the specified connection (handle)
+ */
+static void irlan_disconnect_indication(void *instance,
+ void *sap, LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
+
+ self = instance;
+ tsap = sap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+ IRDA_ASSERT(tsap != NULL, return;);
+ IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ IRDA_ASSERT(tsap == self->tsap_data, return;);
+
+ pr_debug("IrLAN, data channel disconnected by peer!\n");
+
+ /* Save reason so we know if we should try to reconnect or not */
+ self->disconnect_reason = reason;
+
+ switch (reason) {
+ case LM_USER_REQUEST: /* User request */
+ pr_debug("%s(), User requested\n", __func__);
+ break;
+ case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */
+ pr_debug("%s(), Unexpected IrLAP disconnect\n", __func__);
+ break;
+ case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */
+ pr_debug("%s(), IrLAP connect failed\n", __func__);
+ break;
+ case LM_LAP_RESET: /* IrLAP reset */
+ pr_debug("%s(), IrLAP reset\n", __func__);
+ break;
+ case LM_INIT_DISCONNECT:
+ pr_debug("%s(), IrLMP connect failed\n", __func__);
+ break;
+ default:
+ net_err_ratelimited("%s(), Unknown disconnect reason\n",
+ __func__);
+ break;
+ }
+
+ /* If you want to pass the skb to *both* state machines, you will
+ * need to skb_clone() it, so that you don't free it twice.
+ * As the state machines don't need it, get rid of it here...
+ * Jean II */
+ if (userdata)
+ dev_kfree_skb(userdata);
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+
+ wake_up_interruptible(&self->open_wait);
+}
+
+void irlan_open_data_tsap(struct irlan_cb *self)
+{
+ struct tsap_cb *tsap;
+ notify_t notify;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if already open */
+ if (self->tsap_data)
+ return;
+
+ irda_notify_init(&notify);
+
+ notify.data_indication = irlan_eth_receive;
+ notify.udata_indication = irlan_eth_receive;
+ notify.connect_indication = irlan_connect_indication;
+ notify.connect_confirm = irlan_connect_confirm;
+ notify.flow_indication = irlan_eth_flow_indication;
+ notify.disconnect_indication = irlan_disconnect_indication;
+ notify.instance = self;
+ strlcpy(notify.name, "IrLAN data", sizeof(notify.name));
+
+ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
+ if (!tsap) {
+ pr_debug("%s(), Got no tsap!\n", __func__);
+ return;
+ }
+ self->tsap_data = tsap;
+
+ /*
+ * This is the data TSAP selector which we will pass to the client
+ * when the client asks for it.
+ */
+ self->stsap_sel_data = self->tsap_data->stsap_sel;
+}
+
+void irlan_close_tsaps(struct irlan_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Disconnect and close all open TSAP connections */
+ if (self->tsap_data) {
+ irttp_disconnect_request(self->tsap_data, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap_data);
+ self->tsap_data = NULL;
+ }
+ if (self->client.tsap_ctrl) {
+ irttp_disconnect_request(self->client.tsap_ctrl, NULL,
+ P_NORMAL);
+ irttp_close_tsap(self->client.tsap_ctrl);
+ self->client.tsap_ctrl = NULL;
+ }
+ if (self->provider.tsap_ctrl) {
+ irttp_disconnect_request(self->provider.tsap_ctrl, NULL,
+ P_NORMAL);
+ irttp_close_tsap(self->provider.tsap_ctrl);
+ self->provider.tsap_ctrl = NULL;
+ }
+ self->disconnect_reason = LM_USER_REQUEST;
+}
+
+/*
+ * Function irlan_ias_register (self, tsap_sel)
+ *
+ * Register with LM-IAS
+ *
+ */
+void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel)
+{
+ struct ias_object *obj;
+ struct ias_value *new_value;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * Check if object has already been registered by a previous provider.
+ * If that is the case, we just change the value of the attribute
+ */
+ if (!irias_find_object("IrLAN")) {
+ obj = irias_new_object("IrLAN", IAS_IRLAN_ID);
+ irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel", tsap_sel,
+ IAS_KERNEL_ATTR);
+ irias_insert_object(obj);
+ } else {
+ new_value = irias_new_integer_value(tsap_sel);
+ irias_object_change_attribute("IrLAN", "IrDA:TinyTP:LsapSel",
+ new_value);
+ }
+
+ /* Register PnP object only if not registered before */
+ if (!irias_find_object("PnP")) {
+ obj = irias_new_object("PnP", IAS_PNP_ID);
+#if 0
+ irias_add_string_attrib(obj, "Name", sysctl_devname,
+ IAS_KERNEL_ATTR);
+#else
+ irias_add_string_attrib(obj, "Name", "Linux", IAS_KERNEL_ATTR);
+#endif
+ irias_add_string_attrib(obj, "DeviceID", "HWP19F0",
+ IAS_KERNEL_ATTR);
+ irias_add_integer_attrib(obj, "CompCnt", 1, IAS_KERNEL_ATTR);
+ if (self->provider.access_type == ACCESS_PEER)
+ irias_add_string_attrib(obj, "Comp#01", "PNP8389",
+ IAS_KERNEL_ATTR);
+ else
+ irias_add_string_attrib(obj, "Comp#01", "PNP8294",
+ IAS_KERNEL_ATTR);
+
+ irias_add_string_attrib(obj, "Manufacturer",
+ "Linux-IrDA Project", IAS_KERNEL_ATTR);
+ irias_insert_object(obj);
+ }
+}
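The "IrDA:TinyTP:LsapSel" value registered here is exactly what the client side asks the peer for over IrIAP: irlan_client_state_idle() in irlan_client_event.c opens an IAS client and queries the remote "IrLAN" object. A condensed sketch of that peer-side lookup, using only calls that appear in this patch, might look like this (demo_query_peer_irlan() is a made-up wrapper name):

/* Hedged sketch condensed from irlan_client_state_idle(); only the
 * wrapper name demo_query_peer_irlan() is invented. */
#include <net/irda/irda.h>
#include <net/irda/iriap.h>
#include <net/irda/irlan_common.h>
#include <net/irda/irlan_client.h>

static void demo_query_peer_irlan(struct irlan_cb *self)
{
	if (self->client.iriap)
		return;		/* a previous query is still in flight */

	self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
					irlan_client_get_value_confirm);
	if (!self->client.iriap)
		return;

	/* Ask the peer's LM-IAS for the control-channel TSAP selector */
	iriap_getvaluebyclass_request(self->client.iriap,
				      self->saddr, self->daddr,
				      "IrLAN", "IrDA:TinyTP:LsapSel");
}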
+
+/*
+ * Function irlan_run_ctrl_tx_queue (self)
+ *
+ * Try to send the next command in the control transmit queue
+ *
+ */
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+
+ if (irda_lock(&self->client.tx_busy) == FALSE)
+ return -EBUSY;
+
+ skb = skb_dequeue(&self->client.txq);
+ if (!skb) {
+ self->client.tx_busy = FALSE;
+ return 0;
+ }
+
+ /* Check that it's really possible to send commands */
+ if ((self->client.tsap_ctrl == NULL) ||
+ (self->client.state == IRLAN_IDLE))
+ {
+ self->client.tx_busy = FALSE;
+ dev_kfree_skb(skb);
+ return -1;
+ }
+ pr_debug("%s(), sending ...\n", __func__);
+
+ return irttp_data_request(self->client.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_ctrl_data_request (self, skb)
+ *
+ * This function makes sure that commands on the control channel are
+ * sent in a command/response fashion
+ */
+static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb)
+{
+ /* Queue command */
+ skb_queue_tail(&self->client.txq, skb);
+
+ /* Try to send command */
+ irlan_run_ctrl_tx_queue(self);
+}
+
+/*
+ * Function irlan_get_provider_info (self)
+ *
+ * Send Get Provider Information command to peer IrLAN layer
+ *
+ */
+void irlan_get_provider_info(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER,
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_GET_PROVIDER_INFO;
+ frame[1] = 0x00; /* Zero parameters */
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_open_data_channel (self)
+ *
+ * Send an Open Data Command to provider
+ *
+ */
+void irlan_open_data_channel(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3") +
+ IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "DIRECT"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ /* Build frame */
+ frame[0] = CMD_OPEN_DATA_CHANNEL;
+ frame[1] = 0x02; /* Two parameters */
+
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
+ /* irlan_insert_string_param(skb, "MODE", "UNRELIABLE"); */
+
+/* self->use_udata = TRUE; */
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+void irlan_close_data_channel(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if the TSAP is still there */
+ if (self->client.tsap_ctrl == NULL)
+ return;
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ /* Build frame */
+ frame[0] = CMD_CLOSE_DATA_CHAN;
+ frame[1] = 0x01; /* One parameter */
+
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_open_unicast_addr (self)
+ *
+ * Make IrLAN provider accept ethernet frames addressed to the unicast
+ * address.
+ *
+ */
+static void irlan_open_unicast_addr(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "FILTER"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_FILTER_OPERATION;
+ frame[1] = 0x03; /* Three parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN" , self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_set_broadcast_filter (self, status)
+ *
+ * Make IrLAN provider accept ethernet frames addressed to the broadcast
+ * address. Be careful with the use of this one, since there may be a lot
+ * of broadcast traffic out there. We can still function without this
+ * one but then _we_ have to initiate all communication with other
+ * hosts, since ARP requests for this host will not be answered.
+ */
+void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
+ /* We may waste one byte here...*/
+ IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "FILTER"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_FILTER_OPERATION;
+ frame[1] = 0x03; /* Three parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
+ if (status)
+ irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
+ else
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_set_multicast_filter (self, status)
+ *
+ * Make IrLAN provider accept ethernet frames addressed to the multicast
+ * address.
+ *
+ */
+void irlan_set_multicast_filter(struct irlan_cb *self, int status)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
+ /* We may waste one byte here...*/
+ IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "NONE"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_FILTER_OPERATION;
+ frame[1] = 0x03; /* Three parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
+ if (status)
+ irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
+ else
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_get_unicast_addr (self)
+ *
+ * Retrieves the unicast address from the IrLAN provider. This address
+ * will be inserted into the device structure, so the ethernet layer
+ * can construct its packets.
+ *
+ */
+static void irlan_get_unicast_addr(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_OPERATION",
+ "DYNAMIC"),
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_FILTER_OPERATION;
+ frame[1] = 0x03; /* Three parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC");
+
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function irlan_get_media_char (self)
+ *
+ * Send a Get Media Characteristics command to the peer IrLAN layer
+ *
+ */
+void irlan_get_media_char(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3"),
+ GFP_ATOMIC);
+
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->client.max_header_size);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ /* Build frame */
+ frame[0] = CMD_GET_MEDIA_CHAR;
+ frame[1] = 0x01; /* One parameter */
+
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
+ irlan_ctrl_data_request(self, skb);
+}
+
+/*
+ * Function insert_byte_param (skb, param, value)
+ *
+ * Insert byte parameter into frame
+ *
+ */
+int irlan_insert_byte_param(struct sk_buff *skb, char *param, __u8 value)
+{
+ return __irlan_insert_param(skb, param, IRLAN_BYTE, value, 0, NULL, 0);
+}
+
+int irlan_insert_short_param(struct sk_buff *skb, char *param, __u16 value)
+{
+ return __irlan_insert_param(skb, param, IRLAN_SHORT, 0, value, NULL, 0);
+}
+
+/*
+ * Function insert_string (skb, param, value)
+ *
+ * Insert string parameter into frame
+ *
+ */
+int irlan_insert_string_param(struct sk_buff *skb, char *param, char *string)
+{
+ int string_len = strlen(string);
+
+ return __irlan_insert_param(skb, param, IRLAN_ARRAY, 0, 0, string,
+ string_len);
+}
+
+/*
+ * Function insert_array_param(skb, param, value, len_value)
+ *
+ * Insert array parameter into frame
+ *
+ */
+int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *array,
+ __u16 array_len)
+{
+ return __irlan_insert_param(skb, name, IRLAN_ARRAY, 0, 0, array,
+ array_len);
+}
+
+/*
+ * Function insert_param (skb, param, value, byte)
+ *
+ * Insert parameter at end of buffer, structure of a parameter is:
+ *
+ * -----------------------------------------------------------------------
+ * | Name Length[1] | Param Name[1..255] | Val Length[2] | Value[0..1016]|
+ * -----------------------------------------------------------------------
+ */
+static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
+ __u8 value_byte, __u16 value_short,
+ __u8 *value_array, __u16 value_len)
+{
+ __u8 *frame;
+ __u8 param_len;
+ __le16 tmp_le; /* Temporary value in little endian format */
+ int n=0;
+
+ if (skb == NULL) {
+ pr_debug("%s(), Got NULL skb\n", __func__);
+ return 0;
+ }
+
+ param_len = strlen(param);
+ switch (type) {
+ case IRLAN_BYTE:
+ value_len = 1;
+ break;
+ case IRLAN_SHORT:
+ value_len = 2;
+ break;
+ case IRLAN_ARRAY:
+ IRDA_ASSERT(value_array != NULL, return 0;);
+ IRDA_ASSERT(value_len > 0, return 0;);
+ break;
+ default:
+ pr_debug("%s(), Unknown parameter type!\n", __func__);
+ return 0;
+ }
+
+ /* Insert at end of sk-buffer */
+ frame = skb_tail_pointer(skb);
+
+ /* Make space for data */
+ if (skb_tailroom(skb) < (param_len+value_len+3)) {
+ pr_debug("%s(), No more space at end of skb\n", __func__);
+ return 0;
+ }
+ skb_put(skb, param_len+value_len+3);
+
+ /* Insert parameter length */
+ frame[n++] = param_len;
+
+ /* Insert parameter */
+ memcpy(frame+n, param, param_len); n += param_len;
+
+ /* Insert value length (2 byte little endian format, LSB first) */
+ tmp_le = cpu_to_le16(value_len);
+ memcpy(frame+n, &tmp_le, 2); n += 2; /* To avoid alignment problems */
+
+ /* Insert value */
+ switch (type) {
+ case IRLAN_BYTE:
+ frame[n++] = value_byte;
+ break;
+ case IRLAN_SHORT:
+ tmp_le = cpu_to_le16(value_short);
+ memcpy(frame+n, &tmp_le, 2); n += 2;
+ break;
+ case IRLAN_ARRAY:
+ memcpy(frame+n, value_array, value_len); n+=value_len;
+ break;
+ default:
+ break;
+ }
+ IRDA_ASSERT(n == (param_len+value_len+3), return 0;);
+
+ return param_len+value_len+3;
+}
+
+/*
+ * Function irlan_extract_param (buf, name, value, len)
+ *
+ * Extracts a single parameter name/value pair from the buffer and
+ * returns the number of bytes consumed, so the caller can advance to
+ * the next name/value pair.
+ */
+int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
+{
+ __u8 name_len;
+ __u16 val_len;
+ int n=0;
+
+ /* get length of parameter name (1 byte) */
+ name_len = buf[n++];
+
+ if (name_len > 254) {
+ pr_debug("%s(), name_len > 254\n", __func__);
+ return -RSP_INVALID_COMMAND_FORMAT;
+ }
+
+ /* get parameter name */
+ memcpy(name, buf+n, name_len);
+ name[name_len] = '\0';
+ n+=name_len;
+
+ /*
+ * Get length of parameter value (2 bytes in little endian
+ * format)
+ */
+ memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
+ le16_to_cpus(&val_len); n+=2;
+
+ if (val_len >= 1016) {
+ pr_debug("%s(), parameter length to long\n", __func__);
+ return -RSP_INVALID_COMMAND_FORMAT;
+ }
+ *len = val_len;
+
+ /* get parameter value */
+ memcpy(value, buf+n, val_len);
+ value[val_len] = '\0';
+ n+=val_len;
+
+ pr_debug("Parameter: %s ", name);
+ pr_debug("Value: %s\n", value);
+
+ return n;
+}
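
The insert/extract pair above agrees on the same layout: a one-byte name length, the name itself, a two-byte little-endian value length, and then the value, which is itself little-endian for IRLAN_SHORT parameters. The following is a small user-space round-trip sketch of that layout, assuming nothing beyond what the two functions above implement; all helper names are local to the sketch.

/* Sketch only: encode and decode one IRLAN_SHORT parameter using the
 * layout implemented by __irlan_insert_param()/irlan_extract_param(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t put_short_param(uint8_t *buf, const char *name, uint16_t value)
{
	size_t n = 0, name_len = strlen(name);

	buf[n++] = (uint8_t)name_len;            /* name length */
	memcpy(buf + n, name, name_len);         /* name */
	n += name_len;
	buf[n++] = 0x02;                         /* value length, LSB first */
	buf[n++] = 0x00;
	buf[n++] = (uint8_t)(value & 0xff);      /* value, little endian */
	buf[n++] = (uint8_t)(value >> 8);
	return n;
}

static size_t get_param(const uint8_t *buf, char *name, uint8_t *value, uint16_t *val_len)
{
	size_t n = 0;
	uint8_t name_len = buf[n++];

	memcpy(name, buf + n, name_len);
	name[name_len] = '\0';
	n += name_len;
	*val_len = buf[n] | (buf[n + 1] << 8);   /* little endian */
	n += 2;
	memcpy(value, buf + n, *val_len);
	return n + *val_len;
}

int main(void)
{
	uint8_t buf[64], value[8];
	char name[256];
	uint16_t val_len;

	size_t len = put_short_param(buf, "IRLAN_VER", 0x0101);
	size_t used = get_param(buf, name, value, &val_len);

	assert(len == used && val_len == 2);
	printf("%s = 0x%02x%02x (%zu bytes on the wire)\n",
	       name, value[1], value[0], len);
	return 0;
}
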
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Start of reading /proc entries.
+ * Return entry at pos,
+ * or start_token to indicate print header line
+ * or NULL if end of file
+ */
+static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ rcu_read_lock();
+ return seq_list_start_head(&irlans, *pos);
+}
+
+/* Return entry after v, and increment pos */
+static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &irlans, pos);
+}
+
+/* End of reading /proc file */
+static void irlan_seq_stop(struct seq_file *seq, void *v)
+{
+ rcu_read_unlock();
+}
+
+
+/*
+ * Show one entry in /proc file.
+ */
+static int irlan_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == &irlans)
+ seq_puts(seq, "IrLAN instances:\n");
+ else {
+ struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ seq_printf(seq,"ifname: %s,\n",
+ self->dev->name);
+ seq_printf(seq,"client state: %s, ",
+ irlan_state[ self->client.state]);
+ seq_printf(seq,"provider state: %s,\n",
+ irlan_state[ self->provider.state]);
+ seq_printf(seq,"saddr: %#08x, ",
+ self->saddr);
+ seq_printf(seq,"daddr: %#08x\n",
+ self->daddr);
+ seq_printf(seq,"version: %d.%d,\n",
+ self->version[1], self->version[0]);
+ seq_printf(seq,"access type: %s\n",
+ irlan_access[self->client.access_type]);
+ seq_printf(seq,"media: %s\n",
+ irlan_media[self->media]);
+
+ seq_printf(seq,"local filter:\n");
+ seq_printf(seq,"remote filter: ");
+ irlan_print_filter(seq, self->client.filter_type);
+ seq_printf(seq,"tx busy: %s\n",
+ netif_queue_stopped(self->dev) ? "TRUE" : "FALSE");
+
+ seq_putc(seq,'\n');
+ }
+ return 0;
+}
+
+static const struct seq_operations irlan_seq_ops = {
+ .start = irlan_seq_start,
+ .next = irlan_seq_next,
+ .stop = irlan_seq_stop,
+ .show = irlan_seq_show,
+};
+
+static int irlan_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &irlan_seq_ops);
+}
+#endif
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("The Linux IrDA LAN protocol");
+MODULE_LICENSE("GPL");
+
+module_param(eth, bool, 0);
+MODULE_PARM_DESC(eth, "Name devices ethX (0) or irlanX (1)");
+module_param(access, int, 0);
+MODULE_PARM_DESC(access, "Access type DIRECT=1, PEER=2, HOSTED=3");
+
+module_init(irlan_init);
+module_exit(irlan_cleanup);
+
diff --git a/drivers/staging/irda/net/irlan/irlan_eth.c b/drivers/staging/irda/net/irlan/irlan_eth.c
new file mode 100644
index 000000000000..3be852808a9d
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_eth.c
@@ -0,0 +1,340 @@
+/*********************************************************************
+ *
+ * Filename: irlan_eth.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Thu Oct 15 08:37:58 1998
+ * Modified at: Tue Mar 21 09:06:41 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/arp.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_event.h>
+#include <net/irda/irlan_eth.h>
+
+static int irlan_eth_open(struct net_device *dev);
+static int irlan_eth_close(struct net_device *dev);
+static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void irlan_eth_set_multicast_list(struct net_device *dev);
+
+static const struct net_device_ops irlan_eth_netdev_ops = {
+ .ndo_open = irlan_eth_open,
+ .ndo_stop = irlan_eth_close,
+ .ndo_start_xmit = irlan_eth_xmit,
+ .ndo_set_rx_mode = irlan_eth_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * Function irlan_eth_setup (dev)
+ *
+ * The network device initialization function.
+ *
+ */
+static void irlan_eth_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &irlan_eth_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ /*
+ * Let's do all queueing in IrTTP instead of this device driver.
+ * Queueing here as well can introduce some strange latency
+ * problems, which we originally avoided by setting the queue size to 0.
+ */
+ /*
+ * The bugs in IrTTP and IrLAN that created this latency issue
+ * have now been fixed, and we can propagate flow control properly
+ * to the network layer. However, this requires a minimal queue of
+ * packets for the device.
+ * Without flow control, the Tx Queue is 14 (ttp) + 0 (dev) = 14
+ * With flow control, the Tx Queue is 7 (ttp) + 4 (dev) = 11
+ * See irlan_eth_flow_indication()...
+ * Note : this number was randomly selected and would need to
+ * be adjusted.
+ * Jean II */
+ dev->tx_queue_len = 4;
+}
+
+/*
+ * Function alloc_irlandev
+ *
+ * Allocate network device and control block
+ *
+ */
+struct net_device *alloc_irlandev(const char *name)
+{
+ return alloc_netdev(sizeof(struct irlan_cb), name, NET_NAME_UNKNOWN,
+ irlan_eth_setup);
+}
+
+/*
+ * Function irlan_eth_open (dev)
+ *
+ * Network device has been opened by user
+ *
+ */
+static int irlan_eth_open(struct net_device *dev)
+{
+ struct irlan_cb *self = netdev_priv(dev);
+
+ /* Ready to play! */
+ netif_stop_queue(dev); /* Wait until data link is ready */
+
+ /* We are now open, so time to do some work */
+ self->disconnect_reason = 0;
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+
+ /* Make sure we have a hardware address before we return,
+ so DHCP clients get happy */
+ return wait_event_interruptible(self->open_wait,
+ !self->tsap_data->connected);
+}
+
+/*
+ * Function irlan_eth_close (dev)
+ *
+ * Stop the Ethernet network device; this function will usually be called
+ * by ifconfig down. We should now disconnect the link. We start the
+ * close timer, so that the instance will be removed if we are unable
+ * to discover the remote device after the disconnect.
+ */
+static int irlan_eth_close(struct net_device *dev)
+{
+ struct irlan_cb *self = netdev_priv(dev);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ irlan_close_data_channel(self);
+ irlan_close_tsaps(self);
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+
+ /* Remove frames queued on the control channel */
+ skb_queue_purge(&self->client.txq);
+
+ self->client.tx_busy = 0;
+
+ return 0;
+}
+
+/*
+ * Function irlan_eth_xmit (skb, dev)
+ *
+ * Transmit Ethernet frames over the IrDA link.
+ *
+ */
+static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct irlan_cb *self = netdev_priv(dev);
+ int ret;
+ unsigned int len;
+
+ /* skb headroom large enough to contain all IrDA-headers? */
+ if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
+ struct sk_buff *new_skb =
+ skb_realloc_headroom(skb, self->max_header_size);
+
+ /* We have to free the original skb anyway */
+ dev_kfree_skb(skb);
+
+ /* Did the realloc succeed? */
+ if (new_skb == NULL)
+ return NETDEV_TX_OK;
+
+ /* Use the new skb instead */
+ skb = new_skb;
+ }
+
+ netif_trans_update(dev);
+
+ len = skb->len;
+ /* Now queue the packet in the transport layer */
+ if (self->use_udata)
+ ret = irttp_udata_request(self->tsap_data, skb);
+ else
+ ret = irttp_data_request(self->tsap_data, skb);
+
+ if (ret < 0) {
+ /*
+ * IrTTP's tx queue is full, so we just have to
+ * drop the frame! You might think that we should
+ * just return -1 and not deallocate the frame,
+ * but that is dangerous since it's possible that
+ * we have replaced the original skb with a new
+ * one with larger headroom, and that would really
+ * confuse do_dev_queue_xmit() in dev.c! I have
+ * tried :-) DB
+ */
+ /* irttp_data_request has already freed the packet */
+ dev->stats.tx_dropped++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function irlan_eth_receive (instance, sap, skb)
+ *
+ * This function gets the data that is received on the data channel
+ *
+ */
+int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
+{
+ struct irlan_cb *self = instance;
+ struct net_device *dev = self->dev;
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ return 0;
+ }
+ if (skb->len < ETH_HLEN) {
+ pr_debug("%s() : IrLAN frame too short (%d)\n",
+ __func__, skb->len);
+ dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ /*
+ * Adopt this frame! Important to set all these fields since they
+ * might have been previously set by the low level IrDA network
+ * device driver
+ */
+ skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+
+ netif_rx(skb); /* Eat it! */
+
+ return 0;
+}
+
+/*
+ * Function irlan_eth_flow_indication (instance, sap, flow)
+ *
+ * Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
+ * controlling the queue stop/start.
+ *
+ * The IrDA link layer has the advantage of having flow control, and
+ * IrTTP now handles that properly. Flow controlling the higher layers
+ * prevents us from dropping Tx packets in here (up to 15% for a TCP
+ * socket, more for a UDP socket).
+ * This also allows us to reduce the overall transmit queue, which means
+ * less latency in case of mixed traffic.
+ * Jean II
+ */
+void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
+{
+ struct irlan_cb *self;
+ struct net_device *dev;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ dev = self->dev;
+
+ IRDA_ASSERT(dev != NULL, return;);
+
+ pr_debug("%s() : flow %s ; running %d\n", __func__,
+ flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
+ netif_running(dev));
+
+ switch (flow) {
+ case FLOW_STOP:
+ /* IrTTP is full, stop higher layers */
+ netif_stop_queue(dev);
+ break;
+ case FLOW_START:
+ default:
+ /* Tell upper layers that it's time to transmit frames again */
+ /* Schedule network layer */
+ netif_wake_queue(dev);
+ break;
+ }
+}
+
+/*
+ * Function irlan_eth_set_multicast_list (dev)
+ *
+ * Configure the filtering of the device
+ *
+ */
+#define HW_MAX_ADDRS 4 /* Must query to get it! */
+static void irlan_eth_set_multicast_list(struct net_device *dev)
+{
+ struct irlan_cb *self = netdev_priv(dev);
+
+ /* Check if data channel has been connected yet */
+ if (self->client.state != IRLAN_DATA) {
+ pr_debug("%s(), delaying!\n", __func__);
+ return;
+ }
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ net_warn_ratelimited("Promiscuous mode not implemented by IrLAN!\n");
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS) {
+ /* Disable promiscuous mode, use normal mode. */
+ pr_debug("%s(), Setting multicast filter\n", __func__);
+ /* hardware_set_filter(NULL); */
+
+ irlan_set_multicast_filter(self, TRUE);
+ } else if (!netdev_mc_empty(dev)) {
+ pr_debug("%s(), Setting multicast filter\n", __func__);
+ /* Walk the address list, and load the filter */
+ /* hardware_set_filter(dev->mc_list); */
+
+ irlan_set_multicast_filter(self, TRUE);
+ } else {
+ pr_debug("%s(), Clearing multicast filter\n", __func__);
+ irlan_set_multicast_filter(self, FALSE);
+ }
+
+ if (dev->flags & IFF_BROADCAST)
+ irlan_set_broadcast_filter(self, TRUE);
+ else
+ irlan_set_broadcast_filter(self, FALSE);
+}
diff --git a/drivers/staging/irda/net/irlan/irlan_event.c b/drivers/staging/irda/net/irlan/irlan_event.c
new file mode 100644
index 000000000000..9a1cc11c16f6
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_event.c
@@ -0,0 +1,60 @@
+/*********************************************************************
+ *
+ * Filename: irlan_event.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Oct 20 09:10:16 1998
+ * Modified at: Sat Oct 30 12:59:01 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <net/irda/irlan_event.h>
+
+const char * const irlan_state[] = {
+ "IRLAN_IDLE",
+ "IRLAN_QUERY",
+ "IRLAN_CONN",
+ "IRLAN_INFO",
+ "IRLAN_MEDIA",
+ "IRLAN_OPEN",
+ "IRLAN_WAIT",
+ "IRLAN_ARB",
+ "IRLAN_DATA",
+ "IRLAN_CLOSE",
+ "IRLAN_SYNC",
+};
+
+void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
+{
+ pr_debug("%s(), %s\n", __func__ , irlan_state[state]);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ self->client.state = state;
+}
+
+void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state)
+{
+ pr_debug("%s(), %s\n", __func__ , irlan_state[state]);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ self->provider.state = state;
+}
+
diff --git a/drivers/staging/irda/net/irlan/irlan_filter.c b/drivers/staging/irda/net/irlan/irlan_filter.c
new file mode 100644
index 000000000000..e755e90b2f26
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_filter.c
@@ -0,0 +1,240 @@
+/*********************************************************************
+ *
+ * Filename: irlan_filter.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Jan 29 11:16:38 1999
+ * Modified at: Sat Oct 30 12:58:45 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_filter.h>
+
+/*
+ * Function irlan_filter_request (self, skb)
+ *
+ * Handle filter request from client peer device
+ *
+ */
+void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_operation == DYNAMIC))
+ {
+ pr_debug("Giving peer a dynamic Ethernet address\n");
+ self->provider.mac_address[0] = 0x40;
+ self->provider.mac_address[1] = 0x00;
+ self->provider.mac_address[2] = 0x00;
+ self->provider.mac_address[3] = 0x00;
+
+ /* Use arbitration value to generate MAC address */
+ if (self->provider.access_type == ACCESS_PEER) {
+ self->provider.mac_address[4] =
+ self->provider.send_arb_val & 0xff;
+ self->provider.mac_address[5] =
+ (self->provider.send_arb_val >> 8) & 0xff;
+ } else {
+ /* Just generate something for now */
+ get_random_bytes(self->provider.mac_address+4, 1);
+ get_random_bytes(self->provider.mac_address+5, 1);
+ }
+
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x03;
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+ irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001);
+ irlan_insert_array_param(skb, "FILTER_ENTRY",
+ self->provider.mac_address, 6);
+ return;
+ }
+
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ pr_debug("Directed filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_mode == NONE))
+ {
+ pr_debug("Directed filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+
+ if ((self->provider.filter_type == IRLAN_BROADCAST) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ pr_debug("Broadcast filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_BROADCAST) &&
+ (self->provider.filter_mode == NONE))
+ {
+ pr_debug("Broadcast filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ pr_debug("Multicast filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_mode == NONE))
+ {
+ pr_debug("Multicast filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_operation == GET))
+ {
+ pr_debug("Multicast filter get\n");
+ skb->data[0] = 0x00; /* Success? */
+ skb->data[1] = 0x02;
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+ irlan_insert_short_param(skb, "MAX_ENTRY", 16);
+ return;
+ }
+ skb->data[0] = 0x00; /* Command not supported */
+ skb->data[1] = 0x00;
+
+ pr_debug("Not implemented!\n");
+}
+
+/*
+ * Function check_request_param (self, param, value)
+ *
+ * Check parameters in request from peer device
+ *
+ */
+void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ pr_debug("%s, %s\n", param, value);
+
+ /*
+ * This is experimental!! DB.
+ */
+ if (strcmp(param, "MODE") == 0) {
+ self->use_udata = TRUE;
+ return;
+ }
+
+ /*
+ * FILTER_TYPE
+ */
+ if (strcmp(param, "FILTER_TYPE") == 0) {
+ if (strcmp(value, "DIRECTED") == 0) {
+ self->provider.filter_type = IRLAN_DIRECTED;
+ return;
+ }
+ if (strcmp(value, "MULTICAST") == 0) {
+ self->provider.filter_type = IRLAN_MULTICAST;
+ return;
+ }
+ if (strcmp(value, "BROADCAST") == 0) {
+ self->provider.filter_type = IRLAN_BROADCAST;
+ return;
+ }
+ }
+ /*
+ * FILTER_MODE
+ */
+ if (strcmp(param, "FILTER_MODE") == 0) {
+ if (strcmp(value, "ALL") == 0) {
+ self->provider.filter_mode = ALL;
+ return;
+ }
+ if (strcmp(value, "FILTER") == 0) {
+ self->provider.filter_mode = FILTER;
+ return;
+ }
+ if (strcmp(value, "NONE") == 0) {
+ self->provider.filter_mode = FILTER;
+ return;
+ }
+ }
+ /*
+ * FILTER_OPERATION
+ */
+ if (strcmp(param, "FILTER_OPERATION") == 0) {
+ if (strcmp(value, "DYNAMIC") == 0) {
+ self->provider.filter_operation = DYNAMIC;
+ return;
+ }
+ if (strcmp(value, "GET") == 0) {
+ self->provider.filter_operation = GET;
+ return;
+ }
+ }
+}
+
+/*
+ * Function irlan_print_filter (filter_type, buf)
+ *
+ * Print status of filter. Used by /proc file system
+ *
+ */
+#ifdef CONFIG_PROC_FS
+#define MASK2STR(m,s) { .mask = m, .str = s }
+
+void irlan_print_filter(struct seq_file *seq, int filter_type)
+{
+ static struct {
+ int mask;
+ const char *str;
+ } filter_mask2str[] = {
+ MASK2STR(IRLAN_DIRECTED, "DIRECTED"),
+ MASK2STR(IRLAN_FUNCTIONAL, "FUNCTIONAL"),
+ MASK2STR(IRLAN_GROUP, "GROUP"),
+ MASK2STR(IRLAN_MAC_FRAME, "MAC_FRAME"),
+ MASK2STR(IRLAN_MULTICAST, "MULTICAST"),
+ MASK2STR(IRLAN_BROADCAST, "BROADCAST"),
+ MASK2STR(IRLAN_IPX_SOCKET, "IPX_SOCKET"),
+ MASK2STR(0, NULL)
+ }, *p;
+
+ for (p = filter_mask2str; p->str; p++) {
+ if (filter_type & p->mask)
+ seq_printf(seq, "%s ", p->str);
+ }
+ seq_putc(seq, '\n');
+}
+#undef MASK2STR
+#endif
diff --git a/drivers/staging/irda/net/irlan/irlan_provider.c b/drivers/staging/irda/net/irlan/irlan_provider.c
new file mode 100644
index 000000000000..15c292cf2644
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_provider.c
@@ -0,0 +1,408 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider.c
+ * Version: 0.9
+ * Description: IrDA LAN Access Protocol Implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sat Oct 30 12:52:10 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/iriap.h>
+#include <net/irda/timer.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_event.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_filter.h>
+#include <net/irda/irlan_client.h>
+
+static void irlan_provider_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+
+/*
+ * Function irlan_provider_data_indication (instance, sap, skb)
+ *
+ * This function gets the data that is received on the control channel
+ *
+ */
+static int irlan_provider_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+ __u8 code;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ code = skb->data[0];
+ switch(code) {
+ case CMD_GET_PROVIDER_INFO:
+ pr_debug("Got GET_PROVIDER_INFO command!\n");
+ irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
+ break;
+
+ case CMD_GET_MEDIA_CHAR:
+ pr_debug("Got GET_MEDIA_CHAR command!\n");
+ irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
+ break;
+ case CMD_OPEN_DATA_CHANNEL:
+ pr_debug("Got OPEN_DATA_CHANNEL command!\n");
+ irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
+ break;
+ case CMD_FILTER_OPERATION:
+ pr_debug("Got FILTER_OPERATION command!\n");
+ irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
+ break;
+ case CMD_RECONNECT_DATA_CHAN:
+ pr_debug("%s(), Got RECONNECT_DATA_CHAN command\n", __func__);
+ pr_debug("%s(), NOT IMPLEMENTED\n", __func__);
+ break;
+ case CMD_CLOSE_DATA_CHAN:
+ pr_debug("Got CLOSE_DATA_CHAN command!\n");
+ pr_debug("%s(), NOT IMPLEMENTED\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown command!\n", __func__);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Function irlan_provider_connect_indication (handle, skb, priv)
+ *
+ * Got connection from peer IrLAN client
+ *
+ */
+static void irlan_provider_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ self = instance;
+ tsap = sap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
+ IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);
+
+ self->provider.max_sdu_size = max_sdu_size;
+ self->provider.max_header_size = max_header_size;
+
+ irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL);
+
+ /*
+ * If we are in peer mode, the client may not have got the discovery
+ * indication it needs to make progress. If the client is still in
+ * IDLE state, we must kick it.
+ */
+ if ((self->provider.access_type == ACCESS_PEER) &&
+ (self->client.state == IRLAN_IDLE))
+ {
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+ }
+}
+
+/*
+ * Function irlan_provider_connect_response (handle)
+ *
+ * Accept incoming connection
+ *
+ */
+void irlan_provider_connect_response(struct irlan_cb *self,
+ struct tsap_cb *tsap)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Just accept */
+ irttp_connect_response(tsap, IRLAN_MTU, NULL);
+}
+
+static void irlan_provider_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
+
+ self = instance;
+ tsap = sap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+ IRDA_ASSERT(tsap != NULL, return;);
+ IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ IRDA_ASSERT(tsap == self->provider.tsap_ctrl, return;);
+
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+}
+
+/*
+ * Function irlan_parse_open_data_cmd (self, skb)
+ *
+ * Parse the parameters of an Open Data Channel command and open the
+ * data TSAP
+ *
+ */
+int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);
+
+ /* Open data channel */
+ irlan_open_data_tsap(self);
+
+ return ret;
+}
+
+/*
+ * Function parse_command (skb)
+ *
+ * Extract all parameters from received buffer, then feed them to
+ * irlan_check_command_param() for parsing
+ *
+ */
+int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
+ struct sk_buff *skb)
+{
+ __u8 *frame;
+ __u8 *ptr;
+ int count;
+ __u16 val_len;
+ int i;
+ char *name;
+ char *value;
+ int ret = RSP_SUCCESS;
+
+ IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
+
+ pr_debug("%s(), skb->len=%d\n", __func__ , (int)skb->len);
+
+ IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
+
+ if (!skb)
+ return -RSP_PROTOCOL_ERROR;
+
+ frame = skb->data;
+
+ name = kmalloc(255, GFP_ATOMIC);
+ if (!name)
+ return -RSP_INSUFFICIENT_RESOURCES;
+ value = kmalloc(1016, GFP_ATOMIC);
+ if (!value) {
+ kfree(name);
+ return -RSP_INSUFFICIENT_RESOURCES;
+ }
+
+ /* How many parameters? */
+ count = frame[1];
+
+ pr_debug("Got %d parameters\n", count);
+
+ ptr = frame+2;
+
+ /* For all parameters */
+ for (i=0; i<count;i++) {
+ ret = irlan_extract_param(ptr, name, value, &val_len);
+ if (ret < 0) {
+ pr_debug("%s(), IrLAN, Error!\n", __func__);
+ break;
+ }
+ ptr+=ret;
+ ret = RSP_SUCCESS;
+ irlan_check_command_param(self, name, value);
+ }
+ /* Cleanup */
+ kfree(name);
+ kfree(value);
+
+ return ret;
+}
+
+/*
+ * Function irlan_provider_send_reply (self, info)
+ *
+ * Send a reply to a command from the peer IrLAN layer
+ *
+ */
+void irlan_provider_send_reply(struct irlan_cb *self, int command,
+ int ret_code)
+{
+ struct sk_buff *skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
+ /* Bigger param length comes from CMD_GET_MEDIA_CHAR */
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
+ IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
+ GFP_ATOMIC);
+
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, self->provider.max_header_size);
+ skb_put(skb, 2);
+
+ switch (command) {
+ case CMD_GET_PROVIDER_INFO:
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x02; /* 2 parameters */
+ switch (self->media) {
+ case MEDIA_802_3:
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
+ break;
+ case MEDIA_802_5:
+ irlan_insert_string_param(skb, "MEDIA", "802.5");
+ break;
+ default:
+ pr_debug("%s(), unknown media type!\n", __func__);
+ break;
+ }
+ irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
+ break;
+
+ case CMD_GET_MEDIA_CHAR:
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x05; /* 5 parameters */
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
+ irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
+
+ switch (self->provider.access_type) {
+ case ACCESS_DIRECT:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
+ break;
+ case ACCESS_PEER:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "PEER");
+ break;
+ case ACCESS_HOSTED:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
+ break;
+ default:
+ pr_debug("%s(), Unknown access type\n", __func__);
+ break;
+ }
+ irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
+ break;
+ case CMD_OPEN_DATA_CHANNEL:
+ skb->data[0] = 0x00; /* Success */
+ if (self->provider.send_arb_val) {
+ skb->data[1] = 0x03; /* 3 parameters */
+ irlan_insert_short_param(skb, "CON_ARB",
+ self->provider.send_arb_val);
+ } else
+ skb->data[1] = 0x02; /* 2 parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data);
+ irlan_insert_string_param(skb, "RECONNECT_KEY", "LINUX RULES!");
+ break;
+ case CMD_FILTER_OPERATION:
+ irlan_filter_request(self, skb);
+ break;
+ default:
+ pr_debug("%s(), Unknown command!\n", __func__);
+ break;
+ }
+
+ irttp_data_request(self->provider.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_provider_register(void)
+ *
+ * Register provider support so we can accept incoming connections.
+ *
+ */
+int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
+{
+ struct tsap_cb *tsap;
+ notify_t notify;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ /* Check if already open */
+ if (self->provider.tsap_ctrl)
+ return -1;
+
+ /*
+ * First register well known control TSAP
+ */
+ irda_notify_init(&notify);
+ notify.data_indication = irlan_provider_data_indication;
+ notify.connect_indication = irlan_provider_connect_indication;
+ notify.disconnect_indication = irlan_provider_disconnect_indication;
+ notify.instance = self;
+ strlcpy(notify.name, "IrLAN ctrl (p)", sizeof(notify.name));
+
+ tsap = irttp_open_tsap(LSAP_ANY, 1, &notify);
+ if (!tsap) {
+ pr_debug("%s(), Got no tsap!\n", __func__);
+ return -1;
+ }
+ self->provider.tsap_ctrl = tsap;
+
+ /* Register with LM-IAS */
+ irlan_ias_register(self, tsap->stsap_sel);
+
+ return 0;
+}
+
diff --git a/drivers/staging/irda/net/irlan/irlan_provider_event.c b/drivers/staging/irda/net/irlan/irlan_provider_event.c
new file mode 100644
index 000000000000..9c4f7f51d6b5
--- /dev/null
+++ b/drivers/staging/irda/net/irlan/irlan_provider_event.c
@@ -0,0 +1,233 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider_event.c
+ * Version: 0.9
+ * Description: IrLAN provider state machine
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Sat Oct 30 12:52:41 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <net/irda/irda.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irttp.h>
+
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_event.h>
+
+static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb) =
+{
+ irlan_provider_state_idle,
+ NULL, /* Query */
+ NULL, /* Info */
+ irlan_provider_state_info,
+ NULL, /* Media */
+ irlan_provider_state_open,
+ NULL, /* Wait */
+ NULL, /* Arb */
+ irlan_provider_state_data,
+ NULL, /* Close */
+ NULL, /* Sync */
+};
+
+void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(*state[ self->provider.state] != NULL, return;);
+
+ (*state[self->provider.state]) (self, event, skb);
+}
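
irlan_do_provider_event() is a table-driven state machine: the current provider state indexes into state[] and the selected handler both processes the event and advances the state. The snippet below is a stripped-down user-space sketch of the same pattern, walking the happy path implemented by the handlers that follow (connect indication, open data command, data connect indication, then disconnect); all identifiers are local to the sketch and are not kernel symbols.

/* Sketch of the table-driven dispatch used by irlan_do_provider_event().
 * States, events and transitions mirror the handlers in this file, but
 * nothing here is kernel code. */
#include <stdio.h>

enum state { ST_IDLE, ST_INFO, ST_OPEN, ST_DATA, ST_MAX };
enum event { EV_CONNECT_IND, EV_OPEN_DATA_CMD, EV_DATA_CONNECT_IND, EV_LMP_DISCONNECT };

static const char * const state_name[] = { "IDLE", "INFO", "OPEN", "DATA" };

/* Each handler returns the next state, like irlan_next_provider_state(). */
typedef enum state (*handler_t)(enum event);

static enum state st_idle(enum event ev)
{
	return ev == EV_CONNECT_IND ? ST_INFO : ST_IDLE;
}
static enum state st_info(enum event ev)
{
	if (ev == EV_OPEN_DATA_CMD)
		return ST_OPEN;
	return ev == EV_LMP_DISCONNECT ? ST_IDLE : ST_INFO;
}
static enum state st_open(enum event ev)
{
	if (ev == EV_DATA_CONNECT_IND)
		return ST_DATA;
	return ev == EV_LMP_DISCONNECT ? ST_IDLE : ST_OPEN;
}
static enum state st_data(enum event ev)
{
	return ev == EV_LMP_DISCONNECT ? ST_IDLE : ST_DATA;
}

static const handler_t handlers[ST_MAX] = { st_idle, st_info, st_open, st_data };

int main(void)
{
	enum state s = ST_IDLE;
	enum event script[] = { EV_CONNECT_IND, EV_OPEN_DATA_CMD,
				EV_DATA_CONNECT_IND, EV_LMP_DISCONNECT };

	for (unsigned i = 0; i < sizeof(script) / sizeof(script[0]); i++) {
		enum state next = handlers[s](script[i]);	/* table dispatch */
		printf("%s -> %s\n", state_name[s], state_name[next]);
		s = next;
	}
	return 0;
}
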
+
+/*
+ * Function irlan_provider_state_idle (event, skb, info)
+ *
+ * IDLE, We are waiting for a connect indication from a peer IrLAN
+ * client.
+ */
+static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_CONNECT_INDICATION:
+ irlan_provider_connect_response( self, self->provider.tsap_ctrl);
+ irlan_next_provider_state( self, IRLAN_INFO);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_info (self, event, skb, info)
+ *
+ * INFO, The control channel is connected and we are waiting for
+ * commands (GetInfo, GetMedia, OpenData) from the client.
+ */
+static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_GET_INFO_CMD:
+ /* Be sure to use 802.3 in case of peer mode */
+ if (self->provider.access_type == ACCESS_PEER) {
+ self->media = MEDIA_802_3;
+
+ /* Check if client has started yet */
+ if (self->client.state == IRLAN_IDLE) {
+ /* This should get the client going */
+ irlmp_discovery_request(8);
+ }
+ }
+
+ irlan_provider_send_reply(self, CMD_GET_PROVIDER_INFO,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_GET_MEDIA_CMD:
+ irlan_provider_send_reply(self, CMD_GET_MEDIA_CHAR,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_OPEN_DATA_CMD:
+ ret = irlan_parse_open_data_cmd(self, skb);
+ if (self->provider.access_type == ACCESS_PEER) {
+ /* FIXME: make use of random functions! */
+ self->provider.send_arb_val = (jiffies & 0xffff);
+ }
+ irlan_provider_send_reply(self, CMD_OPEN_DATA_CHANNEL, ret);
+
+ if (ret == RSP_SUCCESS) {
+ irlan_next_provider_state(self, IRLAN_OPEN);
+
+ /* Signal client that we are now open */
+ irlan_do_client_event(self, IRLAN_PROVIDER_SIGNAL, NULL);
+ }
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_open (self, event, skb, info)
+ *
+ * OPEN, The client has issued an OpenData command and is awaiting a
+ * reply
+ *
+ */
+static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_FILTER_CONFIG_CMD:
+ irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
+ irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_DATA_CONNECT_INDICATION:
+ irlan_next_provider_state(self, IRLAN_DATA);
+ irlan_provider_connect_response(self, self->tsap_data);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_data (self, event, skb, info)
+ *
+ * DATA, The data channel is connected, allowing data transfers between
+ * the local and remote machines.
+ *
+ */
+static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch(event) {
+ case IRLAN_FILTER_CONFIG_CMD:
+ irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
+ irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
+ RSP_SUCCESS);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/drivers/staging/irda/net/irlap.c b/drivers/staging/irda/net/irlap.c
new file mode 100644
index 000000000000..1cde711bcab5
--- /dev/null
+++ b/drivers/staging/irda/net/irlap.c
@@ -0,0 +1,1207 @@
+/*********************************************************************
+ *
+ * Filename: irlap.c
+ * Version: 1.0
+ * Description: IrLAP implementation for Linux
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Tue Dec 14 09:26:44 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/irqueue.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irlmp_frame.h>
+#include <net/irda/irlap_frame.h>
+#include <net/irda/irlap.h>
+#include <net/irda/timer.h>
+#include <net/irda/qos.h>
+
+static hashbin_t *irlap = NULL;
+int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
+
+/* This is how long the peer may fail to answer pf polls before we
+ * generate an event to the application. The spec mandates 3 seconds,
+ * but in some cases it's way too long. - Jean II */
+int sysctl_warn_noreply_time = 3;
+
+extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
+static void __irlap_close(struct irlap_cb *self);
+static void irlap_init_qos_capabilities(struct irlap_cb *self,
+ struct qos_info *qos_user);
+
+static const char *const lap_reasons[] __maybe_unused = {
+ "ERROR, NOT USED",
+ "LAP_DISC_INDICATION",
+ "LAP_NO_RESPONSE",
+ "LAP_RESET_INDICATION",
+ "LAP_FOUND_NONE",
+ "LAP_MEDIA_BUSY",
+ "LAP_PRIMARY_CONFLICT",
+ "ERROR, NOT USED",
+};
+
+int __init irlap_init(void)
+{
+ /* Check if the compiler did its job properly.
+ * May happen on some ARM configurations, check with Russell King. */
+ IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
+ IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
+ IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
+ IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
+
+ /* Allocate master array */
+ irlap = hashbin_new(HB_LOCK);
+ if (irlap == NULL) {
+ net_err_ratelimited("%s: can't allocate irlap hashbin!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void irlap_cleanup(void)
+{
+ IRDA_ASSERT(irlap != NULL, return;);
+
+ hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
+}
+
+/*
+ * Function irlap_open (driver)
+ *
+ * Initialize IrLAP layer
+ *
+ */
+struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
+ const char *hw_name)
+{
+ struct irlap_cb *self;
+
+ /* Initialize the irlap structure. */
+ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
+ if (self == NULL)
+ return NULL;
+
+ self->magic = LAP_MAGIC;
+
+ /* Make a binding between the layers */
+ self->netdev = dev;
+ self->qos_dev = qos;
+ /* Copy hardware name */
+ if(hw_name != NULL) {
+ strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
+ } else {
+ self->hw_name[0] = '\0';
+ }
+
+ /* FIXME: should we get our own field? */
+ dev->atalk_ptr = self;
+
+ self->state = LAP_OFFLINE;
+
+ /* Initialize transmit queue */
+ skb_queue_head_init(&self->txq);
+ skb_queue_head_init(&self->txq_ultra);
+ skb_queue_head_init(&self->wx_list);
+
+ /* My unique IrLAP device address! */
+ /* We don't want the broadcast address, nor the NULL address
+ * (most often used to signify "invalid"), and we don't want an
+ * address already in use (otherwise connect won't be able
+ * to select the proper link). - Jean II */
+ do {
+ get_random_bytes(&self->saddr, sizeof(self->saddr));
+ } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
+ (hashbin_lock_find(irlap, self->saddr, NULL)) );
+ /* Copy to the driver */
+ memcpy(dev->dev_addr, &self->saddr, 4);
+
+ init_timer(&self->slot_timer);
+ init_timer(&self->query_timer);
+ init_timer(&self->discovery_timer);
+ init_timer(&self->final_timer);
+ init_timer(&self->poll_timer);
+ init_timer(&self->wd_timer);
+ init_timer(&self->backoff_timer);
+ init_timer(&self->media_busy_timer);
+
+ irlap_apply_default_connection_parameters(self);
+
+ self->N3 = 3; /* # connections attempts to try before giving up */
+
+ self->state = LAP_NDM;
+
+ hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
+
+ irlmp_register_link(self, self->saddr, &self->notify);
+
+ return self;
+}
+EXPORT_SYMBOL(irlap_open);
+
+/*
+ * Function __irlap_close (self)
+ *
+ * Remove IrLAP and all allocated memory. Stop any pending timers.
+ *
+ */
+static void __irlap_close(struct irlap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Stop timers */
+ del_timer(&self->slot_timer);
+ del_timer(&self->query_timer);
+ del_timer(&self->discovery_timer);
+ del_timer(&self->final_timer);
+ del_timer(&self->poll_timer);
+ del_timer(&self->wd_timer);
+ del_timer(&self->backoff_timer);
+ del_timer(&self->media_busy_timer);
+
+ irlap_flush_all_queues(self);
+
+ self->magic = 0;
+
+ kfree(self);
+}
+
+/*
+ * Function irlap_close (self)
+ *
+ * Remove IrLAP instance
+ *
+ */
+void irlap_close(struct irlap_cb *self)
+{
+ struct irlap_cb *lap;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* We used to send a LAP_DISC_INDICATION here, but this was
+ * racy. This has been moved into irlmp_unregister_link()
+ * itself. Jean II */
+
+ /* Kill the LAP and all LSAPs on top of it */
+ irlmp_unregister_link(self->saddr);
+ self->notify.instance = NULL;
+
+ /* Be sure that we manage to remove ourselves from the hash */
+ lap = hashbin_remove(irlap, self->saddr, NULL);
+ if (!lap) {
+ pr_debug("%s(), Didn't find myself!\n", __func__);
+ return;
+ }
+ __irlap_close(lap);
+}
+EXPORT_SYMBOL(irlap_close);
+
+/*
+ * Function irlap_connect_indication (self, skb)
+ *
+ * Another device is attempting to make a connection
+ *
+ */
+void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
+
+ irlmp_link_connect_indication(self->notify.instance, self->saddr,
+ self->daddr, &self->qos_tx, skb);
+}
+
+/*
+ * Function irlap_connect_response (self, skb)
+ *
+ * Service user has accepted incoming connection
+ *
+ */
+void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
+{
+ irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
+}
+
+/*
+ * Function irlap_connect_request (self, daddr, qos_user, sniff)
+ *
+ * Request connection with another device; sniffing is not implemented
+ * yet.
+ *
+ */
+void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
+ struct qos_info *qos_user, int sniff)
+{
+ pr_debug("%s(), daddr=0x%08x\n", __func__, daddr);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ self->daddr = daddr;
+
+ /*
+ * If the service user specifies QoS values for this connection,
+ * then use them
+ */
+ irlap_init_qos_capabilities(self, qos_user);
+
+ if ((self->state == LAP_NDM) && !self->media_busy)
+ irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
+ else
+ self->connect_pending = TRUE;
+}
+
+/*
+ * Function irlap_connect_confirm (self, skb)
+ *
+ * Connection request has been accepted
+ *
+ */
+void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
+}
+
+/*
+ * Function irlap_data_indication (self, skb)
+ *
+ * Received data frames from IR-port, so we just pass them up to
+ * IrLMP for further processing
+ *
+ */
+void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
+ int unreliable)
+{
+ /* Hide LAP header from IrLMP layer */
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ irlmp_link_data_indication(self->notify.instance, skb, unreliable);
+}
+
+
+/*
+ * Function irlap_data_request (self, skb)
+ *
+ * Queue data for transmission, must wait until XMIT state
+ *
+ */
+void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
+ int unreliable)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
+ return;);
+ skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ /*
+ * Must set frame format now so that the rest of the code knows
+ * if it's dealing with an I or a UI frame
+ */
+ if (unreliable)
+ skb->data[1] = UI_FRAME;
+ else
+ skb->data[1] = I_FRAME;
+
+ /* Don't forget to refcount it - see irlmp_connect_request(). */
+ skb_get(skb);
+
+ /* Add at the end of the queue (keep ordering) - Jean II */
+ skb_queue_tail(&self->txq, skb);
+
+ /*
+ * Send an event for this frame only if we are in the right state
+ * FIXME: udata should be sent first! (skb_queue_head?)
+ */
+ if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
+ /* If we are not already processing the Tx queue, trigger
+ * transmission immediately - Jean II */
+ if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
+ irlap_do_event(self, DATA_REQUEST, skb, NULL);
+ /* Otherwise, the packets will be sent normally at the
+ * next pf-poll - Jean II */
+ }
+}
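+
+/*
+ * Illustrative sketch, not part of the original code: a hypothetical
+ * caller of irlap_data_request() must reserve LAP header room before
+ * queueing the skb (payload and payload_len are placeholders; in the
+ * real stack IrLMP performs this reservation):
+ *
+ *	skb = alloc_skb(LAP_ADDR_HEADER + LAP_CTRL_HEADER + payload_len,
+ *			GFP_ATOMIC);
+ *	if (!skb)
+ *		return;
+ *	skb_reserve(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
+ *	skb_put_data(skb, payload, payload_len);
+ *	irlap_data_request(self, skb, FALSE);	(FALSE selects a reliable I-frame)
+ */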
+
+/*
+ * Function irlap_unitdata_request (self, skb)
+ *
+ * Send Ultra data. This is data that must be sent outside any connection
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
+ return;);
+ skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ skb->data[0] = CBROADCAST;
+ skb->data[1] = UI_FRAME;
+
+ /* Don't need to refcount, see irlmp_connless_data_request() */
+
+ skb_queue_tail(&self->txq_ultra, skb);
+
+ irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irlap_unitdata_indication (self, skb)
+ *
+ * Receive Ultra data. This is data that is received outside any connection
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Hide LAP header from IrLMP layer */
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ irlmp_link_unitdata_indication(self->notify.instance, skb);
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irlap_disconnect_request (void)
+ *
+ * Request to disconnect connection by service user
+ */
+void irlap_disconnect_request(struct irlap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Don't disconnect until all data frames are successfully sent */
+ if (!skb_queue_empty(&self->txq)) {
+ self->disconnect_pending = TRUE;
+ return;
+ }
+
+ /* Check if we are in the right state for disconnecting */
+ switch (self->state) {
+ case LAP_XMIT_P: /* FALLTHROUGH */
+ case LAP_XMIT_S: /* FALLTHROUGH */
+ case LAP_CONN: /* FALLTHROUGH */
+ case LAP_RESET_WAIT: /* FALLTHROUGH */
+ case LAP_RESET_CHECK:
+ irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
+ break;
+ default:
+ pr_debug("%s(), disconnect pending!\n", __func__);
+ self->disconnect_pending = TRUE;
+ break;
+ }
+}
+
+/*
+ * Function irlap_disconnect_indication (void)
+ *
+ * Disconnect request from other device
+ *
+ */
+void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
+{
+ pr_debug("%s(), reason=%s\n", __func__, lap_reasons[reason]);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Flush queues */
+ irlap_flush_all_queues(self);
+
+ switch (reason) {
+ case LAP_RESET_INDICATION:
+ pr_debug("%s(), Sending reset request!\n", __func__);
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
+ break;
+ case LAP_NO_RESPONSE: /* FALLTHROUGH */
+ case LAP_DISC_INDICATION: /* FALLTHROUGH */
+ case LAP_FOUND_NONE: /* FALLTHROUGH */
+ case LAP_MEDIA_BUSY:
+ irlmp_link_disconnect_indication(self->notify.instance, self,
+ reason, NULL);
+ break;
+ default:
+ net_err_ratelimited("%s: Unknown reason %d\n",
+ __func__, reason);
+ }
+}
+
+/*
+ * Function irlap_discovery_request (gen_addr_bit)
+ *
+ * Start one single discovery operation.
+ *
+ */
+void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
+{
+ struct irlap_info info;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(discovery != NULL, return;);
+
+ pr_debug("%s(), nslots = %d\n", __func__, discovery->nslots);
+
+ IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
+ (discovery->nslots == 8) || (discovery->nslots == 16),
+ return;);
+
+ /* Discovery is only possible in NDM mode */
+ if (self->state != LAP_NDM) {
+ pr_debug("%s(), discovery only possible in NDM mode\n",
+ __func__);
+ irlap_discovery_confirm(self, NULL);
+ /* Note : in theory, if we are not in NDM, we could postpone
+ * the discovery like we do for connection request.
+ * In practice, it's not worth it. If the media was busy,
+ * it's likely next time around it won't be busy. If we are
+ * in REPLY state, we will get passive discovery info & event.
+ * Jean II */
+ return;
+ }
+
+ /* Check if last discovery request finished in time, or if
+ * it was aborted due to the media busy flag. */
+ if (self->discovery_log != NULL) {
+ hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
+ self->discovery_log = NULL;
+ }
+
+ /* All operations will occur at a predictable time, no need to lock */
+ self->discovery_log = hashbin_new(HB_NOLOCK);
+
+ if (self->discovery_log == NULL) {
+ net_warn_ratelimited("%s(), Unable to allocate discovery log!\n",
+ __func__);
+ return;
+ }
+
+ info.S = discovery->nslots; /* Number of slots */
+ info.s = 0; /* Current slot */
+
+ self->discovery_cmd = discovery;
+ info.discovery = discovery;
+
+ /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
+ self->slot_timeout = msecs_to_jiffies(sysctl_slot_timeout);
+
+ irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
+}
+
+/*
+ * Function irlap_discovery_confirm (log)
+ *
+ * A device has been discovered in front of this station, we
+ * report directly to LMP.
+ */
+void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ IRDA_ASSERT(self->notify.instance != NULL, return;);
+
+ /*
+ * Check for successful discovery, since we are then allowed to clear
+ * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
+ * us to make connection attempts much faster and easier (i.e. no
+ * collisions).
+ * Setting media busy to false will also generate an event allowing
+ * us to process pending events in the NDM state machine.
+ * Note : the spec doesn't define what a successful discovery is.
+ * If we want Ultra to work, it's successful even if there is
+ * nobody discovered - Jean II
+ */
+ if (discovery_log)
+ irda_device_set_media_busy(self->netdev, FALSE);
+
+ /* Inform IrLMP */
+ irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
+}
+
+/*
+ * Function irlap_discovery_indication (log)
+ *
+ * Somebody is trying to discover us!
+ *
+ */
+void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(discovery != NULL, return;);
+
+ IRDA_ASSERT(self->notify.instance != NULL, return;);
+
+ /* A device is very likely to connect immediately after it performs
+ * a successful discovery. This means that in our case, we are much
+ * more likely to receive a connection request over the medium.
+ * So, we back off to avoid collisions.
+ * IrLAP spec 6.13.4 suggests 100ms...
+ * Note : this little trick actually makes a *BIG* difference. If I set
+ * my Linux box with discovery enabled and one Ultra frame sent every
+ * second, my Palm has no trouble connecting to it every time !
+ * Jean II */
+ irda_device_set_media_busy(self->netdev, SMALL);
+
+ irlmp_link_discovery_indication(self->notify.instance, discovery);
+}
+
+/*
+ * Function irlap_status_indication (quality_of_link)
+ */
+void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
+{
+ switch (quality_of_link) {
+ case STATUS_NO_ACTIVITY:
+ net_info_ratelimited("IrLAP, no activity on link!\n");
+ break;
+ case STATUS_NOISY:
+ net_info_ratelimited("IrLAP, noisy link!\n");
+ break;
+ default:
+ break;
+ }
+ irlmp_status_indication(self->notify.instance,
+ quality_of_link, LOCK_NO_CHANGE);
+}
+
+/*
+ * Function irlap_reset_indication (void)
+ */
+void irlap_reset_indication(struct irlap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ if (self->state == LAP_RESET_WAIT)
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
+ else
+ irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
+}
+
+/*
+ * Function irlap_reset_confirm (void)
+ */
+void irlap_reset_confirm(void)
+{
+}
+
+/*
+ * Function irlap_generate_rand_time_slot (S, s)
+ *
+ * Generate a random time slot between s and S-1 where
+ * S = Number of slots (0 -> S-1)
+ * s = Current slot
+ */
+int irlap_generate_rand_time_slot(int S, int s)
+{
+ static int rand;
+ int slot;
+
+ IRDA_ASSERT((S - s) > 0, return 0;);
+
+ rand += jiffies;
+ rand ^= (rand << 12);
+ rand ^= (rand >> 20);
+
+ slot = s + rand % (S-s);
+
+ IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
+
+ return slot;
+}
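+
+/*
+ * Worked example (illustrative, not part of the original code): with
+ * S = 8 slots and current slot s = 3, s + rand % (S - s) is meant to
+ * pick one of the remaining slots 3..7. The xorshift-style mixing of
+ * 'rand' above only decorrelates neighbouring stations; it makes no
+ * claim of cryptographic quality.
+ */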
+
+/*
+ * Function irlap_update_nr_received (nr)
+ *
+ * Remove all acknowledged frames in current window queue. This code is
+ * not intuitive and you should not try to change it. If you think it
+ * contains bugs, please mail a patch to the author instead.
+ */
+void irlap_update_nr_received(struct irlap_cb *self, int nr)
+{
+ struct sk_buff *skb = NULL;
+ int count = 0;
+
+ /*
+ * Remove all the ack-ed frames from the window queue.
+ */
+
+ /*
+ * Optimize for the common case. It is most likely that the receiver
+ * will acknowledge all the frames we have sent! So in that case we
+ * delete all frames stored in window.
+ */
+ if (nr == self->vs) {
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
+ dev_kfree_skb(skb);
+ }
+ /* The last acked frame is the next to send minus one */
+ self->va = nr - 1;
+ } else {
+ /* Remove all acknowledged frames in current window */
+ while ((skb_peek(&self->wx_list) != NULL) &&
+ (((self->va+1) % 8) != nr))
+ {
+ skb = skb_dequeue(&self->wx_list);
+ dev_kfree_skb(skb);
+
+ self->va = (self->va + 1) % 8;
+ count++;
+ }
+ }
+
+ /* Advance window */
+ self->window = self->window_size - skb_queue_len(&self->wx_list);
+}
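+
+/*
+ * Worked example (illustrative, not part of the original code): assume
+ * va = 2 (last acked), vs = 6 (next to send) and wx_list holds frames
+ * 3, 4 and 5. Receiving nr = 5 ("I expect frame 5 next") frees frames
+ * 3 and 4 and leaves va = 4 with frame 5 still unacked. Receiving
+ * nr = 6 (== vs) instead takes the fast path: the whole window is
+ * freed and va becomes nr - 1 = 5.
+ */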
+
+/*
+ * Function irlap_validate_ns_received (ns)
+ *
+ * Validate the next to send (ns) field from received frame.
+ */
+int irlap_validate_ns_received(struct irlap_cb *self, int ns)
+{
+ /* ns as expected? */
+ if (ns == self->vr)
+ return NS_EXPECTED;
+ /*
+ * Stations are allowed to treat invalid NS as unexpected NS
+ * IrLAP, Recv ... with-invalid-Ns. p. 84
+ */
+ return NS_UNEXPECTED;
+
+ /* return NR_INVALID; */
+}
+/*
+ * Function irlap_validate_nr_received (nr)
+ *
+ * Validate the next to receive (nr) field from received frame.
+ *
+ */
+int irlap_validate_nr_received(struct irlap_cb *self, int nr)
+{
+ /* nr as expected? */
+ if (nr == self->vs) {
+ pr_debug("%s(), expected!\n", __func__);
+ return NR_EXPECTED;
+ }
+
+ /*
+ * Unexpected nr? (but within the current window). First we check if the
+ * ns numbers of the frames in the current window wrap.
+ */
+ if (self->va < self->vs) {
+ if ((nr >= self->va) && (nr <= self->vs))
+ return NR_UNEXPECTED;
+ } else {
+ if ((nr >= self->va) || (nr <= self->vs))
+ return NR_UNEXPECTED;
+ }
+
+ /* Invalid nr! */
+ return NR_INVALID;
+}
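+
+/*
+ * Worked example (illustrative, not part of the original code): with
+ * va = 2 and vs = 6 (no wrap), nr = 6 is NR_EXPECTED, nr = 2..5 lies
+ * inside the window and is NR_UNEXPECTED, and nr = 0, 1 or 7 is
+ * NR_INVALID. With va = 6 and vs = 1 (wrap), the second branch applies:
+ * nr = 6, 7 or 0 is NR_UNEXPECTED while nr = 2..5 is NR_INVALID.
+ */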
+
+/*
+ * Function irlap_initiate_connection_state ()
+ *
+ * Initialize the connection state parameters
+ *
+ */
+void irlap_initiate_connection_state(struct irlap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Next to send and next to receive */
+ self->vs = self->vr = 0;
+
+ /* Last frame which got acked (0 - 1) % 8 */
+ self->va = 7;
+
+ self->window = 1;
+
+ self->remote_busy = FALSE;
+ self->retry_count = 0;
+}
+
+/*
+ * Function irlap_wait_min_turn_around (self, qos)
+ *
+ * Wait the negotiated minimum turn-around time. This function actually sets
+ * the number of XBOFs that must be sent before the next transmitted
+ * frame in order to delay for the specified amount of time. This is
+ * done to avoid using timers, and the forbidden udelay!
+ */
+void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
+{
+ __u32 min_turn_time;
+ __u32 speed;
+
+ /* Get QoS values. */
+ speed = qos->baud_rate.value;
+ min_turn_time = qos->min_turn_time.value;
+
+ /* No need to calculate XBOFs for speeds over 115200 bps */
+ if (speed > 115200) {
+ self->mtt_required = min_turn_time;
+ return;
+ }
+
+ /*
+ * Send additional BOF's for the next frame for the requested
+ * min turn time, so now we must calculate how many chars (XBOF's) we
+ * must send for the requested time period (min turn time)
+ */
+ self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
+}
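+
+/*
+ * Illustrative arithmetic, not part of the original code: at 115200 bps
+ * each character occupies 10 bit times on the wire, so a 1000 us
+ * minimum turn-around corresponds to roughly 115200 / 10 * 0.001 = ~11
+ * extra BOF characters, which is the sort of value
+ * irlap_min_turn_time_in_bytes() is expected to return here.
+ */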
+
+/*
+ * Function irlap_flush_all_queues (void)
+ *
+ * Flush all queues
+ *
+ */
+void irlap_flush_all_queues(struct irlap_cb *self)
+{
+ struct sk_buff* skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Free transmission queue */
+ while ((skb = skb_dequeue(&self->txq)) != NULL)
+ dev_kfree_skb(skb);
+
+ while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
+ dev_kfree_skb(skb);
+
+ /* Free sliding window buffered packets */
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL)
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function irlap_change_speed (self, speed, now)
+ *
+ * Change the speed of the IrDA port
+ *
+ */
+static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
+{
+ struct sk_buff *skb;
+
+ pr_debug("%s(), setting speed to %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ self->speed = speed;
+
+ /* Change speed now, or just piggyback speed on frames */
+ if (now) {
+ /* Send down empty frame to trigger speed change */
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (skb)
+ irlap_queue_xmit(self, skb);
+ }
+}
+
+/*
+ * Function irlap_init_qos_capabilities (self, qos)
+ *
+ * Initialize QoS for this IrLAP session. We compute the
+ * intersection of the QoS capabilities of the user, the driver and
+ * IrLAP itself. Normally, IrLAP will not specify any values, but it can
+ * be used to restrict certain values.
+ */
+static void irlap_init_qos_capabilities(struct irlap_cb *self,
+ struct qos_info *qos_user)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(self->netdev != NULL, return;);
+
+ /* Start out with the maximum QoS support possible */
+ irda_init_max_qos_capabilies(&self->qos_rx);
+
+ /* Apply drivers QoS capabilities */
+ irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
+
+ /*
+ * Check for user supplied QoS parameters. The service user is only
+ * allowed to supply these values. We check each parameter since the
+ * user may not have set all of them.
+ */
+ if (qos_user) {
+ pr_debug("%s(), Found user specified QoS!\n", __func__);
+
+ if (qos_user->baud_rate.bits)
+ self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
+
+ if (qos_user->max_turn_time.bits)
+ self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
+ if (qos_user->data_size.bits)
+ self->qos_rx.data_size.bits &= qos_user->data_size.bits;
+
+ if (qos_user->link_disc_time.bits)
+ self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
+ }
+
+ /* Use 500ms in IrLAP for now */
+ self->qos_rx.max_turn_time.bits &= 0x01;
+
+ /* Set data size */
+ /*self->qos_rx.data_size.bits &= 0x03;*/
+
+ irda_qos_bits_to_value(&self->qos_rx);
+}
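+
+/*
+ * Illustrative example, not part of the original code: each
+ * intersection above is a plain bitwise AND of capability masks. If,
+ * say, the driver advertises baud_rate.bits = 0x01ff and the service
+ * user passes qos_user->baud_rate.bits = 0x0003, the session keeps
+ * 0x01ff & 0x0003 = 0x0003 and irda_qos_bits_to_value() later turns
+ * the surviving bits (mapped to concrete rates in qos.c) into values.
+ */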
+
+/*
+ * Function irlap_apply_default_connection_parameters (self)
+ *
+ * Use the default connection and transmission parameters
+ */
+void irlap_apply_default_connection_parameters(struct irlap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* xbofs : Default value in NDM */
+ self->next_bofs = 12;
+ self->bofs_count = 12;
+
+ /* NDM Speed is 9600 */
+ irlap_change_speed(self, 9600, TRUE);
+
+ /* Set mbusy when going to NDM state */
+ irda_device_set_media_busy(self->netdev, TRUE);
+
+ /*
+ * Generate random connection address for this session, which must
+ * be 7 bits wide and different from 0x00 and 0xfe
+ */
+ while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
+ get_random_bytes(&self->caddr, sizeof(self->caddr));
+ self->caddr &= 0xfe;
+ }
+
+ /* Use default values until connection has been negotiated */
+ self->slot_timeout = sysctl_slot_timeout;
+ self->final_timeout = FINAL_TIMEOUT;
+ self->poll_timeout = POLL_TIMEOUT;
+ self->wd_timeout = WD_TIMEOUT;
+
+ /* Set some default values */
+ self->qos_tx.baud_rate.value = 9600;
+ self->qos_rx.baud_rate.value = 9600;
+ self->qos_tx.max_turn_time.value = 0;
+ self->qos_rx.max_turn_time.value = 0;
+ self->qos_tx.min_turn_time.value = 0;
+ self->qos_rx.min_turn_time.value = 0;
+ self->qos_tx.data_size.value = 64;
+ self->qos_rx.data_size.value = 64;
+ self->qos_tx.window_size.value = 1;
+ self->qos_rx.window_size.value = 1;
+ self->qos_tx.additional_bofs.value = 12;
+ self->qos_rx.additional_bofs.value = 12;
+ self->qos_tx.link_disc_time.value = 0;
+ self->qos_rx.link_disc_time.value = 0;
+
+ irlap_flush_all_queues(self);
+
+ self->disconnect_pending = FALSE;
+ self->connect_pending = FALSE;
+}
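+
+/*
+ * Worked example for the address loop above (illustrative, not part of
+ * the original code): if get_random_bytes() yields 0x37, masking with
+ * 0xfe gives caddr = 0x36, which is accepted. A draw of 0xff masks to
+ * the reserved value 0xfe and the loop simply tries again, just as it
+ * would for 0x00.
+ */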
+
+/*
+ * Function irlap_apply_connection_parameters (self, now)
+ *
+ * Initialize IrLAP with the negotiated QoS values
+ *
+ * If 'now' is false, the speed and xbofs will be changed after the next
+ * frame is sent.
+ * If 'now' is true, the speed and xbofs are changed immediately
+ */
+void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Set the negotiated xbofs value */
+ self->next_bofs = self->qos_tx.additional_bofs.value;
+ if (now)
+ self->bofs_count = self->next_bofs;
+
+ /* Set the negotiated link speed (may need the new xbofs value) */
+ irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
+
+ self->window_size = self->qos_tx.window_size.value;
+ self->window = self->qos_tx.window_size.value;
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ /*
+ * Calculate how many bytes it is possible to transmit before the
+ * link must be turned around
+ */
+ self->line_capacity =
+ irlap_max_line_capacity(self->qos_tx.baud_rate.value,
+ self->qos_tx.max_turn_time.value);
+ self->bytes_left = self->line_capacity;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+
+
+ /*
+ * Initialize timeout values, some of the rules are listed on
+ * page 92 in IrLAP.
+ */
+ IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
+ IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
+ /* The poll timeout applies only to the primary station.
+ * It defines the maximum time the primary stays in XMIT mode
+ * before timing out and turning the link around (sending an RR).
+ * Or, this is how long we can keep the pf bit in primary mode.
+ * Therefore, it must be lower than or equal to our *OWN* max turn around.
+ * Jean II */
+ self->poll_timeout = msecs_to_jiffies(
+ self->qos_tx.max_turn_time.value);
+ /* The Final timeout applies only to the primary station.
+ * It defines the maximum time the primary waits (mostly in RECV mode)
+ * for an answer from the secondary station before polling it again.
+ * Therefore, it must be greater than or equal to our *PARTNER*
+ * max turn around time - Jean II */
+ self->final_timeout = msecs_to_jiffies(
+ self->qos_rx.max_turn_time.value);
+ /* The Watchdog Bit timeout applies only to the secondary station.
+ * It defines the maximum time the secondary waits (mostly in RECV mode)
+ * for a poll from the primary station before getting annoyed.
+ * Therefore, it must be greater than or equal to our *PARTNER*
+ * max turn around time - Jean II */
+ self->wd_timeout = self->final_timeout * 2;
+
+ /*
+ * N1 and N2 are maximum retry count for *both* the final timer
+ * and the wd timer (with a factor 2) as defined above.
+ * After N1 retry of a timer, we give a warning to the user.
+ * After N2 retry, we consider the link dead and disconnect it.
+ * Jean II
+ */
+
+ /*
+ * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
+ * 3 seconds otherwise. See page 71 in IrLAP for more details.
+ * Actually, it's not always 3 seconds, as we allow it to be set
+ * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
+ * of 2, so 1 second is the minimum we can allow. - Jean II
+ */
+ if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
+ /*
+ * If we set N1 to 0, it will trigger immediately, which is
+ * not what we want. What we really want is to disable it,
+ * Jean II
+ */
+ self->N1 = -2; /* Disable - Need to be multiple of 2*/
+ else
+ self->N1 = sysctl_warn_noreply_time * 1000 /
+ self->qos_rx.max_turn_time.value;
+
+ pr_debug("Setting N1 = %d\n", self->N1);
+
+ /* Set N2 to match our own disconnect time */
+ self->N2 = self->qos_tx.link_disc_time.value * 1000 /
+ self->qos_rx.max_turn_time.value;
+ pr_debug("Setting N2 = %d\n", self->N2);
+}
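+
+/*
+ * Worked numbers (illustrative, not part of the original code):
+ * assuming a warn-noreply time of 3 s, a negotiated max_turn_time of
+ * 500 ms and a link_disc_time of 12 s, the code above yields
+ * N1 = 3000 / 500 = 6 and N2 = 12000 / 500 = 24, i.e. a warning after
+ * roughly 3 s without a reply and a forced disconnect after roughly
+ * 12 s.
+ */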
+
+#ifdef CONFIG_PROC_FS
+struct irlap_iter_state {
+ int id;
+};
+
+static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct irlap_iter_state *iter = seq->private;
+ struct irlap_cb *self;
+
+ /* Protect our access to the tsap list */
+ spin_lock_irq(&irlap->hb_spinlock);
+ iter->id = 0;
+
+ for (self = (struct irlap_cb *) hashbin_get_first(irlap);
+ self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
+ if (iter->id == *pos)
+ break;
+ ++iter->id;
+ }
+
+ return self;
+}
+
+static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct irlap_iter_state *iter = seq->private;
+
+ ++*pos;
+ ++iter->id;
+ return (void *) hashbin_get_next(irlap);
+}
+
+static void irlap_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_irq(&irlap->hb_spinlock);
+}
+
+static int irlap_seq_show(struct seq_file *seq, void *v)
+{
+ const struct irlap_iter_state *iter = seq->private;
+ const struct irlap_cb *self = v;
+
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
+
+ seq_printf(seq, "irlap%d ", iter->id);
+ seq_printf(seq, "state: %s\n",
+ irlap_state[self->state]);
+
+ seq_printf(seq, " device name: %s, ",
+ (self->netdev) ? self->netdev->name : "bug");
+ seq_printf(seq, "hardware name: %s\n", self->hw_name);
+
+ seq_printf(seq, " caddr: %#02x, ", self->caddr);
+ seq_printf(seq, "saddr: %#08x, ", self->saddr);
+ seq_printf(seq, "daddr: %#08x\n", self->daddr);
+
+ seq_printf(seq, " win size: %d, ",
+ self->window_size);
+ seq_printf(seq, "win: %d, ", self->window);
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ seq_printf(seq, "line capacity: %d, ",
+ self->line_capacity);
+ seq_printf(seq, "bytes left: %d\n", self->bytes_left);
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ seq_printf(seq, " tx queue len: %d ",
+ skb_queue_len(&self->txq));
+ seq_printf(seq, "win queue len: %d ",
+ skb_queue_len(&self->wx_list));
+ seq_printf(seq, "rbusy: %s", self->remote_busy ?
+ "TRUE" : "FALSE");
+ seq_printf(seq, " mbusy: %s\n", self->media_busy ?
+ "TRUE" : "FALSE");
+
+ seq_printf(seq, " retrans: %d ", self->retry_count);
+ seq_printf(seq, "vs: %d ", self->vs);
+ seq_printf(seq, "vr: %d ", self->vr);
+ seq_printf(seq, "va: %d\n", self->va);
+
+ seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
+
+ seq_printf(seq, " tx\t%d\t",
+ self->qos_tx.baud_rate.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.max_turn_time.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.data_size.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.window_size.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.additional_bofs.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.min_turn_time.value);
+ seq_printf(seq, "%d\t",
+ self->qos_tx.link_disc_time.value);
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, " rx\t%d\t",
+ self->qos_rx.baud_rate.value);
+ seq_printf(seq, "%d\t",
+ self->qos_rx.max_turn_time.value);
+ seq_printf(seq, "%d\t",
+ self->qos_rx.data_size.value);
+ seq_printf(seq, "%d\t",
+ self->qos_rx.window_size.value);
+ seq_printf(seq, "%d\t",
+ self->qos_rx.additional_bofs.value);
+ seq_printf(seq, "%d\t",
+ self->qos_rx.min_turn_time.value);
+ seq_printf(seq, "%d\n",
+ self->qos_rx.link_disc_time.value);
+
+ return 0;
+}
+
+static const struct seq_operations irlap_seq_ops = {
+ .start = irlap_seq_start,
+ .next = irlap_seq_next,
+ .stop = irlap_seq_stop,
+ .show = irlap_seq_show,
+};
+
+static int irlap_seq_open(struct inode *inode, struct file *file)
+{
+ if (irlap == NULL)
+ return -EINVAL;
+
+ return seq_open_private(file, &irlap_seq_ops,
+ sizeof(struct irlap_iter_state));
+}
+
+const struct file_operations irlap_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = irlap_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/drivers/staging/irda/net/irlap_event.c b/drivers/staging/irda/net/irlap_event.c
new file mode 100644
index 000000000000..0e1b4d79f745
--- /dev/null
+++ b/drivers/staging/irda/net/irlap_event.c
@@ -0,0 +1,2316 @@
+/*********************************************************************
+ *
+ * Filename: irlap_event.c
+ * Version: 0.9
+ * Description: IrLAP state machine implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dag@brattli.net>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Sat Dec 25 21:07:57 1999
+ * Modified by: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dag@brattli.net>,
+ * Copyright (c) 1998 Thomas Davis <ratbert@radiks.net>
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlap_event.h>
+
+#include <net/irda/timer.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlap_frame.h>
+#include <net/irda/qos.h>
+#include <net/irda/parameters.h>
+#include <net/irda/irlmp.h> /* irlmp_flow_indication(), ... */
+
+#include <net/irda/irda_device.h>
+
+#ifdef CONFIG_IRDA_FAST_RR
+int sysctl_fast_poll_increase = 50;
+#endif
+
+static int irlap_state_ndm (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_query (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reply (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_conn (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_setup (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_xmit_p (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_pclose (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_nrm_p (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reset (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_nrm_s (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_xmit_s (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
+ struct sk_buff *, struct irlap_info *);
+
+static const char *const irlap_event[] __maybe_unused = {
+ "DISCOVERY_REQUEST",
+ "CONNECT_REQUEST",
+ "CONNECT_RESPONSE",
+ "DISCONNECT_REQUEST",
+ "DATA_REQUEST",
+ "RESET_REQUEST",
+ "RESET_RESPONSE",
+ "SEND_I_CMD",
+ "SEND_UI_FRAME",
+ "RECV_DISCOVERY_XID_CMD",
+ "RECV_DISCOVERY_XID_RSP",
+ "RECV_SNRM_CMD",
+ "RECV_TEST_CMD",
+ "RECV_TEST_RSP",
+ "RECV_UA_RSP",
+ "RECV_DM_RSP",
+ "RECV_RD_RSP",
+ "RECV_I_CMD",
+ "RECV_I_RSP",
+ "RECV_UI_FRAME",
+ "RECV_FRMR_RSP",
+ "RECV_RR_CMD",
+ "RECV_RR_RSP",
+ "RECV_RNR_CMD",
+ "RECV_RNR_RSP",
+ "RECV_REJ_CMD",
+ "RECV_REJ_RSP",
+ "RECV_SREJ_CMD",
+ "RECV_SREJ_RSP",
+ "RECV_DISC_CMD",
+ "SLOT_TIMER_EXPIRED",
+ "QUERY_TIMER_EXPIRED",
+ "FINAL_TIMER_EXPIRED",
+ "POLL_TIMER_EXPIRED",
+ "DISCOVERY_TIMER_EXPIRED",
+ "WD_TIMER_EXPIRED",
+ "BACKOFF_TIMER_EXPIRED",
+ "MEDIA_BUSY_TIMER_EXPIRED",
+};
+
+const char *const irlap_state[] = {
+ "LAP_NDM",
+ "LAP_QUERY",
+ "LAP_REPLY",
+ "LAP_CONN",
+ "LAP_SETUP",
+ "LAP_OFFLINE",
+ "LAP_XMIT_P",
+ "LAP_PCLOSE",
+ "LAP_NRM_P",
+ "LAP_RESET_WAIT",
+ "LAP_RESET",
+ "LAP_NRM_S",
+ "LAP_XMIT_S",
+ "LAP_SCLOSE",
+ "LAP_RESET_CHECK",
+};
+
+static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info) =
+{
+ irlap_state_ndm,
+ irlap_state_query,
+ irlap_state_reply,
+ irlap_state_conn,
+ irlap_state_setup,
+ irlap_state_offline,
+ irlap_state_xmit_p,
+ irlap_state_pclose,
+ irlap_state_nrm_p,
+ irlap_state_reset_wait,
+ irlap_state_reset,
+ irlap_state_nrm_s,
+ irlap_state_xmit_s,
+ irlap_state_sclose,
+ irlap_state_reset_check,
+};
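+
+/*
+ * Note (not part of the original code): this table is indexed by the
+ * LAP_* state constants, so its entries must stay in the same order as
+ * irlap_state[] above. irlap_do_event() below simply dispatches with
+ * (*state[self->state])(self, event, skb, info), so LAP_NDM (index 0)
+ * always resolves to irlap_state_ndm(), and so on.
+ */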
+
+/*
+ * Function irlap_poll_timer_expired (data)
+ *
+ * Poll timer has expired. Normally we must now send a RR frame to the
+ * remote device
+ */
+static void irlap_poll_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
+}
+
+/*
+ * Calculate and set the time before we will have to send back the pf bit
+ * to the peer. Used in primary.
+ * Make sure that state is XMIT_P/XMIT_S when calling this function
+ * (and that nobody messed up with the state). - Jean II
+ */
+static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+#ifdef CONFIG_IRDA_FAST_RR
+ /*
+ * Send out the RR frames faster if our own transmit queue is empty, or
+ * if the peer is busy. The effect is a much faster conversation
+ */
+ if (skb_queue_empty(&self->txq) || self->remote_busy) {
+ if (self->fast_RR == TRUE) {
+ /*
+ * Assert that the fast poll timer has not reached the
+ * normal poll timer yet
+ */
+ if (self->fast_RR_timeout < timeout) {
+ /*
+ * FIXME: this should be a more configurable
+ * function
+ */
+ self->fast_RR_timeout +=
+ (sysctl_fast_poll_increase * HZ/1000);
+
+ /* Use this fast(er) timeout instead */
+ timeout = self->fast_RR_timeout;
+ }
+ } else {
+ self->fast_RR = TRUE;
+
+ /* Start with just 0 ms */
+ self->fast_RR_timeout = 0;
+ timeout = 0;
+ }
+ } else
+ self->fast_RR = FALSE;
+
+ pr_debug("%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies);
+#endif /* CONFIG_IRDA_FAST_RR */
+
+ if (timeout == 0)
+ irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
+ else
+ irda_start_timer(&self->poll_timer, timeout, self,
+ irlap_poll_timer_expired);
+}
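+
+/*
+ * Illustrative timeline, not part of the original code: with the
+ * default sysctl_fast_poll_increase of 50 (ms) and an empty transmit
+ * queue, the fast-RR path above polls the peer immediately, then after
+ * 50 ms, 100 ms, 150 ms, ... until fast_RR_timeout catches up with the
+ * normal poll timeout, at which point the regular value takes over.
+ */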
+
+/*
+ * Function irlap_do_event (event, skb, info)
+ *
+ * Rushes through the state machine without any delay. If state == XMIT
+ * then send queued data frames.
+ */
+void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret;
+
+ if (!self || self->magic != LAP_MAGIC)
+ return;
+
+ pr_debug("%s(), event = %s, state = %s\n", __func__,
+ irlap_event[event], irlap_state[self->state]);
+
+ ret = (*state[self->state])(self, event, skb, info);
+
+ /*
+ * Check if there are any pending events that need to be executed
+ */
+ switch (self->state) {
+ case LAP_XMIT_P: /* FALLTHROUGH */
+ case LAP_XMIT_S:
+ /*
+ * We just received the pf bit and are at the beginning
+ * of a new LAP transmit window.
+ * Check if there are any queued data frames, and do not
+ * try to disconnect the link if we send any data frames, since
+ * that will change the state away from XMIT
+ */
+ pr_debug("%s() : queue len = %d\n", __func__,
+ skb_queue_len(&self->txq));
+
+ if (!skb_queue_empty(&self->txq)) {
+ /* Prevent race conditions with irlap_data_request() */
+ self->local_busy = TRUE;
+
+ /* Theory of operation.
+ * We send frames until we fill the window or
+ * reach line capacity. Those frames will queue up
+ * in the device queue, and the driver will slowly
+ * send them.
+ * After each frame that we send, we poll the higher
+ * layer for more data. It's the right time to do
+ * that because the link layer needs to perform the mtt
+ * and then send the first frame, so we can afford
+ * to spend a bit of time in kernel space.
+ * The explicit flow indication allows us to minimise
+ * buffers (== lower latency), to avoid higher layer
+ * polling via timers (== less context switches) and
+ * to implement a crude scheduler - Jean II */
+
+ /* Try to send away all queued data frames */
+ while ((skb = skb_dequeue(&self->txq)) != NULL) {
+ /* Send one frame */
+ ret = (*state[self->state])(self, SEND_I_CMD,
+ skb, NULL);
+ /* Drop reference count.
+ * It will be increased as needed in
+ * irlap_send_data_xxx() */
+ kfree_skb(skb);
+
+ /* Poll the higher layers for one more frame */
+ irlmp_flow_indication(self->notify.instance,
+ FLOW_START);
+
+ if (ret == -EPROTO)
+ break; /* Try again later! */
+ }
+ /* Finished transmitting */
+ self->local_busy = FALSE;
+ } else if (self->disconnect_pending) {
+ self->disconnect_pending = FALSE;
+
+ ret = (*state[self->state])(self, DISCONNECT_REQUEST,
+ NULL, NULL);
+ }
+ break;
+/* case LAP_NDM: */
+/* case LAP_CONN: */
+/* case LAP_RESET_WAIT: */
+/* case LAP_RESET_CHECK: */
+ default:
+ break;
+ }
+}
+
+/*
+ * Function irlap_state_ndm (event, skb, frame)
+ *
+ * NDM (Normal Disconnected Mode) state
+ *
+ */
+static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ discovery_t *discovery_rsp;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case CONNECT_REQUEST:
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+
+ if (self->media_busy) {
+ /* Note : this will never happen, because we test
+ * media busy in irlap_connect_request() and
+ * postpone the event... - Jean II */
+ pr_debug("%s(), CONNECT_REQUEST: media busy!\n",
+ __func__);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_MEDIA_BUSY);
+ } else {
+ irlap_send_snrm_frame(self, &self->qos_rx);
+
+ /* Start Final-bit timer */
+ irlap_start_final_timer(self, self->final_timeout);
+
+ self->retry_count = 0;
+ irlap_next_state(self, LAP_SETUP);
+ }
+ break;
+ case RECV_SNRM_CMD:
+ /* Check if the frame contains an I field */
+ if (info) {
+ self->daddr = info->daddr;
+ self->caddr = info->caddr;
+
+ irlap_next_state(self, LAP_CONN);
+
+ irlap_connect_indication(self, skb);
+ } else {
+ pr_debug("%s(), SNRM frame does not contain an I field!\n",
+ __func__);
+ }
+ break;
+ case DISCOVERY_REQUEST:
+ IRDA_ASSERT(info != NULL, return -1;);
+
+ if (self->media_busy) {
+ pr_debug("%s(), DISCOVERY_REQUEST: media busy!\n",
+ __func__);
+ /* irlap->log.condition = MEDIA_BUSY; */
+
+ /* This will make IrLMP try again */
+ irlap_discovery_confirm(self, NULL);
+ /* Note : the discovery log is not cleaned up here,
+ * it will be done in irlap_discovery_request()
+ * Jean II */
+ return 0;
+ }
+
+ self->S = info->S;
+ self->s = info->s;
+ irlap_send_discovery_xid_frame(self, info->S, info->s, TRUE,
+ info->discovery);
+ self->frame_sent = FALSE;
+ self->s++;
+
+ irlap_start_slot_timer(self, self->slot_timeout);
+ irlap_next_state(self, LAP_QUERY);
+ break;
+ case RECV_DISCOVERY_XID_CMD:
+ IRDA_ASSERT(info != NULL, return -1;);
+
+ /* Assert that this is not the final slot */
+ if (info->s <= info->S) {
+ self->slot = irlap_generate_rand_time_slot(info->S,
+ info->s);
+ if (self->slot == info->s) {
+ discovery_rsp = irlmp_get_discovery_response();
+ discovery_rsp->data.daddr = info->daddr;
+
+ irlap_send_discovery_xid_frame(self, info->S,
+ self->slot,
+ FALSE,
+ discovery_rsp);
+ self->frame_sent = TRUE;
+ } else
+ self->frame_sent = FALSE;
+
+ /*
+ * Go to reply state until end of discovery to
+ * inhibit our own transmissions. Set the timer
+ * to not stay forever there... Jean II
+ */
+ irlap_start_query_timer(self, info->S, info->s);
+ irlap_next_state(self, LAP_REPLY);
+ } else {
+ /* This is the final slot. How is it possible ?
+ * This would happen is both discoveries are just slightly
+ * offset (if they are in sync, all packets are lost).
+ * Most often, all the discovery requests will be received
+ * in QUERY state (see my comment there), except for the
+ * last frame that will come here.
+ * The big trouble when it happen is that active discovery
+ * doesn't happen, because nobody answer the discoveries
+ * frame of the other guy, so the log shows up empty.
+ * What should we do ?
+ * Not much. It's too late to answer those discovery frames,
+ * so we just pass the info to IrLMP who will put it in the
+ * log (and post an event).
+ * Another cause would be devices that do discovery much
+ * slower than us, however the latest fixes should minimise
+ * those cases...
+ * Jean II
+ */
+ pr_debug("%s(), Receiving final discovery request, missed the discovery slots :-(\n",
+ __func__);
+
+ /* Last discovery request -> in the log */
+ irlap_discovery_indication(self, info->discovery);
+ }
+ break;
+ case MEDIA_BUSY_TIMER_EXPIRED:
+ /* A bunch of events may be postponed because the media is
+ * busy (usually immediately after we close a connection),
+ * or while we are doing discovery (state query/reply).
+ * In all those cases, the media busy flag will be cleared
+ * when it's OK for us to process those postponed events.
+ * This event is not mentioned in the state machines in the
+ * IrLAP spec. It's because they didn't consider Ultra and
+ * postponing connection request is optional.
+ * Jean II */
+#ifdef CONFIG_IRDA_ULTRA
+ /* Send any pending Ultra frames if any */
+ if (!skb_queue_empty(&self->txq_ultra)) {
+ /* We don't send the frame, just post an event.
+ * Also, previously this code was in timer.c...
+ * Jean II */
+ ret = (*state[self->state])(self, SEND_UI_FRAME,
+ NULL, NULL);
+ }
+#endif /* CONFIG_IRDA_ULTRA */
+ /* Check if we should try to connect.
+ * This code was previously in irlap_do_event() */
+ if (self->connect_pending) {
+ self->connect_pending = FALSE;
+
+ /* This one *should* not pend in this state, except
+ * if a socket tries to connect and immediately
+ * disconnects. - clear - Jean II */
+ if (self->disconnect_pending)
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ else
+ ret = (*state[self->state])(self,
+ CONNECT_REQUEST,
+ NULL, NULL);
+ self->disconnect_pending = FALSE;
+ }
+ /* Note : one way to test if this code works well (including
+ * media busy and small busy) is to create a user space
+ * application generating an Ultra packet every 3.05 sec (or
+ * 2.95 sec) and to see how it interacts with discovery.
+ * It's fairly easy to check that no packet is lost, that the
+ * packets are postponed during discovery and that after
+ * discovery indication you have a 100ms "gap".
+ * As connection requests and Ultra are now processed the same
+ * way, this avoids the tedious job of trying an IrLAP connection
+ * in all those cases...
+ * Jean II */
+ break;
+#ifdef CONFIG_IRDA_ULTRA
+ case SEND_UI_FRAME:
+ {
+ int i;
+ /* Only allowed to repeat an operation twice */
+ for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) {
+ skb = skb_dequeue(&self->txq_ultra);
+ if (skb)
+ irlap_send_ui_frame(self, skb, CBROADCAST,
+ CMD_FRAME);
+ else
+ break;
+ /* irlap_send_ui_frame() won't increase skb reference
+ * count, so no dev_kfree_skb() - Jean II */
+ }
+ if (i == 2) {
+ /* Force us to listen 500 ms again */
+ irda_device_set_media_busy(self->netdev, TRUE);
+ }
+ break;
+ }
+ case RECV_UI_FRAME:
+ /* Only accept broadcast frames in NDM mode */
+ if (info->caddr != CBROADCAST) {
+ pr_debug("%s(), not a broadcast frame!\n",
+ __func__);
+ } else
+ irlap_unitdata_indication(self, skb);
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
+ case RECV_TEST_CMD:
+ /* Remove test frame header */
+ skb_pull(skb, sizeof(struct test_frame));
+
+ /*
+ * Send response. This skb will not be sent out again, and
+ * will only be used to send out the same info as the cmd
+ */
+ irlap_send_test_frame(self, CBROADCAST, info->daddr, skb);
+ break;
+ case RECV_TEST_RSP:
+ pr_debug("%s() not implemented!\n", __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_query (event, skb, info)
+ *
+ * QUERY state
+ *
+ */
+static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case RECV_DISCOVERY_XID_RSP:
+ IRDA_ASSERT(info != NULL, return -1;);
+ IRDA_ASSERT(info->discovery != NULL, return -1;);
+
+ pr_debug("%s(), daddr=%08x\n", __func__,
+ info->discovery->data.daddr);
+
+ if (!self->discovery_log) {
+ net_warn_ratelimited("%s: discovery log is gone! maybe the discovery timeout has been set too short?\n",
+ __func__);
+ break;
+ }
+ hashbin_insert(self->discovery_log,
+ (irda_queue_t *) info->discovery,
+ info->discovery->data.daddr, NULL);
+
+ /* Keep state */
+ /* irlap_next_state(self, LAP_QUERY); */
+
+ break;
+ case RECV_DISCOVERY_XID_CMD:
+ /* Yes, it is possible to receive those frames in this mode.
+ * Note that most often the last discovery request won't
+ * occur here but in NDM state (see my comment there).
+ * What should we do ?
+ * Not much. We are currently performing our own discovery,
+ * therefore we can't answer those frames. We don't want
+ * to change state either. We just pass the info to
+ * IrLMP who will put it in the log (and post an event).
+ * Jean II
+ */
+
+ IRDA_ASSERT(info != NULL, return -1;);
+
+ pr_debug("%s(), Receiving discovery request (s = %d) while performing discovery :-(\n",
+ __func__, info->s);
+
+ /* Last discovery request ? */
+ if (info->s == 0xff)
+ irlap_discovery_indication(self, info->discovery);
+ break;
+ case SLOT_TIMER_EXPIRED:
+ /*
+ * Wait a little longer if we detect an incoming frame. This
+ * is not mentioned in the spec, but is a good thing to do,
+ * since we want to work even with devices that violate the
+ * timing requirements.
+ */
+ if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
+ pr_debug("%s(), device is slow to answer, waiting some more!\n",
+ __func__);
+ irlap_start_slot_timer(self, msecs_to_jiffies(10));
+ self->add_wait = TRUE;
+ return ret;
+ }
+ self->add_wait = FALSE;
+
+ if (self->s < self->S) {
+ irlap_send_discovery_xid_frame(self, self->S,
+ self->s, TRUE,
+ self->discovery_cmd);
+ self->s++;
+ irlap_start_slot_timer(self, self->slot_timeout);
+
+ /* Keep state */
+ irlap_next_state(self, LAP_QUERY);
+ } else {
+ /* This is the final slot! */
+ irlap_send_discovery_xid_frame(self, self->S, 0xff,
+ TRUE,
+ self->discovery_cmd);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ /*
+ * We are now finished with the discovery procedure,
+ * so now we must return the results
+ */
+ irlap_discovery_confirm(self, self->discovery_log);
+
+ /* IrLMP should now have taken care of the log */
+ self->discovery_log = NULL;
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_reply (self, event, skb, info)
+ *
+ * REPLY, we have received an XID discovery frame from a device and we
+ * are waiting for the right time slot to send a response XID frame
+ *
+ */
+static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ discovery_t *discovery_rsp;
+ int ret=0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case QUERY_TIMER_EXPIRED:
+ pr_debug("%s(), QUERY_TIMER_EXPIRED <%ld>\n",
+ __func__, jiffies);
+ irlap_next_state(self, LAP_NDM);
+ break;
+ case RECV_DISCOVERY_XID_CMD:
+ IRDA_ASSERT(info != NULL, return -1;);
+ /* Last frame? */
+ if (info->s == 0xff) {
+ del_timer(&self->query_timer);
+
+ /* info->log.condition = REMOTE; */
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_discovery_indication(self, info->discovery);
+ } else {
+ /* If it's our slot, send our reply */
+ if ((info->s >= self->slot) && (!self->frame_sent)) {
+ discovery_rsp = irlmp_get_discovery_response();
+ discovery_rsp->data.daddr = info->daddr;
+
+ irlap_send_discovery_xid_frame(self, info->S,
+ self->slot,
+ FALSE,
+ discovery_rsp);
+
+ self->frame_sent = TRUE;
+ }
+ /* Readjust our timer to accommodate devices
+ * doing faster or slower discovery than us...
+ * Jean II */
+ irlap_start_query_timer(self, info->S, info->s);
+
+ /* Keep state */
+ //irlap_next_state(self, LAP_REPLY);
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_conn (event, skb, info)
+ *
+ * CONN, we have received a SNRM command and are waiting for the upper
+ * layer to accept or refuse the connection
+ *
+ */
+static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case CONNECT_RESPONSE:
+ skb_pull(skb, sizeof(struct snrm_frame));
+
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+
+ irlap_qos_negotiate(self, skb);
+
+ irlap_initiate_connection_state(self);
+
+ /*
+ * Applying the parameters now will make sure we change speed
+ * *after* we have sent the next frame
+ */
+ irlap_apply_connection_parameters(self, FALSE);
+
+ /*
+ * Sending this frame will force a speed change after it has
+ * been sent (i.e. the frame will be sent at 9600).
+ */
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+
+#if 0
+ /*
+ * We are allowed to send two frames, but this may increase
+ * the connect latency, so let's not do it for now.
+ */
+ /* This is full of good intentions, but doesn't work in
+ * practice.
+ * After sending the first UA response, we switch the
+ * dongle to the negotiated speed, which is usually
+ * different from 9600 baud.
+ * From there, there are two solutions :
+ * 1) The other end has received the first UA response :
+ * it will set up the connection, move to state LAP_NRM_P,
+ * and will ignore and drop the second UA response.
+ * Actually, it's even worse : the other side will almost
+ * immediately send a RR that will likely collide with the
+ * UA response (depending on negotiated turnaround).
+ * 2) The other end has not received the first UA response,
+ * will stay at 9600 and will never see the second UA response.
+ * Jean II */
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+#endif
+
+ /*
+ * The WD-timer could be set to the duration of the P-timer
+ * for this case, but it is recommended to use twice the
+ * value (note 3 IrLAP p. 60).
+ */
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_NRM_S);
+
+ break;
+ case RECV_DISCOVERY_XID_CMD:
+ pr_debug("%s(), event RECV_DISCOVER_XID_CMD!\n",
+ __func__);
+ irlap_next_state(self, LAP_NDM);
+
+ break;
+ case DISCONNECT_REQUEST:
+ pr_debug("%s(), Disconnect request!\n", __func__);
+ irlap_send_dm_frame(self);
+ irlap_next_state( self, LAP_NDM);
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Function irlap_state_setup (event, skb, frame)
+ *
+ * SETUP state. The local layer has transmitted a SNRM command frame to
+ * a remote peer layer and is awaiting a reply.
+ *
+ */
+static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case FINAL_TIMER_EXPIRED:
+ if (self->retry_count < self->N3) {
+/*
+ * Perform random backoff. Wait a random number of time units, minimum
+ * duration half the time taken to transmit a SNRM frame, maximum duration
+ * 1.5 times the time taken to transmit a SNRM frame. So this time should be
+ * between 15 msecs and 45 msecs.
+ */
+ irlap_start_backoff_timer(self, msecs_to_jiffies(20 +
+ (jiffies % 30)));
+ } else {
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_FOUND_NONE);
+ }
+ break;
+ case BACKOFF_TIMER_EXPIRED:
+ irlap_send_snrm_frame(self, &self->qos_rx);
+ irlap_start_final_timer(self, self->final_timeout);
+ self->retry_count++;
+ break;
+ case RECV_SNRM_CMD:
+ pr_debug("%s(), SNRM battle!\n", __func__);
+
+ IRDA_ASSERT(skb != NULL, return 0;);
+ IRDA_ASSERT(info != NULL, return 0;);
+
+ /*
+ * The device with the largest device address wins the battle
+ * (both have sent a SNRM command!)
+ */
+ if (info &&(info->daddr > self->saddr)) {
+ del_timer(&self->final_timer);
+ irlap_initiate_connection_state(self);
+
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+
+ skb_pull(skb, sizeof(struct snrm_frame));
+
+ irlap_qos_negotiate(self, skb);
+
+ /* Send UA frame and then change link settings */
+ irlap_apply_connection_parameters(self, FALSE);
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+
+ irlap_next_state(self, LAP_NRM_S);
+ irlap_connect_confirm(self, skb);
+
+ /*
+ * The WD-timer could be set to the duration of the
+ * P-timer for this case, but it is recommended
+ * to use twice the value (note 3 IrLAP p. 60).
+ */
+ irlap_start_wd_timer(self, self->wd_timeout);
+ } else {
+ /* We just ignore the other device! */
+ irlap_next_state(self, LAP_SETUP);
+ }
+ break;
+ case RECV_UA_RSP:
+ /* Stop F-timer */
+ del_timer(&self->final_timer);
+
+ /* Initiate connection state */
+ irlap_initiate_connection_state(self);
+
+ /* Negotiate connection parameters */
+ IRDA_ASSERT(skb->len > 10, return -1;);
+
+ skb_pull(skb, sizeof(struct ua_frame));
+
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+
+ irlap_qos_negotiate(self, skb);
+
+ /* Set the new link setting *now* (before the rr frame) */
+ irlap_apply_connection_parameters(self, TRUE);
+ self->retry_count = 0;
+
+ /* Wait for turnaround time to give a chance to the other
+ * device to be ready to receive us.
+ * Note : the time to switch speed is typically larger
+ * than the turnaround time, but as we don't have the other
+ * side speed switch time, that's our best guess...
+ * Jean II */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /* This frame will actually be sent at the new speed */
+ irlap_send_rr_frame(self, CMD_FRAME);
+
+ /* The timer is set to half the normal timer to quickly
+ * detect a failure to negotiate the new connection
+ * parameters. IrLAP 6.11.3.2, note 3.
+ * Note that currently we don't process this failure
+ * properly, as we should do a quick disconnect.
+ * Jean II */
+ irlap_start_final_timer(self, self->final_timeout/2);
+ irlap_next_state(self, LAP_NRM_P);
+
+ irlap_connect_confirm(self, skb);
+ break;
+ case RECV_DM_RSP: /* FALLTHROUGH */
+ case RECV_DISC_CMD:
+ del_timer(&self->final_timer);
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_offline (self, event, skb, info)
+ *
+ * OFFLINE state, not used for now!
+ *
+ */
+static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ pr_debug("%s(), Unknown event\n", __func__);
+
+ return -1;
+}
+
+/*
+ * Function irlap_state_xmit_p (self, event, skb, info)
+ *
+ * XMIT, Only the primary station has the right to transmit, and we
+ * therefore do not expect to receive any transmissions from other
+ * stations.
+ *
+ */
+static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ switch (event) {
+ case SEND_I_CMD:
+ /*
+ * Only send frame if send-window > 0.
+ */
+ if ((self->window > 0) && (!self->remote_busy)) {
+ int nextfit;
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ struct sk_buff *skb_next;
+
+ /* With DYNAMIC_WINDOW, we keep the window size
+ * at its maximum, and adapt based on the packets we are sending.
+ * At 115k, we can send only 2 packets of 2048 bytes
+ * in a 500 ms turnaround. Without this option, we
+ * would always limit the window to 2. With this
+ * option, if we send smaller packets, we can send
+ * up to 7 of them (always depending on QoS).
+ * Jean II */
+
+ /* Look at the next skb. This is safe, as we are
+ * the only consumer of the Tx queue (if we are not,
+ * we have other problems) - Jean II */
+ skb_next = skb_peek(&self->txq);
+
+ /* Check if a subsequent skb exists and would fit in
+ * the current window (with respect to turnaround
+ * time).
+ * This allows us to properly mark the current packet
+ * with the pf bit, to avoid falling back on the
+ * second test below, and avoid waiting for the
+ * end of the window and sending an extra RR.
+ * Note : (skb_next != NULL) <=> (skb_queue_len() > 0)
+ * Jean II */
+ nextfit = ((skb_next != NULL) &&
+ ((skb_next->len + skb->len) <=
+ self->bytes_left));
+
+ /*
+ * The current packet may not fit ! Because of the test
+ * above, this should not happen any more !!!
+ * Test if we have transmitted more bytes over the
+ * link than it's possible to do with the current
+ * speed and turn-around time.
+ */
+ if((!nextfit) && (skb->len > self->bytes_left)) {
+ pr_debug("%s(), Not allowed to transmit more bytes!\n",
+ __func__);
+ /* Requeue the skb */
+ skb_queue_head(&self->txq, skb_get(skb));
+ /*
+ * We should switch state to LAP_NRM_P, but
+ * that is not possible since we must be sure
+ * that we poll the other side. Since we have
+ * used up our time, the poll timer should
+ * trigger anyway now, so we just wait for it
+ * DB
+ */
+ /*
+ * Sorry, but that's not totally true. If
+ * we send 2000B packets, we may wait another
+ * 1000B until our turnaround expires. That's
+ * why we need to be proactive in avoiding
+ * coming here. - Jean II
+ */
+ return -EPROTO;
+ }
+
+ /* Subtract space used by this skb */
+ self->bytes_left -= skb->len;
+#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ /* Window has been adjusted for the max packet
+ * size, so much simpler... - Jean II */
+ nextfit = !skb_queue_empty(&self->txq);
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ /*
+ * Send data with poll bit cleared only if window > 1
+ * and there is more frames after this one to be sent
+ */
+ if ((self->window > 1) && (nextfit)) {
+ /* More packet to send in current window */
+ irlap_send_data_primary(self, skb);
+ irlap_next_state(self, LAP_XMIT_P);
+ } else {
+ /* Final packet of window */
+ irlap_send_data_primary_poll(self, skb);
+
+ /*
+ * Make sure state machine does not try to send
+ * any more frames
+ */
+ ret = -EPROTO;
+ }
+#ifdef CONFIG_IRDA_FAST_RR
+ /* Peer may want to reply immediately */
+ self->fast_RR = FALSE;
+#endif /* CONFIG_IRDA_FAST_RR */
+ } else {
+ pr_debug("%s(), Unable to send! remote busy?\n",
+ __func__);
+ skb_queue_head(&self->txq, skb_get(skb));
+
+ /*
+ * The next ret is important, because it tells
+ * irlap_next_state _not_ to deliver more frames
+ */
+ ret = -EPROTO;
+ }
+ break;
+ case POLL_TIMER_EXPIRED:
+ pr_debug("%s(), POLL_TIMER_EXPIRED <%ld>\n",
+ __func__, jiffies);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ /* Return to NRM properly - Jean II */
+ self->window = self->window_size;
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ /* Allowed to transmit a maximum number of bytes again. */
+ self->bytes_left = self->line_capacity;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_NRM_P);
+ break;
+ case DISCONNECT_REQUEST:
+ del_timer(&self->poll_timer);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_disc_frame(self);
+ irlap_flush_all_queues(self);
+ irlap_start_final_timer(self, self->final_timeout);
+ self->retry_count = 0;
+ irlap_next_state(self, LAP_PCLOSE);
+ break;
+ case DATA_REQUEST:
+ /* Nothing to do, irlap_do_event() will send the packet
+ * when we return... - Jean II */
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
+
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_pclose (event, skb, info)
+ *
+ * PCLOSE state
+ */
+static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case RECV_UA_RSP: /* FALLTHROUGH */
+ case RECV_DM_RSP:
+ del_timer(&self->final_timer);
+
+ /* Set new link parameters */
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ case FINAL_TIMER_EXPIRED:
+ if (self->retry_count < self->N3) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_disc_frame(self);
+ irlap_start_final_timer(self, self->final_timeout);
+ self->retry_count++;
+ /* Keep state */
+ } else {
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_nrm_p (self, event, skb, info)
+ *
+ * NRM_P (Normal Response Mode as Primary), The primary station has given
+ *    permission to a secondary station to transmit IrLAP response frames
+ * (by sending a frame with the P bit set). The primary station will not
+ * transmit any frames and is expecting to receive frames only from the
+ *    secondary to which transmission permission has been given.
+ */
+static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+ int ns_status;
+ int nr_status;
+
+ switch (event) {
+ case RECV_I_RSP: /* Optimize for the common case */
+ if (unlikely(skb->len <= LAP_ADDR_HEADER + LAP_CTRL_HEADER)) {
+ /*
+ * Input validation check: a stir4200/mcp2150
+ * combination sometimes results in an empty i:rsp.
+ * This makes no sense; we can just ignore the frame
+ * and send an rr:cmd immediately. This happens before
+			 * changing nr or ns, so it triggers a retransmit
+ */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ /* Keep state */
+ break;
+ }
+ /* FIXME: must check for remote_busy below */
+#ifdef CONFIG_IRDA_FAST_RR
+ /*
+		 * Reset the fast_RR so we can use the fast RR code at
+		 * full speed the next time, since the peer may have more
+		 * frames to transmit
+ */
+ self->fast_RR = FALSE;
+#endif /* CONFIG_IRDA_FAST_RR */
+		IRDA_ASSERT(info != NULL, return -1;);
+
+ ns_status = irlap_validate_ns_received(self, info->ns);
+ nr_status = irlap_validate_nr_received(self, info->nr);
+
+ /*
+ * Check for expected I(nformation) frame
+ */
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) {
+
+ /* Update Vr (next frame for us to receive) */
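+			/* (Vr, like Vs/Ns/Nr, is a 3-bit IrLAP sequence
+			 * number, hence the modulo-8 arithmetic.) */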
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received, cleanup our retry queue */
+ irlap_update_nr_received(self, info->nr);
+
+ /*
+ * Got expected NR, so reset the
+			 * retry_count. This is not done by the IrLAP spec,
+ * which is strange!
+ */
+ self->retry_count = 0;
+ self->ack_required = TRUE;
+
+ /* poll bit cleared? */
+ if (!info->pf) {
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_P);
+
+ irlap_data_indication(self, skb, FALSE);
+ } else {
+ /* No longer waiting for pf */
+ del_timer(&self->final_timer);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /* Call higher layer *before* changing state
+ * to give them a chance to send data in the
+ * next LAP frame.
+ * Jean II */
+ irlap_data_indication(self, skb, FALSE);
+
+ /* XMIT states are the most dangerous state
+ * to be in, because user requests are
+ * processed directly and may change state.
+				 * On the other hand, in NRM_P, those
+ * requests are queued and we will process
+ * them when we return to irlap_do_event().
+ * Jean II
+ */
+ irlap_next_state(self, LAP_XMIT_P);
+
+ /* This is the last frame.
+ * Make sure it's always called in XMIT state.
+ * - Jean II */
+ irlap_start_poll_timer(self, self->poll_timeout);
+ }
+ break;
+
+ }
+ /* Unexpected next to send (Ns) */
+ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
+ {
+ if (!info->pf) {
+ irlap_update_nr_received(self, info->nr);
+
+ /*
+ * Wait until the last frame before doing
+ * anything
+ */
+
+ /* Keep state */
+ irlap_next_state(self, LAP_NRM_P);
+ } else {
+ pr_debug("%s(), missing or duplicate frame!\n",
+ __func__);
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+
+ self->ack_required = FALSE;
+
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_NRM_P);
+ }
+ break;
+ }
+ /*
+ * Unexpected next to receive (Nr)
+ */
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
+ {
+ if (info->pf) {
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /* Resend rejected frames */
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+
+ self->ack_required = FALSE;
+
+ /* Make sure we account for the time
+				 * to transmit our frames. See comments
+ * in irlap_send_data_primary_poll().
+ * Jean II */
+ irlap_start_final_timer(self, 2 * self->final_timeout);
+
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_P);
+
+ irlap_data_indication(self, skb, FALSE);
+ } else {
+ /*
+ * Do not resend frames until the last
+ * frame has arrived from the other
+ * device. This is not documented in
+ * IrLAP!!
+ */
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ self->ack_required = FALSE;
+
+ /* Keep state, do not move this line!*/
+ irlap_next_state(self, LAP_NRM_P);
+
+ irlap_data_indication(self, skb, FALSE);
+ }
+ break;
+ }
+ /*
+ * Unexpected next to send (Ns) and next to receive (Nr)
+ * Not documented by IrLAP!
+ */
+ if ((ns_status == NS_UNEXPECTED) &&
+ (nr_status == NR_UNEXPECTED))
+ {
+ pr_debug("%s(), unexpected nr and ns!\n",
+ __func__);
+ if (info->pf) {
+ /* Resend rejected frames */
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+
+ /* Give peer some time to retransmit!
+ * But account for our own Tx. */
+ irlap_start_final_timer(self, 2 * self->final_timeout);
+
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_P);
+ } else {
+ /* Update Nr received */
+ /* irlap_update_nr_received( info->nr); */
+
+ self->ack_required = FALSE;
+ }
+ break;
+ }
+
+ /*
+ * Invalid NR or NS
+ */
+ if ((nr_status == NR_INVALID) || (ns_status == NS_INVALID)) {
+ if (info->pf) {
+ del_timer(&self->final_timer);
+
+ irlap_next_state(self, LAP_RESET_WAIT);
+
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
+ self->xmitflag = TRUE;
+ } else {
+ del_timer(&self->final_timer);
+
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
+
+ self->xmitflag = FALSE;
+ }
+ break;
+ }
+ pr_debug("%s(), Not implemented!\n", __func__);
+ pr_debug("%s(), event=%s, ns_status=%d, nr_status=%d\n",
+ __func__, irlap_event[event], ns_status, nr_status);
+ break;
+ case RECV_UI_FRAME:
+ /* Poll bit cleared? */
+ if (!info->pf) {
+ irlap_data_indication(self, skb, TRUE);
+ irlap_next_state(self, LAP_NRM_P);
+ } else {
+ del_timer(&self->final_timer);
+ irlap_data_indication(self, skb, TRUE);
+ irlap_next_state(self, LAP_XMIT_P);
+ pr_debug("%s: RECV_UI_FRAME: next state %s\n",
+ __func__, irlap_state[self->state]);
+ irlap_start_poll_timer(self, self->poll_timeout);
+ }
+ break;
+ case RECV_RR_RSP:
+ /*
+		 * If you get an RR, the remote isn't busy anymore,
+ * no matter what the NR
+ */
+ self->remote_busy = FALSE;
+
+ /* Stop final timer */
+ del_timer(&self->final_timer);
+
+ /*
+ * Nr as expected?
+ */
+ ret = irlap_validate_nr_received(self, info->nr);
+ if (ret == NR_EXPECTED) {
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /*
+ * Got expected NR, so reset the retry_count. This
+			 * is not done by the IrLAP standard, which is
+ * strange! DB.
+ */
+ self->retry_count = 0;
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ irlap_next_state(self, LAP_XMIT_P);
+
+ /* Start poll timer */
+ irlap_start_poll_timer(self, self->poll_timeout);
+ } else if (ret == NR_UNEXPECTED) {
+ IRDA_ASSERT(info != NULL, return -1;);
+ /*
+ * Unexpected nr!
+ */
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ pr_debug("RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
+ self->retry_count, info->nr, self->va,
+ self->vs, self->vr);
+
+ /* Resend rejected frames */
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+ irlap_start_final_timer(self, self->final_timeout * 2);
+
+ irlap_next_state(self, LAP_NRM_P);
+ } else if (ret == NR_INVALID) {
+ pr_debug("%s(), Received RR with invalid nr !\n",
+ __func__);
+
+ irlap_next_state(self, LAP_RESET_WAIT);
+
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
+ self->xmitflag = TRUE;
+ }
+ break;
+ case RECV_RNR_RSP:
+ IRDA_ASSERT(info != NULL, return -1;);
+
+ /* Stop final timer */
+ del_timer(&self->final_timer);
+ self->remote_busy = TRUE;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+ irlap_next_state(self, LAP_XMIT_P);
+
+ /* Start poll timer */
+ irlap_start_poll_timer(self, self->poll_timeout);
+ break;
+ case RECV_FRMR_RSP:
+ del_timer(&self->final_timer);
+ self->xmitflag = TRUE;
+ irlap_next_state(self, LAP_RESET_WAIT);
+ irlap_reset_indication(self);
+ break;
+ case FINAL_TIMER_EXPIRED:
+ /*
+		 * We are allowed to wait an additional 300 ms if the
+		 * final timer expires while we are in the middle
+ * of receiving a frame (page 45, IrLAP). Check that
+ * we only do this once for each frame.
+ */
+ if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
+ pr_debug("FINAL_TIMER_EXPIRED when receiving a frame! Waiting a little bit more!\n");
+ irlap_start_final_timer(self, msecs_to_jiffies(300));
+
+ /*
+ * Don't allow this to happen one more time in a row,
+ * or else we can get a pretty tight loop here if
+			 * we only receive half a frame. DB.
+ */
+ self->add_wait = TRUE;
+ break;
+ }
+ self->add_wait = FALSE;
+
+ /* N2 is the disconnect timer. Until we reach it, we retry */
+ if (self->retry_count < self->N2) {
+ if (skb_peek(&self->wx_list) == NULL) {
+ /* Retry sending the pf bit to the secondary */
+				pr_debug("nrm_p: resending rr\n");
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else {
+				pr_debug("nrm_p: resend frames\n");
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+ }
+
+ irlap_start_final_timer(self, self->final_timeout);
+ self->retry_count++;
+ pr_debug("irlap_state_nrm_p: FINAL_TIMER_EXPIRED: retry_count=%d\n",
+ self->retry_count);
+
+ /* Early warning event. I'm using a pretty liberal
+ * interpretation of the spec and generate an event
+			 * every time the retry count is a multiple of N1 (and
+			 * not only the first time). This allows the application
+			 * to know precisely if connectivity restarts...
+ * Jean II */
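+			/* For illustration only (values assumed, not from the
+			 * spec): with N1 = 4 and final_timeout = 500 ms this
+			 * would surface STATUS_NO_ACTIVITY roughly every 2 s
+			 * of silence. */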
+			if ((self->retry_count % self->N1) == 0)
+ irlap_status_indication(self,
+ STATUS_NO_ACTIVITY);
+
+ /* Keep state */
+ } else {
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+ irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+ }
+ break;
+ case RECV_REJ_RSP:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+ irlap_start_final_timer(self, 2 * self->final_timeout);
+ break;
+ case RECV_SREJ_RSP:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frame(self, CMD_FRAME);
+ irlap_start_final_timer(self, 2 * self->final_timeout);
+ break;
+ case RECV_RD_RSP:
+ pr_debug("%s(), RECV_RD_RSP\n", __func__);
+
+ irlap_flush_all_queues(self);
+ irlap_next_state(self, LAP_XMIT_P);
+ /* Call back the LAP state machine to do a proper disconnect */
+ irlap_disconnect_request(self);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_reset_wait (event, skb, info)
+ *
+ *    We have informed the service user of a reset condition, and are
+ *    awaiting a reset or disconnect request.
+ *
+ */
+static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s(), event = %s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case RESET_REQUEST:
+ if (self->xmitflag) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_snrm_frame(self, NULL);
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_RESET);
+ } else {
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_RESET);
+ }
+ break;
+ case DISCONNECT_REQUEST:
+		irlap_wait_min_turn_around(self, &self->qos_tx);
+		irlap_send_disc_frame(self);
+		irlap_flush_all_queues(self);
+		irlap_start_final_timer(self, self->final_timeout);
+		self->retry_count = 0;
+		irlap_next_state(self, LAP_PCLOSE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_reset (self, event, skb, info)
+ *
+ *    We have sent an SNRM reset command to the peer layer, and are awaiting
+ *    a reply.
+ *
+ */
+static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s(), event = %s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case RECV_DISC_CMD:
+ del_timer(&self->final_timer);
+
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+
+ break;
+ case RECV_UA_RSP:
+ del_timer(&self->final_timer);
+
+ /* Initiate connection state */
+ irlap_initiate_connection_state(self);
+
+ irlap_reset_confirm();
+
+ self->remote_busy = FALSE;
+
+ irlap_next_state(self, LAP_XMIT_P);
+
+ irlap_start_poll_timer(self, self->poll_timeout);
+
+ break;
+ case FINAL_TIMER_EXPIRED:
+ if (self->retry_count < 3) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+ irlap_send_snrm_frame(self, self->qos_dev);
+
+ self->retry_count++; /* Experimental!! */
+
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_RESET);
+ } else if (self->retry_count >= self->N3) {
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+ }
+ break;
+ case RECV_SNRM_CMD:
+ /*
+ * SNRM frame is not allowed to contain an I-field in this
+ * state
+ */
+ if (!info) {
+ pr_debug("%s(), RECV_SNRM_CMD\n", __func__);
+ irlap_initiate_connection_state(self);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+ irlap_reset_confirm();
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_NDM);
+ } else {
+ pr_debug("%s(), SNRM frame contained an I field!\n",
+ __func__);
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
+
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_xmit_s (event, skb, info)
+ *
+ * XMIT_S, The secondary station has been given the right to transmit,
+ * and we therefore do not expect to receive any transmissions from other
+ * stations.
+ */
+static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -ENODEV;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
+
+ switch (event) {
+ case SEND_I_CMD:
+ /*
+ * Send frame only if send window > 0
+ */
+ if ((self->window > 0) && (!self->remote_busy)) {
+ int nextfit;
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ struct sk_buff *skb_next;
+
+ /*
+ * Same deal as in irlap_state_xmit_p(), so see
+ * the comments at that point.
+ * We are the secondary, so there are only subtle
+ * differences. - Jean II
+ */
+
+ /* Check if a subsequent skb exist and would fit in
+ * the current window (with respect to turnaround
+ * time). - Jean II */
+ skb_next = skb_peek(&self->txq);
+ nextfit = ((skb_next != NULL) &&
+ ((skb_next->len + skb->len) <=
+ self->bytes_left));
+
+ /*
+ * Test if we have transmitted more bytes over the
+			 * link than it's possible to do with the current
+ * speed and turn-around-time.
+ */
+			if ((!nextfit) && (skb->len > self->bytes_left)) {
+ pr_debug("%s(), Not allowed to transmit more bytes!\n",
+ __func__);
+ /* Requeue the skb */
+ skb_queue_head(&self->txq, skb_get(skb));
+
+ /*
+ * Switch to NRM_S, this is only possible
+ * when we are in secondary mode, since we
+ * must be sure that we don't miss any RR
+ * frames
+ */
+ self->window = self->window_size;
+ self->bytes_left = self->line_capacity;
+ irlap_start_wd_timer(self, self->wd_timeout);
+
+ irlap_next_state(self, LAP_NRM_S);
+				/* Slight difference from the primary:
+ * here we would wait for the other side to
+ * expire the turnaround. - Jean II */
+
+ return -EPROTO; /* Try again later */
+ }
+ /* Subtract space used by this skb */
+ self->bytes_left -= skb->len;
+#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ /* Window has been adjusted for the max packet
+ * size, so much simpler... - Jean II */
+ nextfit = !skb_queue_empty(&self->txq);
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ /*
+ * Send data with final bit cleared only if window > 1
+			 * and there are more frames to be sent
+ */
+ if ((self->window > 1) && (nextfit)) {
+ irlap_send_data_secondary(self, skb);
+ irlap_next_state(self, LAP_XMIT_S);
+ } else {
+ irlap_send_data_secondary_final(self, skb);
+ irlap_next_state(self, LAP_NRM_S);
+
+ /*
+ * Make sure state machine does not try to send
+ * any more frames
+ */
+ ret = -EPROTO;
+ }
+ } else {
+ pr_debug("%s(), Unable to send!\n", __func__);
+ skb_queue_head(&self->txq, skb_get(skb));
+ ret = -EPROTO;
+ }
+ break;
+ case DISCONNECT_REQUEST:
+ irlap_send_rd_frame(self);
+ irlap_flush_all_queues(self);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_SCLOSE);
+ break;
+ case DATA_REQUEST:
+ /* Nothing to do, irlap_do_event() will send the packet
+ * when we return... - Jean II */
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
+
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_nrm_s (event, skb, info)
+ *
+ *    NRM_S (Normal Response Mode as Secondary) state; in this state we are
+ * expecting to receive frames from the primary station
+ *
+ */
+static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ int ns_status;
+ int nr_status;
+ int ret = 0;
+
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ switch (event) {
+ case RECV_I_CMD: /* Optimize for the common case */
+ /* FIXME: must check for remote_busy below */
+ pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n",
+ __func__, irlap_event[event], info->nr,
+ self->vs, info->ns, self->vr, info->pf);
+
+ self->retry_count = 0;
+
+ ns_status = irlap_validate_ns_received(self, info->ns);
+ nr_status = irlap_validate_nr_received(self, info->nr);
+ /*
+ * Check for expected I(nformation) frame
+ */
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) {
+
+ /* Update Vr (next frame for us to receive) */
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /*
+ * poll bit cleared?
+ */
+ if (!info->pf) {
+
+ self->ack_required = TRUE;
+
+ /*
+ * Starting WD-timer here is optional, but
+ * not recommended. Note 6 IrLAP p. 83
+ */
+#if 0
+ irda_start_timer(WD_TIMER, self->wd_timeout);
+#endif
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_S);
+
+ irlap_data_indication(self, skb, FALSE);
+ break;
+ } else {
+ /*
+ * We should wait before sending RR, and
+ * also before changing to XMIT_S
+ * state. (note 1, IrLAP p. 82)
+ */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /*
+ * Give higher layers a chance to
+ * immediately reply with some data before
+ * we decide if we should send a RR frame
+ * or not
+ */
+ irlap_data_indication(self, skb, FALSE);
+
+ /* Any pending data requests? */
+ if (!skb_queue_empty(&self->txq) &&
+ (self->window > 0))
+ {
+ self->ack_required = TRUE;
+
+ del_timer(&self->wd_timer);
+
+ irlap_next_state(self, LAP_XMIT_S);
+ } else {
+ irlap_send_rr_frame(self, RSP_FRAME);
+ irlap_start_wd_timer(self,
+ self->wd_timeout);
+
+ /* Keep the state */
+ irlap_next_state(self, LAP_NRM_S);
+ }
+ break;
+ }
+ }
+ /*
+ * Check for Unexpected next to send (Ns)
+ */
+ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
+ {
+ /* Unexpected next to send, with final bit cleared */
+ if (!info->pf) {
+ irlap_update_nr_received(self, info->nr);
+
+ irlap_start_wd_timer(self, self->wd_timeout);
+ } else {
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, RSP_FRAME);
+
+ irlap_start_wd_timer(self, self->wd_timeout);
+ }
+ break;
+ }
+
+ /*
+ * Unexpected Next to Receive(NR) ?
+ */
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
+ {
+ if (info->pf) {
+ pr_debug("RECV_I_RSP: frame(s) lost\n");
+
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /* Resend rejected frames */
+ irlap_resend_rejected_frames(self, RSP_FRAME);
+
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_S);
+
+ irlap_data_indication(self, skb, FALSE);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ break;
+ }
+ /*
+ * This is not documented in IrLAP!! Unexpected NR
+ * with poll bit cleared
+ */
+ if (!info->pf) {
+ self->vr = (self->vr + 1) % 8;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /* Keep state, do not move this line */
+ irlap_next_state(self, LAP_NRM_S);
+
+ irlap_data_indication(self, skb, FALSE);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ }
+ break;
+ }
+
+		if (nr_status == NR_INVALID) {
+			pr_debug("NRM_S, NR_INVALID not implemented!\n");
+		}
+		if (ns_status == NS_INVALID) {
+			pr_debug("NRM_S, NS_INVALID not implemented!\n");
+		}
+ break;
+ case RECV_UI_FRAME:
+ /*
+ * poll bit cleared?
+ */
+ if (!info->pf) {
+ irlap_data_indication(self, skb, TRUE);
+ irlap_next_state(self, LAP_NRM_S); /* Keep state */
+ } else {
+ /*
+ * Any pending data requests?
+ */
+ if (!skb_queue_empty(&self->txq) &&
+ (self->window > 0) && !self->remote_busy)
+ {
+ irlap_data_indication(self, skb, TRUE);
+
+ del_timer(&self->wd_timer);
+
+ irlap_next_state(self, LAP_XMIT_S);
+ } else {
+ irlap_data_indication(self, skb, TRUE);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ irlap_send_rr_frame(self, RSP_FRAME);
+ self->ack_required = FALSE;
+
+ irlap_start_wd_timer(self, self->wd_timeout);
+
+ /* Keep the state */
+ irlap_next_state(self, LAP_NRM_S);
+ }
+ }
+ break;
+ case RECV_RR_CMD:
+ self->retry_count = 0;
+
+ /*
+ * Nr as expected?
+ */
+ nr_status = irlap_validate_nr_received(self, info->nr);
+ if (nr_status == NR_EXPECTED) {
+ if (!skb_queue_empty(&self->txq) &&
+ (self->window > 0)) {
+ self->remote_busy = FALSE;
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+ del_timer(&self->wd_timer);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_next_state(self, LAP_XMIT_S);
+ } else {
+ self->remote_busy = FALSE;
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_start_wd_timer(self, self->wd_timeout);
+
+				/* Note: if the link is idle (this case),
+				 * we never go into XMIT_S, so we never get a
+				 * chance to process any DISCONNECT_REQUEST.
+				 * Do it now! - Jean II */
+ if (self->disconnect_pending) {
+ /* Disconnect */
+ irlap_send_rd_frame(self);
+ irlap_flush_all_queues(self);
+
+ irlap_next_state(self, LAP_SCLOSE);
+ } else {
+ /* Just send back pf bit */
+ irlap_send_rr_frame(self, RSP_FRAME);
+
+ irlap_next_state(self, LAP_NRM_S);
+ }
+ }
+ } else if (nr_status == NR_UNEXPECTED) {
+ self->remote_busy = FALSE;
+ irlap_update_nr_received(self, info->nr);
+ irlap_resend_rejected_frames(self, RSP_FRAME);
+
+ irlap_start_wd_timer(self, self->wd_timeout);
+
+ /* Keep state */
+ irlap_next_state(self, LAP_NRM_S);
+ } else {
+ pr_debug("%s(), invalid nr not implemented!\n",
+ __func__);
+ }
+ break;
+ case RECV_SNRM_CMD:
+ /* SNRM frame is not allowed to contain an I-field */
+ if (!info) {
+ del_timer(&self->wd_timer);
+ pr_debug("%s(), received SNRM cmd\n", __func__);
+ irlap_next_state(self, LAP_RESET_CHECK);
+
+ irlap_reset_indication(self);
+ } else {
+ pr_debug("%s(), SNRM frame contained an I-field!\n",
+ __func__);
+
+ }
+ break;
+ case RECV_REJ_CMD:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, RSP_FRAME);
+ } else
+ irlap_resend_rejected_frames(self, RSP_FRAME);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ break;
+ case RECV_SREJ_CMD:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, RSP_FRAME);
+ } else
+ irlap_resend_rejected_frame(self, RSP_FRAME);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ break;
+ case WD_TIMER_EXPIRED:
+ /*
+ * Wait until retry_count * n matches negotiated threshold/
+ * disconnect time (note 2 in IrLAP p. 82)
+ *
+ * Similar to irlap_state_nrm_p() -> FINAL_TIMER_EXPIRED
+		 * Note: self->wd_timeout = (self->final_timeout * 2),
+		 * which explains why we use (self->N2 / 2) here!
+ * Jean II
+ */
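+		/* Illustration (values assumed): if final_timeout is 500 ms,
+		 * wd_timeout is 1 s, so (N2 / 2) WD expiries cover the same
+		 * wall-clock time as N2 final-timer expiries on the primary
+		 * side. */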
+ pr_debug("%s(), retry_count = %d\n", __func__,
+ self->retry_count);
+
+ if (self->retry_count < (self->N2 / 2)) {
+ /* No retry, just wait for primary */
+ irlap_start_wd_timer(self, self->wd_timeout);
+ self->retry_count++;
+
+			if ((self->retry_count % (self->N1 / 2)) == 0)
+ irlap_status_indication(self,
+ STATUS_NO_ACTIVITY);
+ } else {
+ irlap_apply_default_connection_parameters(self);
+
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+ irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+ }
+ break;
+ case RECV_DISC_CMD:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ /* Send disconnect response */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_ua_response_frame(self, NULL);
+
+ del_timer(&self->wd_timer);
+ irlap_flush_all_queues(self);
+ /* Set default link parameters */
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ case RECV_DISCOVERY_XID_CMD:
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, RSP_FRAME);
+ self->ack_required = TRUE;
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_NRM_S);
+
+ break;
+ case RECV_TEST_CMD:
+ /* Remove test frame header (only LAP header in NRM) */
+ skb_pull(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_start_wd_timer(self, self->wd_timeout);
+
+ /* Send response (info will be copied) */
+ irlap_send_test_frame(self, self->caddr, info->daddr, skb);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
+
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlap_state_sclose (self, event, skb, info)
+ */
+static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
+{
+ IRDA_ASSERT(self != NULL, return -ENODEV;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
+
+ switch (event) {
+ case RECV_DISC_CMD:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ /* Send disconnect response */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_ua_response_frame(self, NULL);
+
+ del_timer(&self->wd_timer);
+ /* Set default link parameters */
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ case RECV_DM_RSP:
+ /* IrLAP-1.1 p.82: in SCLOSE, S and I type RSP frames
+ * shall take us down into default NDM state, like DM_RSP
+ */
+ case RECV_RR_RSP:
+ case RECV_RNR_RSP:
+ case RECV_REJ_RSP:
+ case RECV_SREJ_RSP:
+ case RECV_I_RSP:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ del_timer(&self->wd_timer);
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ case WD_TIMER_EXPIRED:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ default:
+ /* IrLAP-1.1 p.82: in SCLOSE, basically any received frame
+ * with pf=1 shall restart the wd-timer and resend the rd:rsp
+ */
+ if (info != NULL && info->pf) {
+ del_timer(&self->wd_timer);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rd_frame(self);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ break; /* stay in SCLOSE */
+ }
+
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
+
+ break;
+ }
+
+ return -1;
+}
+
+static int irlap_state_reset_check(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ int ret = 0;
+
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
+
+ IRDA_ASSERT(self != NULL, return -ENODEV;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
+
+ switch (event) {
+ case RESET_RESPONSE:
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+ irlap_initiate_connection_state(self);
+ irlap_start_wd_timer(self, WD_TIMEOUT);
+ irlap_flush_all_queues(self);
+
+ irlap_next_state(self, LAP_NRM_S);
+ break;
+ case DISCONNECT_REQUEST:
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rd_frame(self);
+ irlap_start_wd_timer(self, WD_TIMEOUT);
+ irlap_next_state(self, LAP_SCLOSE);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
+
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/staging/irda/net/irlap_frame.c b/drivers/staging/irda/net/irlap_frame.c
new file mode 100644
index 000000000000..debda3de4726
--- /dev/null
+++ b/drivers/staging/irda/net/irlap_frame.c
@@ -0,0 +1,1407 @@
+/*********************************************************************
+ *
+ * Filename: irlap_frame.c
+ * Version: 1.0
+ * Description: Build and transmit IrLAP frames
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 10:27:26 1997
+ * Modified at: Wed Jan 5 08:59:04 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/irda.h>
+#include <linux/slab.h>
+
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/irlap.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/timer.h>
+#include <net/irda/irlap_frame.h>
+#include <net/irda/qos.h>
+
+static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
+ int command);
+
+/*
+ * Function irlap_insert_info (self, skb)
+ *
+ * Insert minimum turnaround time and speed information into the skb. We
+ *    need to do this since it's per-packet information. It is safe to
+ * have this function inlined since it's only called from one place
+ */
+static inline void irlap_insert_info(struct irlap_cb *self,
+ struct sk_buff *skb)
+{
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+
+ /*
+ * Insert MTT (min. turn time) and speed into skb, so that the
+ * device driver knows which settings to use
+ */
+ cb->magic = LAP_MAGIC;
+ cb->mtt = self->mtt_required;
+ cb->next_speed = self->speed;
+
+ /* Reset */
+ self->mtt_required = 0;
+
+ /*
+ * Delay equals negotiated BOFs count, plus the number of BOFs to
+ * force the negotiated minimum turnaround time
+ */
+ cb->xbofs = self->bofs_count;
+ cb->next_xbofs = self->next_bofs;
+ cb->xbofs_delay = self->xbofs_delay;
+
+ /* Reset XBOF's delay (used only for getting min turn time) */
+ self->xbofs_delay = 0;
+ /* Put the correct xbofs value for the next packet */
+ self->bofs_count = self->next_bofs;
+}
+
+/*
+ * Function irlap_queue_xmit (self, skb)
+ *
+ * A little wrapper for dev_queue_xmit, so we can insert some common
+ * code into it.
+ */
+void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
+{
+ /* Some common init stuff */
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->priority = TC_PRIO_BESTEFFORT;
+
+ irlap_insert_info(self, skb);
+
+ if (unlikely(self->mode & IRDA_MODE_MONITOR)) {
+ pr_debug("%s(): %s is in monitor mode\n", __func__,
+ self->netdev->name);
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ dev_queue_xmit(skb);
+}
+
+/*
+ * Function irlap_send_snrm_frame (self, qos)
+ *
+ * Transmits a connect SNRM command frame
+ */
+void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
+{
+ struct sk_buff *tx_skb;
+ struct snrm_frame *frame;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Allocate frame */
+ tx_skb = alloc_skb(sizeof(struct snrm_frame) +
+ IRLAP_NEGOCIATION_PARAMS_LEN,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 2);
+
+ /* Insert connection address field */
+ if (qos)
+ frame->caddr = CMD_FRAME | CBROADCAST;
+ else
+ frame->caddr = CMD_FRAME | self->caddr;
+
+ /* Insert control field */
+ frame->control = SNRM_CMD | PF_BIT;
+
+ /*
+ * If we are establishing a connection then insert QoS parameters
+ */
+ if (qos) {
+ skb_put(tx_skb, 9); /* 25 left */
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(self->daddr);
+
+ frame->ncaddr = self->caddr;
+
+ ret = irlap_insert_qos_negotiation_params(self, tx_skb);
+ if (ret < 0) {
+ dev_kfree_skb(tx_skb);
+ return;
+ }
+ }
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_recv_snrm_cmd (skb, info)
+ *
+ * Received SNRM (Set Normal Response Mode) command frame
+ *
+ */
+static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ struct snrm_frame *frame;
+
+	if (pskb_may_pull(skb, sizeof(struct snrm_frame))) {
+ frame = (struct snrm_frame *) skb->data;
+
+ /* Copy the new connection address ignoring the C/R bit */
+ info->caddr = frame->ncaddr & 0xFE;
+
+ /* Check if the new connection address is valid */
+ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) {
+ pr_debug("%s(), invalid connection address!\n",
+ __func__);
+ return;
+ }
+
+ /* Copy peer device address */
+ info->daddr = le32_to_cpu(frame->saddr);
+ info->saddr = le32_to_cpu(frame->daddr);
+
+ /* Only accept if addressed directly to us */
+ if (info->saddr != self->saddr) {
+ pr_debug("%s(), not addressed to us!\n",
+ __func__);
+ return;
+ }
+ irlap_do_event(self, RECV_SNRM_CMD, skb, info);
+ } else {
+		/* Signal that this SNRM frame does not contain an I-field */
+ irlap_do_event(self, RECV_SNRM_CMD, skb, NULL);
+ }
+}
+
+/*
+ * Function irlap_send_ua_response_frame (qos)
+ *
+ * Send UA (Unnumbered Acknowledgement) frame
+ *
+ */
+void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
+{
+ struct sk_buff *tx_skb;
+ struct ua_frame *frame;
+ int ret;
+
+ pr_debug("%s() <%ld>\n", __func__, jiffies);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Allocate frame */
+ tx_skb = alloc_skb(sizeof(struct ua_frame) +
+ IRLAP_NEGOCIATION_PARAMS_LEN,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 10);
+
+ /* Build UA response */
+ frame->caddr = self->caddr;
+ frame->control = UA_RSP | PF_BIT;
+
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(self->daddr);
+
+ /* Should we send QoS negotiation parameters? */
+ if (qos) {
+ ret = irlap_insert_qos_negotiation_params(self, tx_skb);
+ if (ret < 0) {
+ dev_kfree_skb(tx_skb);
+ return;
+ }
+ }
+
+ irlap_queue_xmit(self, tx_skb);
+}
+
+
+/*
+ * Function irlap_send_dm_frame (void)
+ *
+ * Send disconnected mode (DM) frame
+ *
+ */
+void irlap_send_dm_frame(struct irlap_cb *self)
+{
+ struct sk_buff *tx_skb = NULL;
+ struct dm_frame *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ tx_skb = alloc_skb(sizeof(struct dm_frame), GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 2);
+
+ if (self->state == LAP_NDM)
+ frame->caddr = CBROADCAST;
+ else
+ frame->caddr = self->caddr;
+
+ frame->control = DM_RSP | PF_BIT;
+
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_send_disc_frame (void)
+ *
+ * Send disconnect (DISC) frame
+ *
+ */
+void irlap_send_disc_frame(struct irlap_cb *self)
+{
+ struct sk_buff *tx_skb = NULL;
+ struct disc_frame *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ tx_skb = alloc_skb(sizeof(struct disc_frame), GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 2);
+
+ frame->caddr = self->caddr | CMD_FRAME;
+ frame->control = DISC_CMD | PF_BIT;
+
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_send_discovery_xid_frame (S, s, command)
+ *
+ * Build and transmit a XID (eXchange station IDentifier) discovery
+ * frame.
+ */
+void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
+ __u8 command, discovery_t *discovery)
+{
+ struct sk_buff *tx_skb = NULL;
+ struct xid_frame *frame;
+ __u32 bcast = BROADCAST;
+ __u8 *info;
+
+ pr_debug("%s(), s=%d, S=%d, command=%d\n", __func__,
+ s, S, command);
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(discovery != NULL, return;);
+
+ tx_skb = alloc_skb(sizeof(struct xid_frame) + IRLAP_DISCOVERY_INFO_LEN,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ skb_put(tx_skb, 14);
+ frame = (struct xid_frame *) tx_skb->data;
+
+ if (command) {
+ frame->caddr = CBROADCAST | CMD_FRAME;
+ frame->control = XID_CMD | PF_BIT;
+ } else {
+ frame->caddr = CBROADCAST;
+ frame->control = XID_RSP | PF_BIT;
+ }
+ frame->ident = XID_FORMAT;
+
+ frame->saddr = cpu_to_le32(self->saddr);
+
+ if (command)
+ frame->daddr = cpu_to_le32(bcast);
+ else
+ frame->daddr = cpu_to_le32(discovery->data.daddr);
+
+ switch (S) {
+ case 1:
+ frame->flags = 0x00;
+ break;
+ case 6:
+ frame->flags = 0x01;
+ break;
+ case 8:
+ frame->flags = 0x02;
+ break;
+ case 16:
+ frame->flags = 0x03;
+ break;
+ default:
+ frame->flags = 0x02;
+ break;
+ }
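+	/* (The 2-bit flags value encodes the slot count; the reverse
+	 * mapping is done in irlap_recv_discovery_xid_cmd() below.) */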
+
+ frame->slotnr = s;
+ frame->version = 0x00;
+
+ /*
+ * Provide info for final slot only in commands, and for all
+ * responses. Send the second byte of the hint only if the
+ * EXTENSION bit is set in the first byte.
+ */
+ if (!command || (frame->slotnr == 0xff)) {
+ int len;
+
+ if (discovery->data.hints[0] & HINT_EXTENSION) {
+ info = skb_put(tx_skb, 2);
+ info[0] = discovery->data.hints[0];
+ info[1] = discovery->data.hints[1];
+ } else {
+ info = skb_put(tx_skb, 1);
+ info[0] = discovery->data.hints[0];
+ }
+ info = skb_put(tx_skb, 1);
+ info[0] = discovery->data.charset;
+
+ len = IRDA_MIN(discovery->name_len, skb_tailroom(tx_skb));
+ skb_put_data(tx_skb, discovery->data.info, len);
+ }
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_recv_discovery_xid_rsp (skb, info)
+ *
+ * Received a XID discovery response
+ *
+ */
+static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ struct xid_frame *xid;
+ discovery_t *discovery = NULL;
+ __u8 *discovery_info;
+ char *text;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
+ net_err_ratelimited("%s: frame too short!\n", __func__);
+ return;
+ }
+
+ xid = (struct xid_frame *) skb->data;
+
+ info->daddr = le32_to_cpu(xid->saddr);
+ info->saddr = le32_to_cpu(xid->daddr);
+
+ /* Make sure frame is addressed to us */
+ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
+ pr_debug("%s(), frame is not addressed to us!\n",
+ __func__);
+ return;
+ }
+
+ if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
+ net_warn_ratelimited("%s: kmalloc failed!\n", __func__);
+ return;
+ }
+
+ discovery->data.daddr = info->daddr;
+ discovery->data.saddr = self->saddr;
+ discovery->timestamp = jiffies;
+
+ pr_debug("%s(), daddr=%08x\n", __func__,
+ discovery->data.daddr);
+
+ discovery_info = skb_pull(skb, sizeof(struct xid_frame));
+
+ /* Get info returned from peer */
+ discovery->data.hints[0] = discovery_info[0];
+ if (discovery_info[0] & HINT_EXTENSION) {
+ pr_debug("EXTENSION\n");
+ discovery->data.hints[1] = discovery_info[1];
+ discovery->data.charset = discovery_info[2];
+ text = (char *) &discovery_info[3];
+ } else {
+ discovery->data.hints[1] = 0;
+ discovery->data.charset = discovery_info[1];
+ text = (char *) &discovery_info[2];
+ }
+ /*
+ * Terminate info string, should be safe since this is where the
+	 * FCS bytes reside.
+ */
+ skb->data[skb->len] = '\0';
+ strncpy(discovery->data.info, text, NICKNAME_MAX_LEN);
+ discovery->name_len = strlen(discovery->data.info);
+
+ info->discovery = discovery;
+
+ irlap_do_event(self, RECV_DISCOVERY_XID_RSP, skb, info);
+}
+
+/*
+ * Function irlap_recv_discovery_xid_cmd (skb, info)
+ *
+ * Received a XID discovery command
+ *
+ */
+static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ struct xid_frame *xid;
+ discovery_t *discovery = NULL;
+ __u8 *discovery_info;
+ char *text;
+
+ if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
+ net_err_ratelimited("%s: frame too short!\n", __func__);
+ return;
+ }
+
+ xid = (struct xid_frame *) skb->data;
+
+ info->daddr = le32_to_cpu(xid->saddr);
+ info->saddr = le32_to_cpu(xid->daddr);
+
+ /* Make sure frame is addressed to us */
+ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
+ pr_debug("%s(), frame is not addressed to us!\n",
+ __func__);
+ return;
+ }
+
+ switch (xid->flags & 0x03) {
+ case 0x00:
+ info->S = 1;
+ break;
+ case 0x01:
+ info->S = 6;
+ break;
+ case 0x02:
+ info->S = 8;
+ break;
+ case 0x03:
+ info->S = 16;
+ break;
+ default:
+ /* Error!! */
+ return;
+ }
+ info->s = xid->slotnr;
+
+ discovery_info = skb_pull(skb, sizeof(struct xid_frame));
+
+ /*
+ * Check if last frame
+ */
+ if (info->s == 0xff) {
+ /* Check if things are sane at this point... */
+		if ((discovery_info == NULL) ||
+ !pskb_may_pull(skb, 3)) {
+ net_err_ratelimited("%s: discovery frame too short!\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * We now have some discovery info to deliver!
+ */
+ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
+ if (!discovery)
+ return;
+
+ discovery->data.daddr = info->daddr;
+ discovery->data.saddr = self->saddr;
+ discovery->timestamp = jiffies;
+
+ discovery->data.hints[0] = discovery_info[0];
+ if (discovery_info[0] & HINT_EXTENSION) {
+ discovery->data.hints[1] = discovery_info[1];
+ discovery->data.charset = discovery_info[2];
+ text = (char *) &discovery_info[3];
+ } else {
+ discovery->data.hints[1] = 0;
+ discovery->data.charset = discovery_info[1];
+ text = (char *) &discovery_info[2];
+ }
+ /*
+ * Terminate string, should be safe since this is where the
+		 * FCS bytes reside.
+ */
+ skb->data[skb->len] = '\0';
+ strncpy(discovery->data.info, text, NICKNAME_MAX_LEN);
+ discovery->name_len = strlen(discovery->data.info);
+
+ info->discovery = discovery;
+ } else
+ info->discovery = NULL;
+
+ irlap_do_event(self, RECV_DISCOVERY_XID_CMD, skb, info);
+}
+
+/*
+ * Function irlap_send_rr_frame (self, command)
+ *
+ * Build and transmit RR (Receive Ready) frame. Notice that it is currently
+ * only possible to send RR frames with the poll bit set.
+ */
+void irlap_send_rr_frame(struct irlap_cb *self, int command)
+{
+ struct sk_buff *tx_skb;
+ struct rr_frame *frame;
+
+ tx_skb = alloc_skb(sizeof(struct rr_frame), GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 2);
+
+ frame->caddr = self->caddr;
+ frame->caddr |= (command) ? CMD_FRAME : 0;
+
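+	/* Nr (next expected Ns from the peer) rides in the top three bits
+	 * of the control byte, matching the ">> 5" extraction on receive. */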
+ frame->control = RR | PF_BIT | (self->vr << 5);
+
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_send_rd_frame (self)
+ *
+ * Request disconnect. Used by a secondary station to request the
+ * disconnection of the link.
+ */
+void irlap_send_rd_frame(struct irlap_cb *self)
+{
+ struct sk_buff *tx_skb;
+ struct rd_frame *frame;
+
+ tx_skb = alloc_skb(sizeof(struct rd_frame), GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ frame = skb_put(tx_skb, 2);
+
+ frame->caddr = self->caddr;
+ frame->control = RD_RSP | PF_BIT;
+
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_recv_rr_frame (skb, info)
+ *
+ *    Received RR (Receive Ready) frame from peer station; no harm in
+ *    making it inline since it's called from only one place
+ * (irlap_driver_rcv).
+ */
+static inline void irlap_recv_rr_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ info->nr = skb->data[1] >> 5;
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_RR_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_RR_RSP, skb, info);
+}
+
+/*
+ * Function irlap_recv_rnr_frame (self, skb, info)
+ *
+ * Received RNR (Receive Not Ready) frame from peer station
+ *
+ */
+static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ info->nr = skb->data[1] >> 5;
+
+ pr_debug("%s(), nr=%d, %ld\n", __func__, info->nr, jiffies);
+
+ if (command)
+ irlap_do_event(self, RECV_RNR_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_RNR_RSP, skb, info);
+}
+
+static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ info->nr = skb->data[1] >> 5;
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_REJ_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_REJ_RSP, skb, info);
+}
+
+static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ info->nr = skb->data[1] >> 5;
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_SREJ_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_SREJ_RSP, skb, info);
+}
+
+static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_DISC_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_RD_RSP, skb, info);
+}
+
+/*
+ * Function irlap_recv_ua_frame (skb, frame)
+ *
+ * Received UA (Unnumbered Acknowledgement) frame
+ *
+ */
+static inline void irlap_recv_ua_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ irlap_do_event(self, RECV_UA_RSP, skb, info);
+}
+
+/*
+ * Function irlap_send_data_primary(self, skb)
+ *
+ * Send I-frames as the primary station but without the poll bit set
+ *
+ */
+void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+
+ if (skb->data[1] == I_FRAME) {
+
+ /*
+ * Insert frame sequence number (Vs) in control field before
+ * inserting into transmit window queue.
+ */
+ skb->data[1] = I_FRAME | (self->vs << 1);
+
+ /*
+ * Insert frame in store, in case of retransmissions
+ * Increase skb reference count, see irlap_do_event()
+ */
+ skb_get(skb);
+ skb_queue_tail(&self->wx_list, skb);
+
+ /* Copy buffer */
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ return;
+ }
+
+ self->vs = (self->vs + 1) % 8;
+ self->ack_required = FALSE;
+ self->window -= 1;
+
+		irlap_send_i_frame(self, tx_skb, CMD_FRAME);
+ } else {
+ pr_debug("%s(), sending unreliable frame\n", __func__);
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
+ self->window -= 1;
+ }
+}
+
+/*
+ * Function irlap_send_data_primary_poll (self, skb)
+ *
+ * Send I(nformation) frame as primary with poll bit set
+ */
+void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+ int transmission_time;
+
+ /* Stop P timer */
+ del_timer(&self->poll_timer);
+
+ /* Is this reliable or unreliable data? */
+ if (skb->data[1] == I_FRAME) {
+
+ /*
+ * Insert frame sequence number (Vs) in control field before
+ * inserting into transmit window queue.
+ */
+ skb->data[1] = I_FRAME | (self->vs << 1);
+
+ /*
+ * Insert frame in store, in case of retransmissions
+ * Increase skb reference count, see irlap_do_event()
+ */
+ skb_get(skb);
+ skb_queue_tail(&self->wx_list, skb);
+
+ /* Copy buffer */
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ return;
+ }
+
+ /*
+ * Set poll bit if necessary. We do this to the copied
+		 * skb, since retransmitted frames need to set or clear the poll
+ * bit depending on when they are sent.
+ */
+ tx_skb->data[1] |= PF_BIT;
+
+ self->vs = (self->vs + 1) % 8;
+ self->ack_required = FALSE;
+
+ irlap_next_state(self, LAP_NRM_P);
+ irlap_send_i_frame(self, tx_skb, CMD_FRAME);
+ } else {
+ pr_debug("%s(), sending unreliable frame\n", __func__);
+
+ if (self->ack_required) {
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
+ irlap_next_state(self, LAP_NRM_P);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ self->ack_required = FALSE;
+ } else {
+ skb->data[1] |= PF_BIT;
+ irlap_next_state(self, LAP_NRM_P);
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
+ }
+ }
+
+ /* How much time we took for transmission of all frames.
+	 * We don't know, so let's assume we used the full window. Jean II */
+ transmission_time = self->final_timeout;
+
+ /* Reset parameter so that we can fill next window */
+ self->window = self->window_size;
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ /* Remove what we have not used. Just do a prorata of the
+ * bytes left in window to window capacity.
+ * See max_line_capacities[][] in qos.c for details. Jean II */
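+	/* For example, with these illustrative numbers: if half of
+	 * line_capacity is still left in bytes_left, half of final_timeout
+	 * gets subtracted from the transmission time estimate. */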
+ transmission_time -= (self->final_timeout * self->bytes_left
+ / self->line_capacity);
+ pr_debug("%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n",
+ __func__, self->final_timeout, self->bytes_left,
+ self->line_capacity, transmission_time);
+
+ /* We are allowed to transmit a maximum number of bytes again. */
+ self->bytes_left = self->line_capacity;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+
+ /*
+	 * The network layer has an intermediate buffer between IrLAP
+ * and the IrDA driver which can contain 8 frames. So, even
+ * though IrLAP is currently sending the *last* frame of the
+ * tx-window, the driver most likely has only just started
+ * sending the *first* frame of the same tx-window.
+	 * I.e. we are always at the very beginning of our Tx window.
+ * Now, we are supposed to set the final timer from the end
+ * of our tx-window to let the other peer reply. So, we need
+ * to add extra time to compensate for the fact that we
+	 * are really at the start of the tx-window, otherwise the final
+	 * timer might expire before the peer can answer...
+ * Jean II
+ */
+ irlap_start_final_timer(self, self->final_timeout + transmission_time);
+
+ /*
+	 * The clever amongst you might ask why we do this adjustment
+ * only here, and not in all the other cases in irlap_event.c.
+	 * In all those other cases, we only send a very short management
+	 * frame (a few bytes), so the adjustment would be lost in the
+ * noise...
+ * The exception of course is irlap_resend_rejected_frame().
+ * Jean II */
+}
+
+/*
+ * Function irlap_send_data_secondary_final (self, skb)
+ *
+ * Send I(nformation) frame as secondary with final bit set
+ *
+ */
+void irlap_send_data_secondary_final(struct irlap_cb *self,
+ struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb = NULL;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Is this reliable or unreliable data? */
+ if (skb->data[1] == I_FRAME) {
+
+ /*
+ * Insert frame sequence number (Vs) in control field before
+ * inserting into transmit window queue.
+ */
+ skb->data[1] = I_FRAME | (self->vs << 1);
+
+ /*
+ * Insert frame in store, in case of retransmissions
+ * Increase skb reference count, see irlap_do_event()
+ */
+ skb_get(skb);
+ skb_queue_tail(&self->wx_list, skb);
+
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ return;
+ }
+
+ tx_skb->data[1] |= PF_BIT;
+
+ self->vs = (self->vs + 1) % 8;
+ self->ack_required = FALSE;
+
+ irlap_send_i_frame(self, tx_skb, RSP_FRAME);
+ } else {
+ if (self->ack_required) {
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME);
+ irlap_send_rr_frame(self, RSP_FRAME);
+ self->ack_required = FALSE;
+ } else {
+ skb->data[1] |= PF_BIT;
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME);
+ }
+ }
+
+ self->window = self->window_size;
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ /* We are allowed to transmit a maximum number of bytes again. */
+ self->bytes_left = self->line_capacity;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+
+ irlap_start_wd_timer(self, self->wd_timeout);
+}
+
+/*
+ * Function irlap_send_data_secondary (self, skb)
+ *
+ * Send I(nformation) frame as secondary without final bit set
+ *
+ */
+void irlap_send_data_secondary(struct irlap_cb *self, struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb = NULL;
+
+ /* Is this reliable or unreliable data? */
+ if (skb->data[1] == I_FRAME) {
+
+ /*
+ * Insert frame sequence number (Vs) in control field before
+ * inserting into transmit window queue.
+ */
+ skb->data[1] = I_FRAME | (self->vs << 1);
+
+ /*
+ * Insert frame in store, in case of retransmissions
+ * Increase skb reference count, see irlap_do_event()
+ */
+ skb_get(skb);
+ skb_queue_tail(&self->wx_list, skb);
+
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ return;
+ }
+
+ self->vs = (self->vs + 1) % 8;
+ self->ack_required = FALSE;
+ self->window -= 1;
+
+ irlap_send_i_frame(self, tx_skb, RSP_FRAME);
+ } else {
+ irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME);
+ self->window -= 1;
+ }
+}
+
+/*
+ * Function irlap_resend_rejected_frames (self, command)
+ *
+ *    Resend frames which have not been acknowledged. It should be safe to
+ *    traverse the list without locking it, since this function will only be
+ *    called from interrupt context (BH)
+ */
+void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
+{
+ struct sk_buff *tx_skb;
+ struct sk_buff *skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Resend unacknowledged frame(s) */
+ skb_queue_walk(&self->wx_list, skb) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /* We copy the skb to be retransmitted since we will have to
+ * modify it. Cloning will confuse packet sniffers
+ */
+ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!tx_skb) {
+ pr_debug("%s(), unable to copy\n", __func__);
+ return;
+ }
+
+ /* Clear old Nr field + poll bit */
+ tx_skb->data[1] &= 0x0f;
+
+ /*
+ * Set poll bit on the last frame retransmitted
+ */
+ if (skb_queue_is_last(&self->wx_list, skb))
+ tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
+ else
+ tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */
+
+ irlap_send_i_frame(self, tx_skb, command);
+ }
+#if 0 /* Not yet */
+ /*
+ * We can now fill the window with additional data frames
+ */
+ while (!skb_queue_empty(&self->txq)) {
+
+ pr_debug("%s(), sending additional frames!\n", __func__);
+ if (self->window > 0) {
+ skb = skb_dequeue( &self->txq);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /*
+ * If send window > 1 then send frame with pf
+ * bit cleared
+ */
+ if ((self->window > 1) &&
+ !skb_queue_empty(&self->txq)) {
+ irlap_send_data_primary(self, skb);
+ } else {
+ irlap_send_data_primary_poll(self, skb);
+ }
+ kfree_skb(skb);
+ }
+ }
+#endif
+}
+
+void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
+{
+ struct sk_buff *tx_skb;
+ struct sk_buff *skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Resend unacknowledged frame(s) */
+ skb = skb_peek(&self->wx_list);
+ if (skb != NULL) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /* We copy the skb to be retransmitted since we will have to
+ * modify it. Cloning will confuse packet sniffers
+ */
+ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!tx_skb) {
+ pr_debug("%s(), unable to copy\n", __func__);
+ return;
+ }
+
+ /* Clear old Nr field + poll bit */
+ tx_skb->data[1] &= 0x0f;
+
+ /* Set poll/final bit */
+ tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
+
+ irlap_send_i_frame(self, tx_skb, command);
+ }
+}
+
+/*
+ * Function irlap_send_ui_frame (self, skb, caddr, command)
+ *
+ *    Construct and transmit an Unnumbered Information (UI) frame
+ *
+ */
+void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ __u8 caddr, int command)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Insert connection address */
+ skb->data[0] = caddr | ((command) ? CMD_FRAME : 0);
+
+ irlap_queue_xmit(self, skb);
+}
+
+/*
+ * Function irlap_send_i_frame (self, skb, command)
+ *
+ *    Construct and transmit an Information (I) frame
+ */
+static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
+ int command)
+{
+ /* Insert connection address */
+ skb->data[0] = self->caddr;
+ skb->data[0] |= (command) ? CMD_FRAME : 0;
+
+ /* Insert next to receive (Vr) */
+ skb->data[1] |= (self->vr << 5); /* insert nr */
+
+ irlap_queue_xmit(self, skb);
+}
+
+/*
+ * Function irlap_recv_i_frame (skb, frame)
+ *
+ * Receive and parse an I (Information) frame, no harm in making it inline
+ * since it's called only from one single place (irlap_driver_rcv).
+ */
+static inline void irlap_recv_i_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
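+	/* Control field layout, as parsed below: bit 0 is 0 for an
+	 * I-frame, bits 1-3 carry Ns, bit 4 is the poll/final bit and
+	 * bits 5-7 carry Nr. */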
+ info->nr = skb->data[1] >> 5; /* Next to receive */
+ info->pf = skb->data[1] & PF_BIT; /* Final bit */
+ info->ns = (skb->data[1] >> 1) & 0x07; /* Next to send */
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_I_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_I_RSP, skb, info);
+}
+
+/*
+ * Function irlap_recv_ui_frame (self, skb, info)
+ *
+ * Receive and parse an Unnumbered Information (UI) frame
+ *
+ */
+static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ info->pf = skb->data[1] & PF_BIT; /* Final bit */
+
+ irlap_do_event(self, RECV_UI_FRAME, skb, info);
+}
+
+/*
+ * Function irlap_recv_frmr_frame (skb, frame)
+ *
+ * Received Frame Reject response.
+ *
+ */
+static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
+{
+ __u8 *frame;
+ int w, x, y, z;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(info != NULL, return;);
+
+ if (!pskb_may_pull(skb, 4)) {
+ net_err_ratelimited("%s: frame too short!\n", __func__);
+ return;
+ }
+
+ frame = skb->data;
+
+ info->nr = frame[2] >> 5; /* Next to receive */
+ info->pf = frame[2] & PF_BIT; /* Final bit */
+ info->ns = (frame[2] >> 1) & 0x07; /* Next to send */
+
+ w = frame[3] & 0x01;
+ x = frame[3] & 0x02;
+ y = frame[3] & 0x04;
+ z = frame[3] & 0x08;
+
+ if (w) {
+ pr_debug("Rejected control field is undefined or not implemented\n");
+ }
+ if (x) {
+ pr_debug("Rejected control field was invalid because it contained a non permitted I field\n");
+ }
+ if (y) {
+ pr_debug("Received I field exceeded the maximum negotiated for the existing connection or exceeded the maximum this station supports if no connection exists\n");
+ }
+ if (z) {
+		pr_debug("Rejected control field contained an invalid Nr count\n");
+ }
+ irlap_do_event(self, RECV_FRMR_RSP, skb, info);
+}
+
+/*
+ * Function irlap_send_test_frame (self, daddr)
+ *
+ * Send a test frame response
+ *
+ */
+void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
+ struct sk_buff *cmd)
+{
+ struct sk_buff *tx_skb;
+ struct test_frame *frame;
+
+ tx_skb = alloc_skb(cmd->len + sizeof(struct test_frame), GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ /* Broadcast frames must include saddr and daddr fields */
+ if (caddr == CBROADCAST) {
+ frame = skb_put(tx_skb, sizeof(struct test_frame));
+
+ /* Insert the swapped addresses */
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(daddr);
+ } else
+ frame = skb_put(tx_skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
+
+ frame->caddr = caddr;
+ frame->control = TEST_RSP | PF_BIT;
+
+ /* Copy info */
+ skb_put_data(tx_skb, cmd->data, cmd->len);
+
+ /* Return to sender */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_queue_xmit(self, tx_skb);
+}
+
+/*
+ * Function irlap_recv_test_frame (self, skb)
+ *
+ * Receive a test frame
+ *
+ */
+static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ struct test_frame *frame;
+
+ if (!pskb_may_pull(skb, sizeof(*frame))) {
+ net_err_ratelimited("%s: frame too short!\n", __func__);
+ return;
+ }
+ frame = (struct test_frame *) skb->data;
+
+ /* Broadcast frames must carry saddr and daddr fields */
+ if (info->caddr == CBROADCAST) {
+ if (skb->len < sizeof(struct test_frame)) {
+ pr_debug("%s() test frame too short!\n",
+ __func__);
+ return;
+ }
+
+ /* Read and swap addresses */
+ info->daddr = le32_to_cpu(frame->saddr);
+ info->saddr = le32_to_cpu(frame->daddr);
+
+ /* Make sure frame is addressed to us */
+ if ((info->saddr != self->saddr) &&
+ (info->saddr != BROADCAST)) {
+ return;
+ }
+ }
+
+ if (command)
+ irlap_do_event(self, RECV_TEST_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_TEST_RSP, skb, info);
+}
+
+/*
+ * Function irlap_driver_rcv (skb, netdev, ptype)
+ *
+ * Called when a frame is received. Dispatches the right receive function
+ * for processing of the frame.
+ *
+ * Note on skb management :
+ * After calling the higher layers of the IrDA stack, we always
+ * kfree() the skb, which drops the reference count (and potentially
+ * destroys it).
+ * If a higher layer of the stack wants to keep the skb around (to put
+ * in a queue or pass it to the higher layer), it will need to use
+ * skb_get() to keep a reference on it. This is usually done at the
+ * LMP level in irlmp.c.
+ * Jean II
+ */
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+{
+ struct irlap_info info;
+ struct irlap_cb *self;
+ int command;
+ __u8 control;
+ int ret = -1;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ goto out;
+
+ /* FIXME: should we get our own field? */
+ self = (struct irlap_cb *) dev->atalk_ptr;
+
+ /* If the net device is down, then IrLAP is gone! */
+ if (!self || self->magic != LAP_MAGIC)
+ goto err;
+
+ /* We are no longer an "old" protocol, so we need to handle
+	 * shared and non-linear skbs. This should never happen, so
+ * we don't need to be clever about it. Jean II */
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ net_err_ratelimited("%s: can't clone shared skb!\n", __func__);
+ goto err;
+ }
+
+ /* Check if frame is large enough for parsing */
+ if (!pskb_may_pull(skb, 2)) {
+ net_err_ratelimited("%s: frame too short!\n", __func__);
+ goto err;
+ }
+
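+	/* Address byte: bit 0 is the command/response bit, the remaining
+	 * bits carry the connection address. */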
+ command = skb->data[0] & CMD_FRAME;
+ info.caddr = skb->data[0] & CBROADCAST;
+
+ info.pf = skb->data[1] & PF_BIT;
+ info.control = skb->data[1] & ~PF_BIT; /* Mask away poll/final bit */
+
+ control = info.control;
+
+ /* First we check if this frame has a valid connection address */
+ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) {
+ pr_debug("%s(), wrong connection address!\n",
+ __func__);
+ goto out;
+ }
+ /*
+ * Optimize for the common case and check if the frame is an
+ * I(nformation) frame. Only I-frames have bit 0 set to 0
+ */
+ if (~control & 0x01) {
+ irlap_recv_i_frame(self, skb, &info, command);
+ goto out;
+ }
+ /*
+	 * We now check if the frame is an S(upervisory) frame. Only
+ * S-frames have bit 0 set to 1 and bit 1 set to 0
+ */
+ if (~control & 0x02) {
+ /*
+ * Received S(upervisory) frame, check which frame type it is
+ * only the first nibble is of interest
+ */
+ switch (control & 0x0f) {
+ case RR:
+ irlap_recv_rr_frame(self, skb, &info, command);
+ break;
+ case RNR:
+ irlap_recv_rnr_frame(self, skb, &info, command);
+ break;
+ case REJ:
+ irlap_recv_rej_frame(self, skb, &info, command);
+ break;
+ case SREJ:
+ irlap_recv_srej_frame(self, skb, &info, command);
+ break;
+ default:
+ net_warn_ratelimited("%s: Unknown S-frame %02x received!\n",
+ __func__, info.control);
+ break;
+ }
+ goto out;
+ }
+ /*
+ * This must be a C(ontrol) frame
+ */
+ switch (control) {
+ case XID_RSP:
+ irlap_recv_discovery_xid_rsp(self, skb, &info);
+ break;
+ case XID_CMD:
+ irlap_recv_discovery_xid_cmd(self, skb, &info);
+ break;
+ case SNRM_CMD:
+ irlap_recv_snrm_cmd(self, skb, &info);
+ break;
+ case DM_RSP:
+ irlap_do_event(self, RECV_DM_RSP, skb, &info);
+ break;
+ case DISC_CMD: /* And RD_RSP since they have the same value */
+ irlap_recv_disc_frame(self, skb, &info, command);
+ break;
+ case TEST_CMD:
+ irlap_recv_test_frame(self, skb, &info, command);
+ break;
+ case UA_RSP:
+ irlap_recv_ua_frame(self, skb, &info);
+ break;
+ case FRMR_RSP:
+ irlap_recv_frmr_frame(self, skb, &info);
+ break;
+ case UI_FRAME:
+ irlap_recv_ui_frame(self, skb, &info);
+ break;
+ default:
+ net_warn_ratelimited("%s: Unknown frame %02x received!\n",
+ __func__, info.control);
+ break;
+ }
+out:
+ ret = 0;
+err:
+ /* Always drop our reference on the skb */
+ dev_kfree_skb(skb);
+ return ret;
+}
diff --git a/drivers/staging/irda/net/irlmp.c b/drivers/staging/irda/net/irlmp.c
new file mode 100644
index 000000000000..43964594aa12
--- /dev/null
+++ b/drivers/staging/irda/net/irlmp.c
@@ -0,0 +1,1996 @@
+/*********************************************************************
+ *
+ * Filename: irlmp.c
+ * Version: 1.0
+ * Description: IrDA Link Management Protocol (LMP) layer
+ * Status: Stable.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 17 20:54:32 1997
+ * Modified at: Wed Jan 5 11:26:03 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/timer.h>
+#include <net/irda/qos.h>
+#include <net/irda/irlap.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irlmp_frame.h>
+
+#include <asm/unaligned.h>
+
+static __u8 irlmp_find_free_slsap(void);
+static int irlmp_slsap_inuse(__u8 slsap_sel);
+
+/* Master structure */
+struct irlmp_cb *irlmp = NULL;
+
+/* These can be altered by the sysctl interface */
+int sysctl_discovery = 0;
+int sysctl_discovery_timeout = 3; /* 3 seconds by default */
+int sysctl_discovery_slots = 6; /* 6 slots by default */
+int sysctl_lap_keepalive_time = LM_IDLE_TIMEOUT * 1000 / HZ;
+char sysctl_devname[65];
+
+static const char *irlmp_reasons[] = {
+ "ERROR, NOT USED",
+ "LM_USER_REQUEST",
+ "LM_LAP_DISCONNECT",
+ "LM_CONNECT_FAILURE",
+ "LM_LAP_RESET",
+ "LM_INIT_DISCONNECT",
+ "ERROR, NOT USED",
+ "UNKNOWN",
+};
+
+const char *irlmp_reason_str(LM_REASON reason)
+{
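+	/* Clamp out-of-range reason codes to the final "UNKNOWN" entry */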
+ reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1);
+ return irlmp_reasons[reason];
+}
+
+/*
+ * Function irlmp_init (void)
+ *
+ * Create (allocate) the main IrLMP structure
+ *
+ */
+int __init irlmp_init(void)
+{
+ /* Initialize the irlmp structure. */
+ irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
+ if (irlmp == NULL)
+ return -ENOMEM;
+
+ irlmp->magic = LMP_MAGIC;
+
+ irlmp->clients = hashbin_new(HB_LOCK);
+ irlmp->services = hashbin_new(HB_LOCK);
+ irlmp->links = hashbin_new(HB_LOCK);
+ irlmp->unconnected_lsaps = hashbin_new(HB_LOCK);
+ irlmp->cachelog = hashbin_new(HB_NOLOCK);
+
+ if ((irlmp->clients == NULL) ||
+ (irlmp->services == NULL) ||
+ (irlmp->links == NULL) ||
+ (irlmp->unconnected_lsaps == NULL) ||
+ (irlmp->cachelog == NULL)) {
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&irlmp->cachelog->hb_spinlock);
+
+ irlmp->last_lsap_sel = 0x0f; /* Reserved 0x00-0x0f */
+ strcpy(sysctl_devname, "Linux");
+
+ init_timer(&irlmp->discovery_timer);
+
+ /* Do discovery every 3 seconds, conditionally */
+ if (sysctl_discovery)
+ irlmp_start_discovery_timer(irlmp,
+ sysctl_discovery_timeout*HZ);
+
+ return 0;
+}
+
+/*
+ * Function irlmp_cleanup (void)
+ *
+ * Remove IrLMP layer
+ *
+ */
+void irlmp_cleanup(void)
+{
+ /* Check for main structure */
+ IRDA_ASSERT(irlmp != NULL, return;);
+ IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return;);
+
+ del_timer(&irlmp->discovery_timer);
+
+ hashbin_delete(irlmp->links, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->unconnected_lsaps, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->clients, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->services, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->cachelog, (FREE_FUNC) kfree);
+
+ /* De-allocate main structure */
+ kfree(irlmp);
+ irlmp = NULL;
+}
+
+/*
+ * Function irlmp_open_lsap (slsap, notify)
+ *
+ * Register with IrLMP and create a local LSAP,
+ * returns handle to LSAP.
+ */
+struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
+{
+ struct lsap_cb *self;
+
+ IRDA_ASSERT(notify != NULL, return NULL;);
+ IRDA_ASSERT(irlmp != NULL, return NULL;);
+ IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return NULL;);
+ IRDA_ASSERT(notify->instance != NULL, return NULL;);
+
+ /* Does the client care which Source LSAP selector it gets? */
+ if (slsap_sel == LSAP_ANY) {
+ slsap_sel = irlmp_find_free_slsap();
+ if (!slsap_sel)
+ return NULL;
+ } else if (irlmp_slsap_inuse(slsap_sel))
+ return NULL;
+
+ /* Allocate new instance of a LSAP connection */
+ self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
+ if (self == NULL)
+ return NULL;
+
+ self->magic = LMP_LSAP_MAGIC;
+ self->slsap_sel = slsap_sel;
+
+ /* Fix connectionless LSAP's */
+ if (slsap_sel == LSAP_CONNLESS) {
+#ifdef CONFIG_IRDA_ULTRA
+ self->dlsap_sel = LSAP_CONNLESS;
+ self->pid = pid;
+#endif /* CONFIG_IRDA_ULTRA */
+ } else
+ self->dlsap_sel = LSAP_ANY;
+ /* self->connected = FALSE; -> already NULL via memset() */
+
+ init_timer(&self->watchdog_timer);
+
+ self->notify = *notify;
+
+ self->lsap_state = LSAP_DISCONNECTED;
+
+ /* Insert into queue of unconnected LSAPs */
+ hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self,
+ (long) self, NULL);
+
+ return self;
+}
+EXPORT_SYMBOL(irlmp_open_lsap);
+
+/*
+ * Function __irlmp_close_lsap (self)
+ *
+ * Remove an instance of LSAP
+ */
+static void __irlmp_close_lsap(struct lsap_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+
+ /*
+ * Set some of the variables to preset values
+ */
+ self->magic = 0;
+ del_timer(&self->watchdog_timer); /* Important! */
+
+ if (self->conn_skb)
+ dev_kfree_skb(self->conn_skb);
+
+ kfree(self);
+}
+
+/*
+ * Function irlmp_close_lsap (self)
+ *
+ * Close and remove LSAP
+ *
+ */
+void irlmp_close_lsap(struct lsap_cb *self)
+{
+ struct lap_cb *lap;
+ struct lsap_cb *lsap = NULL;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+
+ /*
+ * Find out if we should remove this LSAP from a link or from the
+ * list of unconnected lsaps (not associated with a link)
+ */
+ lap = self->lap;
+ if (lap) {
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
+		/* We might close an LSAP before it has completed the
+		 * connection setup. In those cases, higher layers won't
+		 * send a proper disconnect request. Harmless, except
+		 * that we will forget to close LAP... - Jean II */
+ if(self->lsap_state != LSAP_DISCONNECTED) {
+ self->lsap_state = LSAP_DISCONNECTED;
+ irlmp_do_lap_event(self->lap,
+ LM_LAP_DISCONNECT_REQUEST, NULL);
+ }
+ /* Now, remove from the link */
+ lsap = hashbin_remove(lap->lsaps, (long) self, NULL);
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ lap->cache.valid = FALSE;
+#endif
+ }
+ self->lap = NULL;
+ /* Check if we found the LSAP! If not then try the unconnected lsaps */
+ if (!lsap) {
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self,
+ NULL);
+ }
+ if (!lsap) {
+ pr_debug("%s(), Looks like somebody has removed me already!\n",
+ __func__);
+ return;
+ }
+ __irlmp_close_lsap(self);
+}
+EXPORT_SYMBOL(irlmp_close_lsap);
+
+/*
+ * Function irlmp_register_link (irlap, saddr, notify)
+ *
+ *    Register an IrLAP layer with IrLMP. It is possible to have multiple
+ *    instances of the IrLAP layer, each connected to a different IrDA port
+ *
+ */
+void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
+{
+ struct lap_cb *lap;
+
+ IRDA_ASSERT(irlmp != NULL, return;);
+ IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return;);
+ IRDA_ASSERT(notify != NULL, return;);
+
+ /*
+	 * Allocate a new instance of an IrLAP link (struct lap_cb)
+ */
+ lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
+ if (lap == NULL)
+ return;
+
+ lap->irlap = irlap;
+ lap->magic = LMP_LAP_MAGIC;
+ lap->saddr = saddr;
+ lap->daddr = DEV_ADDR_ANY;
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ lap->cache.valid = FALSE;
+#endif
+ lap->lsaps = hashbin_new(HB_LOCK);
+ if (lap->lsaps == NULL) {
+ net_warn_ratelimited("%s(), unable to kmalloc lsaps\n",
+ __func__);
+ kfree(lap);
+ return;
+ }
+
+ lap->lap_state = LAP_STANDBY;
+
+ init_timer(&lap->idle_timer);
+
+ /*
+ * Insert into queue of LMP links
+ */
+ hashbin_insert(irlmp->links, (irda_queue_t *) lap, lap->saddr, NULL);
+
+ /*
+	 * We set only this variable so IrLAP can tell us which link the
+	 * different events happened on
+ */
+ irda_notify_init(notify);
+ notify->instance = lap;
+}
+
+/*
+ * Function irlmp_unregister_link (saddr)
+ *
+ * IrLAP layer has been removed!
+ *
+ */
+void irlmp_unregister_link(__u32 saddr)
+{
+ struct lap_cb *link;
+
+	/* We must remove ourselves from the hashbin *first*. This ensures
+	 * that no more LSAPs will be opened on this link and no more
+	 * discovery will be triggered. Jean II */
+ link = hashbin_remove(irlmp->links, saddr, NULL);
+ if (link) {
+ IRDA_ASSERT(link->magic == LMP_LAP_MAGIC, return;);
+
+ /* Kill all the LSAPs on this link. Jean II */
+ link->reason = LAP_DISC_INDICATION;
+ link->daddr = DEV_ADDR_ANY;
+ irlmp_do_lap_event(link, LM_LAP_DISCONNECT_INDICATION, NULL);
+
+ /* Remove all discoveries discovered at this link */
+ irlmp_expire_discoveries(irlmp->cachelog, link->saddr, TRUE);
+
+ /* Final cleanup */
+ del_timer(&link->idle_timer);
+ link->magic = 0;
+ hashbin_delete(link->lsaps, (FREE_FUNC) __irlmp_close_lsap);
+ kfree(link);
+ }
+}
+
+/*
+ * Function irlmp_connect_request (handle, dlsap, userdata)
+ *
+ * Connect with a peer LSAP
+ *
+ */
+int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, struct sk_buff *userdata)
+{
+ struct sk_buff *tx_skb = userdata;
+ struct lap_cb *lap;
+ struct lsap_cb *lsap;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -EBADR;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;);
+
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
+ __func__, self->slsap_sel, dlsap_sel, saddr, daddr);
+
+ if (test_bit(0, &self->connected)) {
+ ret = -EISCONN;
+ goto err;
+ }
+
+ /* Client must supply destination device address */
+ if (!daddr) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Any userdata? */
+ if (tx_skb == NULL) {
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+ }
+
+ /* Make room for MUX control header (3 bytes) */
+ IRDA_ASSERT(skb_headroom(tx_skb) >= LMP_CONTROL_HEADER, return -1;);
+ skb_push(tx_skb, LMP_CONTROL_HEADER);
+
+ self->dlsap_sel = dlsap_sel;
+
+ /*
+ * Find the link to where we should try to connect since there may
+ * be more than one IrDA port on this machine. If the client has
+ * passed us the saddr (and already knows which link to use), then
+ * we use that to find the link, if not then we have to look in the
+ * discovery log and check if any of the links has discovered a
+ * device with the given daddr
+ */
+ if ((!saddr) || (saddr == DEV_ADDR_ANY)) {
+ discovery_t *discovery;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irlmp->cachelog->hb_spinlock, flags);
+ if (daddr != DEV_ADDR_ANY)
+ discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
+ else {
+ pr_debug("%s(), no daddr\n", __func__);
+ discovery = (discovery_t *)
+ hashbin_get_first(irlmp->cachelog);
+ }
+
+ if (discovery) {
+ saddr = discovery->data.saddr;
+ daddr = discovery->data.daddr;
+ }
+ spin_unlock_irqrestore(&irlmp->cachelog->hb_spinlock, flags);
+ }
+ lap = hashbin_lock_find(irlmp->links, saddr, NULL);
+ if (lap == NULL) {
+ pr_debug("%s(), Unable to find a usable link!\n", __func__);
+ ret = -EHOSTUNREACH;
+ goto err;
+ }
+
+ /* Check if LAP is disconnected or already connected */
+ if (lap->daddr == DEV_ADDR_ANY)
+ lap->daddr = daddr;
+ else if (lap->daddr != daddr) {
+ /* Check if some LSAPs are active on this LAP */
+ if (HASHBIN_GET_SIZE(lap->lsaps) == 0) {
+ /* No active connection, but LAP hasn't been
+ * disconnected yet (waiting for timeout in LAP).
+ * Maybe we could give LAP a bit of help in this case.
+ */
+ pr_debug("%s(), sorry, but I'm waiting for LAP to timeout!\n",
+ __func__);
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ /* LAP is already connected to a different node, and LAP
+ * can only talk to one node at a time */
+ pr_debug("%s(), sorry, but link is busy!\n", __func__);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ self->lap = lap;
+
+ /*
+ * Remove LSAP from list of unconnected LSAPs and insert it into the
+ * list of connected LSAPs for the particular link
+ */
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self, NULL);
+
+ IRDA_ASSERT(lsap != NULL, return -1;);
+ IRDA_ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
+ IRDA_ASSERT(lsap->lap != NULL, return -1;);
+ IRDA_ASSERT(lsap->lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ hashbin_insert(self->lap->lsaps, (irda_queue_t *) self, (long) self,
+ NULL);
+
+ set_bit(0, &self->connected); /* TRUE */
+
+ /*
+ * User supplied qos specifications?
+ */
+ if (qos)
+ self->qos = *qos;
+
+ irlmp_do_lsap_event(self, LM_CONNECT_REQUEST, tx_skb);
+
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(tx_skb);
+
+ return 0;
+
+err:
+ /* Cleanup */
+ if(tx_skb)
+ dev_kfree_skb(tx_skb);
+ return ret;
+}
+EXPORT_SYMBOL(irlmp_connect_request);
+
+/*
+ * Function irlmp_connect_indication (self)
+ *
+ * Incoming connection
+ *
+ */
+void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
+{
+ int max_seg_size;
+ int lap_header_size;
+ int max_header_size;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(self->lap != NULL, return;);
+
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
+
+ /* Note : self->lap is set in irlmp_link_data_indication(),
+ * (case CONNECT_CMD:) because we have no way to set it here.
+ * Similarly, self->dlsap_sel is usually set in irlmp_find_lsap().
+ * Jean II */
+
+ self->qos = *self->lap->qos;
+
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
+ lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
+ max_header_size = LMP_HEADER + lap_header_size;
+
+ /* Hide LMP_CONTROL_HEADER header from layer above */
+ skb_pull(skb, LMP_CONTROL_HEADER);
+
+ if (self->notify.connect_indication) {
+ /* Don't forget to refcount it - see irlap_driver_rcv(). */
+ skb_get(skb);
+ self->notify.connect_indication(self->notify.instance, self,
+ &self->qos, max_seg_size,
+ max_header_size, skb);
+ }
+}
+
+/*
+ * Function irlmp_connect_response (handle, userdata)
+ *
+ * Service user is accepting connection
+ *
+ */
+int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+ IRDA_ASSERT(userdata != NULL, return -1;);
+
+ /* We set the connected bit and move the lsap to the connected list
+ * in the state machine itself. Jean II */
+
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
+
+ /* Make room for MUX control header (3 bytes) */
+ IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;);
+ skb_push(userdata, LMP_CONTROL_HEADER);
+
+ irlmp_do_lsap_event(self, LM_CONNECT_RESPONSE, userdata);
+
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(userdata);
+
+ return 0;
+}
+EXPORT_SYMBOL(irlmp_connect_response);
+
+/*
+ * Function irlmp_connect_confirm (handle, skb)
+ *
+ *    LSAP connection confirmed by the peer device!
+ */
+void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
+{
+ int max_header_size;
+ int lap_header_size;
+ int max_seg_size;
+
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ IRDA_ASSERT(self->lap != NULL, return;);
+
+ self->qos = *self->lap->qos;
+
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
+ lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
+ max_header_size = LMP_HEADER + lap_header_size;
+
+ pr_debug("%s(), max_header_size=%d\n",
+ __func__, max_header_size);
+
+ /* Hide LMP_CONTROL_HEADER header from layer above */
+ skb_pull(skb, LMP_CONTROL_HEADER);
+
+ if (self->notify.connect_confirm) {
+ /* Don't forget to refcount it - see irlap_driver_rcv() */
+ skb_get(skb);
+ self->notify.connect_confirm(self->notify.instance, self,
+ &self->qos, max_seg_size,
+ max_header_size, skb);
+ }
+}
+
+/*
+ * Function irlmp_dup (orig, instance)
+ *
+ * Duplicate LSAP, can be used by servers to confirm a connection on a
+ * new LSAP so it can keep listening on the old one.
+ *
+ */
+struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
+{
+ struct lsap_cb *new;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+
+	/* Only allowed to duplicate unconnected LSAPs, and only LSAPs
+	 * that have received a connect indication. Jean II */
+ if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) ||
+ (orig->lap == NULL)) {
+ pr_debug("%s(), invalid LSAP (wrong state)\n",
+ __func__);
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
+ flags);
+ return NULL;
+ }
+
+ /* Allocate a new instance */
+ new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ pr_debug("%s(), unable to kmalloc\n", __func__);
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
+ flags);
+ return NULL;
+ }
+ /* new->lap = orig->lap; => done in the memcpy() */
+ /* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */
+ new->conn_skb = NULL;
+
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+
+ /* Not everything is the same */
+ new->notify.instance = instance;
+
+ init_timer(&new->watchdog_timer);
+
+ hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new,
+ (long) new, NULL);
+
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ /* Make sure that we invalidate the LSAP cache */
+ new->lap->cache.valid = FALSE;
+#endif /* CONFIG_IRDA_CACHE_LAST_LSAP */
+
+ return new;
+}
+
+/*
+ * Function irlmp_disconnect_request (handle, userdata)
+ *
+ *    The service user is requesting disconnection; this will not remove
+ *    the LSAP, but only mark it as disconnected
+ */
+int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
+{
+ struct lsap_cb *lsap;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+ IRDA_ASSERT(userdata != NULL, return -1;);
+
+ /* Already disconnected ?
+ * There is a race condition between irlmp_disconnect_indication()
+ * and us that might mess up the hashbins below. This fixes it.
+ * Jean II */
+ if (! test_and_clear_bit(0, &self->connected)) {
+ pr_debug("%s(), already disconnected!\n", __func__);
+ dev_kfree_skb(userdata);
+ return -1;
+ }
+
+ skb_push(userdata, LMP_CONTROL_HEADER);
+
+ /*
+	 * Do the event before the other stuff since we must know
+	 * which LAP layer the frame should be transmitted on
+ */
+ irlmp_do_lsap_event(self, LM_DISCONNECT_REQUEST, userdata);
+
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(userdata);
+
+ /*
+ * Remove LSAP from list of connected LSAPs for the particular link
+ * and insert it into the list of unconnected LSAPs
+ */
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ IRDA_ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
+ IRDA_ASSERT(self->lap->lsaps != NULL, return -1;);
+
+ lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ self->lap->cache.valid = FALSE;
+#endif
+
+ IRDA_ASSERT(lsap != NULL, return -1;);
+ IRDA_ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
+ IRDA_ASSERT(lsap == self, return -1;);
+
+ hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self,
+ (long) self, NULL);
+
+ /* Reset some values */
+ self->dlsap_sel = LSAP_ANY;
+ self->lap = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(irlmp_disconnect_request);
+
+/*
+ * Function irlmp_disconnect_indication (reason, userdata)
+ *
+ * LSAP is being closed!
+ */
+void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
+ struct sk_buff *skb)
+{
+ struct lsap_cb *lsap;
+
+ pr_debug("%s(), reason=%s [%d]\n", __func__,
+ irlmp_reason_str(reason), reason);
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
+
+ /* Already disconnected ?
+ * There is a race condition between irlmp_disconnect_request()
+ * and us that might mess up the hashbins below. This fixes it.
+ * Jean II */
+ if (! test_and_clear_bit(0, &self->connected)) {
+ pr_debug("%s(), already disconnected!\n", __func__);
+ return;
+ }
+
+ /*
+ * Remove association between this LSAP and the link it used
+ */
+ IRDA_ASSERT(self->lap != NULL, return;);
+ IRDA_ASSERT(self->lap->lsaps != NULL, return;);
+
+ lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ self->lap->cache.valid = FALSE;
+#endif
+
+ IRDA_ASSERT(lsap != NULL, return;);
+ IRDA_ASSERT(lsap == self, return;);
+ hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) lsap,
+ (long) lsap, NULL);
+
+ self->dlsap_sel = LSAP_ANY;
+ self->lap = NULL;
+
+ /*
+ * Inform service user
+ */
+ if (self->notify.disconnect_indication) {
+ /* Don't forget to refcount it - see irlap_driver_rcv(). */
+ if(skb)
+ skb_get(skb);
+ self->notify.disconnect_indication(self->notify.instance,
+ self, reason, skb);
+ } else {
+ pr_debug("%s(), no handler\n", __func__);
+ }
+}
+
+/*
+ * Function irlmp_do_expiry (void)
+ *
+ * Do a cleanup of the discovery log (remove old entries)
+ *
+ * Note : separate from irlmp_do_discovery() so that we can handle
+ * passive discovery properly.
+ */
+void irlmp_do_expiry(void)
+{
+ struct lap_cb *lap;
+
+ /*
+ * Expire discovery on all links which are *not* connected.
+ * On links which are connected, we can't do discovery
+ * anymore and can't refresh the log, so we freeze the
+ * discovery log to keep info about the device we are
+ * connected to.
+ * This info is mandatory if we want irlmp_connect_request()
+ * to work properly. - Jean II
+ */
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
+
+ if (lap->lap_state == LAP_STANDBY) {
+ /* Expire discoveries discovered on this link */
+ irlmp_expire_discoveries(irlmp->cachelog, lap->saddr,
+ FALSE);
+ }
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
+ }
+}
+
+/*
+ * Function irlmp_do_discovery (nslots)
+ *
+ * Do some discovery on all links
+ *
+ * Note : log expiry is done above.
+ */
+void irlmp_do_discovery(int nslots)
+{
+ struct lap_cb *lap;
+ __u16 *data_hintsp;
+
+ /* Make sure the value is sane */
+ if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
+ net_warn_ratelimited("%s: invalid value for number of slots!\n",
+ __func__);
+ nslots = sysctl_discovery_slots = 8;
+ }
+
+ /* Construct new discovery info to be used by IrLAP, */
+ data_hintsp = (__u16 *) irlmp->discovery_cmd.data.hints;
+ put_unaligned(irlmp->hints.word, data_hintsp);
+
+ /*
+ * Set character set for device name (we use ASCII), and
+ * copy device name. Remember to make room for a \0 at the
+ * end
+ */
+ irlmp->discovery_cmd.data.charset = CS_ASCII;
+ strncpy(irlmp->discovery_cmd.data.info, sysctl_devname,
+ NICKNAME_MAX_LEN);
+ irlmp->discovery_cmd.name_len = strlen(irlmp->discovery_cmd.data.info);
+ irlmp->discovery_cmd.nslots = nslots;
+
+ /*
+ * Try to send discovery packets on all links
+ */
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
+
+ if (lap->lap_state == LAP_STANDBY) {
+ /* Try to discover */
+ irlmp_do_lap_event(lap, LM_LAP_DISCOVERY_REQUEST,
+ NULL);
+ }
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
+ }
+}
+
+/*
+ * Function irlmp_discovery_request (nslots)
+ *
+ * Do a discovery of devices in front of the computer
+ *
+ *    If the caller has registered a client discovery callback, this
+ *    allows it to receive the full content of the discovery log through
+ *    this callback (normally it would receive only new discoveries).
+ */
+void irlmp_discovery_request(int nslots)
+{
+ /* Return current cached discovery log (in full) */
+ irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_LOG);
+
+ /*
+ * Start a single discovery operation if discovery is not already
+ * running
+ */
+ if (!sysctl_discovery) {
+ /* Check if user wants to override the default */
+ if (nslots == DISCOVERY_DEFAULT_SLOTS)
+ nslots = sysctl_discovery_slots;
+
+ irlmp_do_discovery(nslots);
+ /* Note : we never do expiry here. Expiry will run on the
+ * discovery timer regardless of the state of sysctl_discovery
+ * Jean II */
+ }
+}
+EXPORT_SYMBOL(irlmp_discovery_request);
+
+/*
+ * Function irlmp_get_discoveries (pn, mask, slots)
+ *
+ * Return the current discovery log
+ *
+ * If discovery is not enabled, you should call this function again
+ * after 1 or 2 seconds (i.e. after discovery has been done).
+ */
+struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots)
+{
+ /* If discovery is not enabled, it's likely that the discovery log
+	 * will be empty. So, we trigger a single discovery, so that the
+	 * next time the user calls us there might be some results in the log.
+ * Jean II
+ */
+ if (!sysctl_discovery) {
+ /* Check if user wants to override the default */
+ if (nslots == DISCOVERY_DEFAULT_SLOTS)
+ nslots = sysctl_discovery_slots;
+
+ /* Start discovery - will complete sometime later */
+ irlmp_do_discovery(nslots);
+ /* Note : we never do expiry here. Expiry will run on the
+ * discovery timer regardless of the state of sysctl_discovery
+ * Jean II */
+ }
+
+ /* Return current cached discovery log */
+ return irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE);
+}
+EXPORT_SYMBOL(irlmp_get_discoveries);
+
+/*
+ * Function irlmp_notify_client (log)
+ *
+ *    Notify a client about discovered devices
+ *
+ * Clients registered with IrLMP are :
+ * o IrComm
+ * o IrLAN
+ * o Any socket (in any state - ouch, that may be a lot !)
+ * The client may have defined a callback to be notified in case of
+ * partial/selective discovery based on the hints that it passed to IrLMP.
+ */
+static inline void
+irlmp_notify_client(irlmp_client_t *client,
+ hashbin_t *log, DISCOVERY_MODE mode)
+{
+ discinfo_t *discoveries; /* Copy of the discovery log */
+ int number; /* Number of nodes in the log */
+ int i;
+
+	/* Check whether the client wants a partial/selective log (optimisation) */
+ if (!client->disco_callback)
+ return;
+
+ /*
+ * Locking notes :
+ * the old code was manipulating the log directly, which was
+ * very racy. Now, we use copy_discoveries, that protects
+ * itself while dumping the log for us.
+ * The overhead of the copy is compensated by the fact that
+ * we only pass new discoveries in normal mode and don't
+ * pass the same old entry every 3s to the caller as we used
+ * to do (virtual function calling is expensive).
+ * Jean II
+ */
+
+ /*
+ * Now, check all discovered devices (if any), and notify client
+ * only about the services that the client is interested in
+ * We also notify only about the new devices unless the caller
+	 * explicitly requests a dump of the log. Jean II
+ */
+ discoveries = irlmp_copy_discoveries(log, &number,
+ client->hint_mask.word,
+ (mode == DISCOVERY_LOG));
+	/* Check if we got any results */
+ if (discoveries == NULL)
+ return; /* No nodes discovered */
+
+ /* Pass all entries to the listener */
+ for(i = 0; i < number; i++)
+ client->disco_callback(&(discoveries[i]), mode, client->priv);
+
+ /* Free up our buffer */
+ kfree(discoveries);
+}
+
+/*
+ * Function irlmp_discovery_confirm ( self, log)
+ *
+ *    Some device(s) answered our discovery request! Check which devices
+ *    they are, and give an indication to the client(s)
+ *
+ */
+void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
+{
+ irlmp_client_t *client;
+ irlmp_client_t *client_next;
+
+ IRDA_ASSERT(log != NULL, return;);
+
+ if (!(HASHBIN_GET_SIZE(log)))
+ return;
+
+ /* For each client - notify callback may touch client list */
+ client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
+ while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
+ (void *) &client_next) ) {
+ /* Check if we should notify client */
+ irlmp_notify_client(client, log, mode);
+
+ client = client_next;
+ }
+}
+
+/*
+ * Function irlmp_discovery_expiry (expiry)
+ *
+ *    This device is no longer being discovered, and is therefore being
+ *    purged from the discovery log. Inform all clients who have
+ * registered for this event...
+ *
+ * Note : called exclusively from discovery.c
+ * Note : this is no longer called under discovery spinlock, so the
+ * client can do whatever he wants in the callback.
+ */
+void irlmp_discovery_expiry(discinfo_t *expiries, int number)
+{
+ irlmp_client_t *client;
+ irlmp_client_t *client_next;
+ int i;
+
+ IRDA_ASSERT(expiries != NULL, return;);
+
+ /* For each client - notify callback may touch client list */
+ client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
+ while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
+ (void *) &client_next) ) {
+
+ /* Pass all entries to the listener */
+ for(i = 0; i < number; i++) {
+ /* Check if we should notify client */
+ if ((client->expir_callback) &&
+ (client->hint_mask.word &
+ get_unaligned((__u16 *)expiries[i].hints)
+ & 0x7f7f) )
+ client->expir_callback(&(expiries[i]),
+ EXPIRY_TIMEOUT,
+ client->priv);
+ }
+
+ /* Next client */
+ client = client_next;
+ }
+}
+
+/*
+ * Function irlmp_get_discovery_response ()
+ *
+ * Used by IrLAP to get the discovery info it needs when answering
+ * discovery requests by other devices.
+ */
+discovery_t *irlmp_get_discovery_response(void)
+{
+ IRDA_ASSERT(irlmp != NULL, return NULL;);
+
+ put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints);
+
+ /*
+ * Set character set for device name (we use ASCII), and
+ * copy device name. Remember to make room for a \0 at the
+ * end
+ */
+ irlmp->discovery_rsp.data.charset = CS_ASCII;
+
+ strncpy(irlmp->discovery_rsp.data.info, sysctl_devname,
+ NICKNAME_MAX_LEN);
+ irlmp->discovery_rsp.name_len = strlen(irlmp->discovery_rsp.data.info);
+
+ return &irlmp->discovery_rsp;
+}
+
+/*
+ * Function irlmp_data_request (self, skb)
+ *
+ * Send some data to peer device
+ *
+ * Note on skb management :
+ * After calling the lower layers of the IrDA stack, we always
+ * kfree() the skb, which drops the reference count (and potentially
+ * destroys it).
+ * IrLMP and IrLAP may queue the packet, and in those cases will need
+ * to use skb_get() to keep it around.
+ * Jean II
+ */
+int irlmp_data_request(struct lsap_cb *self, struct sk_buff *userdata)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ /* Make room for MUX header */
+ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER, return -1;);
+ skb_push(userdata, LMP_HEADER);
+
+ ret = irlmp_do_lsap_event(self, LM_DATA_REQUEST, userdata);
+
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(userdata);
+
+ return ret;
+}
+EXPORT_SYMBOL(irlmp_data_request);
+
+/*
+ * Function irlmp_data_indication (handle, skb)
+ *
+ * Got data from LAP layer so pass it up to upper layer
+ *
+ */
+void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb)
+{
+ /* Hide LMP header from layer above */
+ skb_pull(skb, LMP_HEADER);
+
+ if (self->notify.data_indication) {
+ /* Don't forget to refcount it - see irlap_driver_rcv(). */
+ skb_get(skb);
+ self->notify.data_indication(self->notify.instance, self, skb);
+ }
+}
+
+/*
+ * Function irlmp_udata_request (self, skb)
+ */
+int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
+{
+ int ret;
+
+ IRDA_ASSERT(userdata != NULL, return -1;);
+
+ /* Make room for MUX header */
+ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER, return -1;);
+ skb_push(userdata, LMP_HEADER);
+
+ ret = irlmp_do_lsap_event(self, LM_UDATA_REQUEST, userdata);
+
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(userdata);
+
+ return ret;
+}
+
+/*
+ * Function irlmp_udata_indication (self, skb)
+ *
+ *    Receive unreliable data (but still within the connection)
+ *
+ */
+void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Hide LMP header from layer above */
+ skb_pull(skb, LMP_HEADER);
+
+ if (self->notify.udata_indication) {
+ /* Don't forget to refcount it - see irlap_driver_rcv(). */
+ skb_get(skb);
+ self->notify.udata_indication(self->notify.instance, self,
+ skb);
+ }
+}
+
+/*
+ * Function irlmp_connless_data_request (self, skb)
+ */
+#ifdef CONFIG_IRDA_ULTRA
+int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
+ __u8 pid)
+{
+ struct sk_buff *clone_skb;
+ struct lap_cb *lap;
+
+ IRDA_ASSERT(userdata != NULL, return -1;);
+
+ /* Make room for MUX and PID header */
+ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER+LMP_PID_HEADER,
+ return -1;);
+
+ /* Insert protocol identifier */
+ skb_push(userdata, LMP_PID_HEADER);
+ if(self != NULL)
+ userdata->data[0] = self->pid;
+ else
+ userdata->data[0] = pid;
+
+	/* Source and destination LSAP selectors are both the
+	 * connectionless LSAP (0x70) */
+ skb_push(userdata, LMP_HEADER);
+ userdata->data[0] = userdata->data[1] = LSAP_CONNLESS;
+
+ /* Try to send Connectionless packets out on all links */
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ clone_skb = skb_clone(userdata, GFP_ATOMIC);
+ if (!clone_skb) {
+ dev_kfree_skb(userdata);
+ return -ENOMEM;
+ }
+
+ irlap_unitdata_request(lap->irlap, clone_skb);
+		/* irlap_unitdata_request() doesn't increase the refcount,
+		 * so no dev_kfree_skb() - Jean II */
+
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
+ }
+ dev_kfree_skb(userdata);
+
+ return 0;
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irlmp_connless_data_indication (self, skb)
+ *
+ * Receive unreliable data outside any connection. Mostly used by Ultra
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /* Hide LMP and PID header from layer above */
+ skb_pull(skb, LMP_HEADER+LMP_PID_HEADER);
+
+ if (self->notify.udata_indication) {
+ /* Don't forget to refcount it - see irlap_driver_rcv(). */
+ skb_get(skb);
+ self->notify.udata_indication(self->notify.instance, self,
+ skb);
+ }
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Propagate status indication from LAP to LSAPs (via LMP)
+ * This doesn't trigger any change of state in lap_cb, lmp_cb or lsap_cb,
+ * and the event is stateless, therefore we can bypass both state machines
+ * and send the event directly to the LSAP user.
+ * Jean II
+ */
+void irlmp_status_indication(struct lap_cb *self,
+ LINK_STATUS link, LOCK_STATUS lock)
+{
+ struct lsap_cb *next;
+ struct lsap_cb *curr;
+
+ /* Send status_indication to all LSAPs using this link */
+ curr = (struct lsap_cb *) hashbin_get_first( self->lsaps);
+ while (NULL != hashbin_find_next(self->lsaps, (long) curr, NULL,
+ (void *) &next) ) {
+ IRDA_ASSERT(curr->magic == LMP_LSAP_MAGIC, return;);
+ /*
+ * Inform service user if he has requested it
+ */
+ if (curr->notify.status_indication != NULL)
+ curr->notify.status_indication(curr->notify.instance,
+ link, lock);
+ else
+ pr_debug("%s(), no handler\n", __func__);
+
+ curr = next;
+ }
+}
+
+/*
+ * Receive flow control indication from LAP.
+ * LAP wants us to send it one more frame. We implement a simple
+ * round-robin scheduler between the active sockets so that we get a bit
+ * of fairness. Note that the round robin is far from perfect, but it's
+ * better than nothing.
+ * We then poll the selected socket so that we can do synchronous
+ * refilling of IrLAP (which allows us to minimise the number of buffers).
+ * Jean II
+ */
+void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
+{
+ struct lsap_cb *next;
+ struct lsap_cb *curr;
+ int lsap_todo;
+
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ IRDA_ASSERT(flow == FLOW_START, return;);
+
+	/* Get the number of LSAPs. That's the only safe way to know
+	 * that we have looped around... - Jean II */
+ lsap_todo = HASHBIN_GET_SIZE(self->lsaps);
+ pr_debug("%s() : %d lsaps to scan\n", __func__, lsap_todo);
+
+	/* Poll LSAPs in order until the queue is full or until we
+	 * have tried them all.
+ * Most often, the current LSAP will have something to send,
+ * so we will go through this loop only once. - Jean II */
+ while((lsap_todo--) &&
+ (IRLAP_GET_TX_QUEUE_LEN(self->irlap) < LAP_HIGH_THRESHOLD)) {
+ /* Try to find the next lsap we should poll. */
+ next = self->flow_next;
+ /* If we have no lsap, restart from first one */
+ if(next == NULL)
+ next = (struct lsap_cb *) hashbin_get_first(self->lsaps);
+ /* Verify current one and find the next one */
+ curr = hashbin_find_next(self->lsaps, (long) next, NULL,
+ (void *) &self->flow_next);
+ /* Uh-oh... Paranoia */
+ if(curr == NULL)
+ break;
+ pr_debug("%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n",
+ __func__, curr, next, self->flow_next, lsap_todo,
+ IRLAP_GET_TX_QUEUE_LEN(self->irlap));
+
+ /* Inform lsap user that it can send one more packet. */
+ if (curr->notify.flow_indication != NULL)
+ curr->notify.flow_indication(curr->notify.instance,
+ curr, flow);
+ else
+ pr_debug("%s(), no handler\n", __func__);
+ }
+}
+
+#if 0
+/*
+ * Function irlmp_hint_to_service (hint)
+ *
+ *    Returns a list of all services contained in the given hint bits. This
+ *    function assumes that the hint field is only two bytes long
+ */
+__u8 *irlmp_hint_to_service(__u8 *hint)
+{
+ __u8 *service;
+ int i = 0;
+
+ /*
+ * Allocate array to store services in. 16 entries should be safe
+ * since we currently only support 2 hint bytes
+ */
+ service = kmalloc(16, GFP_ATOMIC);
+ if (!service)
+ return NULL;
+
+ if (!hint[0]) {
+ pr_debug("<None>\n");
+ kfree(service);
+ return NULL;
+ }
+ if (hint[0] & HINT_PNP)
+ pr_debug("PnP Compatible ");
+ if (hint[0] & HINT_PDA)
+ pr_debug("PDA/Palmtop ");
+ if (hint[0] & HINT_COMPUTER)
+ pr_debug("Computer ");
+ if (hint[0] & HINT_PRINTER) {
+ pr_debug("Printer ");
+ service[i++] = S_PRINTER;
+ }
+ if (hint[0] & HINT_MODEM)
+ pr_debug("Modem ");
+ if (hint[0] & HINT_FAX)
+ pr_debug("Fax ");
+ if (hint[0] & HINT_LAN) {
+ pr_debug("LAN Access ");
+ service[i++] = S_LAN;
+ }
+ /*
+ * Test if extension byte exists. This byte will usually be
+ * there, but this is not really required by the standard.
+ * (IrLMP p. 29)
+ */
+ if (hint[0] & HINT_EXTENSION) {
+ if (hint[1] & HINT_TELEPHONY) {
+ pr_debug("Telephony ");
+ service[i++] = S_TELEPHONY;
+ }
+ if (hint[1] & HINT_FILE_SERVER)
+ pr_debug("File Server ");
+
+ if (hint[1] & HINT_COMM) {
+ pr_debug("IrCOMM ");
+ service[i++] = S_COMM;
+ }
+ if (hint[1] & HINT_OBEX) {
+ pr_debug("IrOBEX ");
+ service[i++] = S_OBEX;
+ }
+ }
+ pr_debug("\n");
+
+ /* So that client can be notified about any discovery */
+ service[i++] = S_ANY;
+
+ service[i] = S_END;
+
+ return service;
+}
+#endif
+
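+/* Hint byte pair { byte 0, byte 1 } for each S_xxx service, indexed by
+ * service number; the S_ANY entry sets all hint bits. */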
+static const __u16 service_hint_mapping[S_END][2] = {
+ { HINT_PNP, 0 }, /* S_PNP */
+ { HINT_PDA, 0 }, /* S_PDA */
+ { HINT_COMPUTER, 0 }, /* S_COMPUTER */
+ { HINT_PRINTER, 0 }, /* S_PRINTER */
+ { HINT_MODEM, 0 }, /* S_MODEM */
+ { HINT_FAX, 0 }, /* S_FAX */
+ { HINT_LAN, 0 }, /* S_LAN */
+ { HINT_EXTENSION, HINT_TELEPHONY }, /* S_TELEPHONY */
+ { HINT_EXTENSION, HINT_COMM }, /* S_COMM */
+ { HINT_EXTENSION, HINT_OBEX }, /* S_OBEX */
+ { 0xFF, 0xFF }, /* S_ANY */
+};
+
+/*
+ * Function irlmp_service_to_hint (service)
+ *
+ *    Converts a service type to a hint bit
+ *
+ * Returns: a 16 bit hint value, with the service bit set
+ */
+__u16 irlmp_service_to_hint(int service)
+{
+ __u16_host_order hint;
+
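+	/* Look up the two hint bytes for this service and combine them
+	 * into a single 16-bit hint word. */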
+ hint.byte[0] = service_hint_mapping[service][0];
+ hint.byte[1] = service_hint_mapping[service][1];
+
+ return hint.word;
+}
+EXPORT_SYMBOL(irlmp_service_to_hint);
+
+/*
+ * Function irlmp_register_service (service)
+ *
+ * Register local service with IrLMP
+ *
+ */
+void *irlmp_register_service(__u16 hints)
+{
+ irlmp_service_t *service;
+
+ pr_debug("%s(), hints = %04x\n", __func__, hints);
+
+ /* Make a new registration */
+ service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
+ if (!service)
+ return NULL;
+
+ service->hints.word = hints;
+ hashbin_insert(irlmp->services, (irda_queue_t *) service,
+ (long) service, NULL);
+
+ irlmp->hints.word |= hints;
+
+ return (void *)service;
+}
+EXPORT_SYMBOL(irlmp_register_service);
+
+/*
+ * Function irlmp_unregister_service (handle)
+ *
+ * Unregister service with IrLMP.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int irlmp_unregister_service(void *handle)
+{
+ irlmp_service_t *service;
+ unsigned long flags;
+
+ if (!handle)
+ return -1;
+
+ /* Caller may call with invalid handle (it's legal) - Jean II */
+ service = hashbin_lock_find(irlmp->services, (long) handle, NULL);
+ if (!service) {
+ pr_debug("%s(), Unknown service!\n", __func__);
+ return -1;
+ }
+
+ hashbin_remove_this(irlmp->services, (irda_queue_t *) service);
+ kfree(service);
+
+ /* Remove old hint bits */
+ irlmp->hints.word = 0;
+
+ /* Refresh current hint bits */
+ spin_lock_irqsave(&irlmp->services->hb_spinlock, flags);
+ service = (irlmp_service_t *) hashbin_get_first(irlmp->services);
+ while (service) {
+ irlmp->hints.word |= service->hints.word;
+
+ service = (irlmp_service_t *)hashbin_get_next(irlmp->services);
+ }
+ spin_unlock_irqrestore(&irlmp->services->hb_spinlock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(irlmp_unregister_service);
+
+/*
+ * Function irlmp_register_client (hint_mask, callback1, callback2)
+ *
+ * Register a local client with IrLMP
+ * First callback is selective discovery (based on hints)
+ * Second callback is for selective discovery expiries
+ *
+ * Returns: a client handle on success, NULL on error
+ */
+void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv)
+{
+ irlmp_client_t *client;
+
+ IRDA_ASSERT(irlmp != NULL, return NULL;);
+
+ /* Make a new registration */
+ client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
+ if (!client)
+ return NULL;
+
+ /* Register the details */
+ client->hint_mask.word = hint_mask;
+ client->disco_callback = disco_clb;
+ client->expir_callback = expir_clb;
+ client->priv = priv;
+
+ hashbin_insert(irlmp->clients, (irda_queue_t *) client,
+ (long) client, NULL);
+
+ return (void *) client;
+}
+EXPORT_SYMBOL(irlmp_register_client);
+
+/*
+ * Function irlmp_update_client (handle, hint_mask, callback1, callback2)
+ *
+ * Updates specified client (handle) with possibly new hint_mask and
+ * callback
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int irlmp_update_client(void *handle, __u16 hint_mask,
+ DISCOVERY_CALLBACK1 disco_clb,
+ DISCOVERY_CALLBACK2 expir_clb, void *priv)
+{
+ irlmp_client_t *client;
+
+ if (!handle)
+ return -1;
+
+ client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
+ if (!client) {
+ pr_debug("%s(), Unknown client!\n", __func__);
+ return -1;
+ }
+
+ client->hint_mask.word = hint_mask;
+ client->disco_callback = disco_clb;
+ client->expir_callback = expir_clb;
+ client->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL(irlmp_update_client);
+
+/*
+ * Function irlmp_unregister_client (handle)
+ *
+ * Returns: 0 on success, -1 on error
+ *
+ */
+int irlmp_unregister_client(void *handle)
+{
+ struct irlmp_client *client;
+
+ if (!handle)
+ return -1;
+
+ /* Caller may call with invalid handle (it's legal) - Jean II */
+ client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
+ if (!client) {
+ pr_debug("%s(), Unknown client!\n", __func__);
+ return -1;
+ }
+
+ pr_debug("%s(), removing client!\n", __func__);
+ hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
+ kfree(client);
+
+ return 0;
+}
+EXPORT_SYMBOL(irlmp_unregister_client);
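The client side follows the same pattern; a rough sketch of the register/update/unregister lifecycle (callbacks passed as NULL purely to keep the example short, a real caller would supply DISCOVERY_CALLBACK1/2 handlers; S_OBEX and S_LAN are assumed from the IrDA service-hint definitions):

	/* Sketch only: watch for peers advertising the OBEX hint bit. */
	static int demo_watch_obex(void)
	{
		void *client = irlmp_register_client(irlmp_service_to_hint(S_OBEX),
						     NULL, NULL, NULL);

		if (!client)
			return -ENOMEM;

		/* The hint filter and callbacks can later be changed in place... */
		irlmp_update_client(client, irlmp_service_to_hint(S_LAN),
				    NULL, NULL, NULL);

		/* ...and finally dropped. */
		return irlmp_unregister_client(client);
	}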
+
+/*
+ * Function irlmp_slsap_inuse (slsap)
+ *
+ * Check if the given source LSAP selector is in use
+ *
+ * This function is clearly not very efficient. On the mitigating side, the
+ * stack makes sure that in 99% of the cases, we are called only once
+ * for each socket allocation. We could probably keep a bitmap
+ * of the allocated LSAPs, but I'm not sure the complexity is worth it.
+ * Jean II
+ */
+static int irlmp_slsap_inuse(__u8 slsap_sel)
+{
+ struct lsap_cb *self;
+ struct lap_cb *lap;
+ unsigned long flags;
+
+ IRDA_ASSERT(irlmp != NULL, return TRUE;);
+ IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
+ IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;);
+
+#ifdef CONFIG_IRDA_ULTRA
+ /* Accept all bindings to the connectionless LSAP */
+ if (slsap_sel == LSAP_CONNLESS)
+ return FALSE;
+#endif /* CONFIG_IRDA_ULTRA */
+
+ /* Valid values are between 0x00 and LSAP_MAX (0x6F) */
+ if (slsap_sel > LSAP_MAX)
+ return TRUE;
+
+ /*
+ * Check if slsap is already in use. To do this we have to loop over
+ * every IrLAP connection and check every LSAP associated with
+ * each connection.
+ */
+ spin_lock_irqsave_nested(&irlmp->links->hb_spinlock, flags,
+ SINGLE_DEPTH_NESTING);
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, goto errlap;);
+
+ /* Careful for priority inversions here !
+ * irlmp->links is never taken while another IrDA
+ * spinlock is held, so we are safe. Jean II */
+ spin_lock(&lap->lsaps->hb_spinlock);
+
+ /* For this IrLAP, check all the LSAPs */
+ self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
+ while (self != NULL) {
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC,
+ goto errlsap;);
+
+ if ((self->slsap_sel == slsap_sel)) {
+ pr_debug("Source LSAP selector=%02x in use\n",
+ self->slsap_sel);
+ goto errlsap;
+ }
+ self = (struct lsap_cb*) hashbin_get_next(lap->lsaps);
+ }
+ spin_unlock(&lap->lsaps->hb_spinlock);
+
+ /* Next LAP */
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
+ }
+ spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);
+
+ /*
+ * Server sockets are typically waiting for connections and
+ * therefore reside in the unconnected list. We don't want
+ * to give out their LSAPs for obvious reasons...
+ * Jean II
+ */
+ spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+
+ self = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
+ while (self != NULL) {
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;);
+ if ((self->slsap_sel == slsap_sel)) {
+ pr_debug("Source LSAP selector=%02x in use (unconnected)\n",
+ self->slsap_sel);
+ goto erruncon;
+ }
+ self = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
+ }
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+
+ return FALSE;
+
+ /* Error exit from within one of the two nested loops.
+ * Make sure we release the right spinlock in the right order.
+ * Jean II */
+errlsap:
+ spin_unlock(&lap->lsaps->hb_spinlock);
+IRDA_ASSERT_LABEL(errlap:)
+ spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);
+ return TRUE;
+
+ /* Error exit from within the unconnected loop.
+ * Just one spinlock to release... Jean II */
+erruncon:
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+ return TRUE;
+}
+
+/*
+ * Function irlmp_find_free_slsap ()
+ *
+ * Find a free source LSAP to use. This function is called if the service
+ * user has requested a source LSAP equal to LM_ANY
+ */
+static __u8 irlmp_find_free_slsap(void)
+{
+ __u8 lsap_sel;
+ int wrapped = 0;
+
+ IRDA_ASSERT(irlmp != NULL, return -1;);
+ IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return -1;);
+
+ /* Most users don't really care which LSAPs they are given,
+ * and therefore we automatically give them a free LSAP.
+ * This function tries to find a suitable LSAP, i.e. one that is
+ * not in use and within the acceptable range. Jean II */
+
+ do {
+ /* Always increment the LSAP number before using it.
+ * In theory, we could reuse the last LSAP number, as long
+ * as it is no longer in use. Some IrDA stacks do that.
+ * However, the previous socket may be half closed, i.e.
+ * we closed it, we think it's no longer in use, but the
+ * other side did not receive our close and thinks it's
+ * active and still sends data on it.
+ * This is similar to what is done with PIDs and TCP ports.
+ * Also, this reduces the number of calls to irlmp_slsap_inuse(),
+ * which is an expensive function to call.
+ * Jean II */
+ irlmp->last_lsap_sel++;
+
+ /* Check if we need to wrap around (0x70-0x7f are reserved) */
+ if (irlmp->last_lsap_sel > LSAP_MAX) {
+ /* 0x00-0x10 are also reserved for well-known ports */
+ irlmp->last_lsap_sel = 0x10;
+
+ /* Make sure we terminate the loop */
+ if (wrapped++) {
+ net_err_ratelimited("%s: no more free LSAPs !\n",
+ __func__);
+ return 0;
+ }
+ }
+
+ /* If the LSAP is in use, try the next one.
+ * Despite the autoincrement, we need to check if the lsap
+ * is really in use or not, first because an LSAP may be
+ * directly allocated in irlmp_open_lsap(), and also because
+ * we may wrap around onto old sockets. Jean II */
+ } while (irlmp_slsap_inuse(irlmp->last_lsap_sel));
+
+ /* Got it ! */
+ lsap_sel = irlmp->last_lsap_sel;
+ pr_debug("%s(), found free lsap_sel=%02x\n",
+ __func__, lsap_sel);
+
+ return lsap_sel;
+}
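The comment in irlmp_slsap_inuse() above mentions a bitmap of allocated selectors as a possible alternative to the linear scan. Purely to illustrate that trade-off (not part of this patch; the names are hypothetical, and note that it gives up the deliberate "don't reuse a recently freed selector" behaviour argued for above), such an allocator could look roughly like:

	#include <linux/bitmap.h>

	static DECLARE_BITMAP(slsap_map, LSAP_MAX + 1);	/* one bit per selector */

	static int slsap_alloc(void)
	{
		/* 0x00-0x10 stay reserved, as in irlmp_find_free_slsap() */
		int sel = find_next_zero_bit(slsap_map, LSAP_MAX + 1, 0x11);

		if (sel > LSAP_MAX)
			return -1;		/* no free selector left */
		set_bit(sel, slsap_map);
		return sel;
	}

	static void slsap_free(int sel)
	{
		clear_bit(sel, slsap_map);
	}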
+
+/*
+ * Function irlmp_convert_lap_reason (lap_reason)
+ *
+ * Converts IrLAP disconnect reason codes to IrLMP disconnect reason
+ * codes
+ *
+ */
+LM_REASON irlmp_convert_lap_reason(LAP_REASON lap_reason)
+{
+ int reason = LM_LAP_DISCONNECT;
+
+ switch (lap_reason) {
+ case LAP_DISC_INDICATION: /* Received a disconnect request from peer */
+ pr_debug("%s(), LAP_DISC_INDICATION\n", __func__);
+ reason = LM_USER_REQUEST;
+ break;
+ case LAP_NO_RESPONSE: /* Too many retransmits without response */
+ pr_debug("%s(), LAP_NO_RESPONSE\n", __func__);
+ reason = LM_LAP_DISCONNECT;
+ break;
+ case LAP_RESET_INDICATION:
+ pr_debug("%s(), LAP_RESET_INDICATION\n", __func__);
+ reason = LM_LAP_RESET;
+ break;
+ case LAP_FOUND_NONE:
+ case LAP_MEDIA_BUSY:
+ case LAP_PRIMARY_CONFLICT:
+ pr_debug("%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n",
+ __func__);
+ reason = LM_CONNECT_FAILURE;
+ break;
+ default:
+ pr_debug("%s(), Unknown IrLAP disconnect reason %d!\n",
+ __func__, lap_reason);
+ reason = LM_LAP_DISCONNECT;
+ break;
+ }
+
+ return reason;
+}
+
+#ifdef CONFIG_PROC_FS
+
+struct irlmp_iter_state {
+ hashbin_t *hashbin;
+};
+
+#define LSAP_START_TOKEN ((void *)1)
+#define LINK_START_TOKEN ((void *)2)
+
+static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
+{
+ void *element;
+
+ spin_lock_irq(&iter->hashbin->hb_spinlock);
+ for (element = hashbin_get_first(iter->hashbin);
+ element != NULL;
+ element = hashbin_get_next(iter->hashbin)) {
+ if (!off || (*off)-- == 0) {
+ /* NB: hashbin left locked */
+ return element;
+ }
+ }
+ spin_unlock_irq(&iter->hashbin->hb_spinlock);
+ iter->hashbin = NULL;
+ return NULL;
+}
+
+
+static void *irlmp_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct irlmp_iter_state *iter = seq->private;
+ void *v;
+ loff_t off = *pos;
+
+ iter->hashbin = NULL;
+ if (off-- == 0)
+ return LSAP_START_TOKEN;
+
+ iter->hashbin = irlmp->unconnected_lsaps;
+ v = irlmp_seq_hb_idx(iter, &off);
+ if (v)
+ return v;
+
+ if (off-- == 0)
+ return LINK_START_TOKEN;
+
+ iter->hashbin = irlmp->links;
+ return irlmp_seq_hb_idx(iter, &off);
+}
+
+static void *irlmp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct irlmp_iter_state *iter = seq->private;
+
+ ++*pos;
+
+ if (v == LSAP_START_TOKEN) { /* start of list of lsaps */
+ iter->hashbin = irlmp->unconnected_lsaps;
+ v = irlmp_seq_hb_idx(iter, NULL);
+ return v ? v : LINK_START_TOKEN;
+ }
+
+ if (v == LINK_START_TOKEN) { /* start of list of links */
+ iter->hashbin = irlmp->links;
+ return irlmp_seq_hb_idx(iter, NULL);
+ }
+
+ v = hashbin_get_next(iter->hashbin);
+
+ if (v == NULL) { /* no more in this hash bin */
+ spin_unlock_irq(&iter->hashbin->hb_spinlock);
+
+ if (iter->hashbin == irlmp->unconnected_lsaps)
+ v = LINK_START_TOKEN;
+
+ iter->hashbin = NULL;
+ }
+ return v;
+}
+
+static void irlmp_seq_stop(struct seq_file *seq, void *v)
+{
+ struct irlmp_iter_state *iter = seq->private;
+
+ if (iter->hashbin)
+ spin_unlock_irq(&iter->hashbin->hb_spinlock);
+}
+
+static int irlmp_seq_show(struct seq_file *seq, void *v)
+{
+ const struct irlmp_iter_state *iter = seq->private;
+ struct lsap_cb *self = v;
+
+ if (v == LSAP_START_TOKEN)
+ seq_puts(seq, "Unconnected LSAPs:\n");
+ else if (v == LINK_START_TOKEN)
+ seq_puts(seq, "\nRegistered Link Layers:\n");
+ else if (iter->hashbin == irlmp->unconnected_lsaps) {
+ self = v;
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EINVAL; );
+ seq_printf(seq, "lsap state: %s, ",
+ irlsap_state[ self->lsap_state]);
+ seq_printf(seq,
+ "slsap_sel: %#02x, dlsap_sel: %#02x, ",
+ self->slsap_sel, self->dlsap_sel);
+ seq_printf(seq, "(%s)", self->notify.name);
+ seq_printf(seq, "\n");
+ } else if (iter->hashbin == irlmp->links) {
+ struct lap_cb *lap = v;
+
+ seq_printf(seq, "lap state: %s, ",
+ irlmp_state[lap->lap_state]);
+
+ seq_printf(seq, "saddr: %#08x, daddr: %#08x, ",
+ lap->saddr, lap->daddr);
+ seq_printf(seq, "num lsaps: %d",
+ HASHBIN_GET_SIZE(lap->lsaps));
+ seq_printf(seq, "\n");
+
+ /* Careful for priority inversions here !
+ * All other uses of attrib spinlock are independent of
+ * the object spinlock, so we are safe. Jean II */
+ spin_lock(&lap->lsaps->hb_spinlock);
+
+ seq_printf(seq, "\n Connected LSAPs:\n");
+ for (self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
+ self != NULL;
+ self = (struct lsap_cb *)hashbin_get_next(lap->lsaps)) {
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC,
+ goto outloop;);
+ seq_printf(seq, " lsap state: %s, ",
+ irlsap_state[ self->lsap_state]);
+ seq_printf(seq,
+ "slsap_sel: %#02x, dlsap_sel: %#02x, ",
+ self->slsap_sel, self->dlsap_sel);
+ seq_printf(seq, "(%s)", self->notify.name);
+ seq_putc(seq, '\n');
+
+ }
+ IRDA_ASSERT_LABEL(outloop:)
+ spin_unlock(&lap->lsaps->hb_spinlock);
+ seq_putc(seq, '\n');
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct seq_operations irlmp_seq_ops = {
+ .start = irlmp_seq_start,
+ .next = irlmp_seq_next,
+ .stop = irlmp_seq_stop,
+ .show = irlmp_seq_show,
+};
+
+static int irlmp_seq_open(struct inode *inode, struct file *file)
+{
+ IRDA_ASSERT(irlmp != NULL, return -EINVAL;);
+
+ return seq_open_private(file, &irlmp_seq_ops,
+ sizeof(struct irlmp_iter_state));
+}
+
+const struct file_operations irlmp_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = irlmp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+#endif /* CONFIG_PROC_FS */
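For reference, the layout produced by these seq handlers (typically exposed as the irlmp entry under /proc/net/irda/) looks roughly like the following; every value below is hypothetical and only illustrates the format strings used in irlmp_seq_show():

	Unconnected LSAPs:
	lsap state: LSAP_DISCONNECTED, slsap_sel: 0x23, dlsap_sel: 0xff, (IrOBEX)

	Registered Link Layers:
	lap state: LAP_ACTIVE, saddr: 0x5b62bed5, daddr: 0x9cde463f, num lsaps: 1

	  Connected LSAPs:
	  lsap state: LSAP_DATA_TRANSFER_READY, slsap_sel: 0x23, dlsap_sel: 0x24, (IrOBEX)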
diff --git a/drivers/staging/irda/net/irlmp_event.c b/drivers/staging/irda/net/irlmp_event.c
new file mode 100644
index 000000000000..e306cf2c1e04
--- /dev/null
+++ b/drivers/staging/irda/net/irlmp_event.c
@@ -0,0 +1,886 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_event.c
+ * Version: 0.8
+ * Description: An IrDA LMP event driver for Linux
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Tue Dec 14 23:04:16 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/kernel.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/timer.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irlmp_frame.h>
+#include <net/irda/irlmp_event.h>
+
+const char *const irlmp_state[] = {
+ "LAP_STANDBY",
+ "LAP_U_CONNECT",
+ "LAP_ACTIVE",
+};
+
+const char *const irlsap_state[] = {
+ "LSAP_DISCONNECTED",
+ "LSAP_CONNECT",
+ "LSAP_CONNECT_PEND",
+ "LSAP_DATA_TRANSFER_READY",
+ "LSAP_SETUP",
+ "LSAP_SETUP_PEND",
+};
+
+static const char *const irlmp_event[] __maybe_unused = {
+ "LM_CONNECT_REQUEST",
+ "LM_CONNECT_CONFIRM",
+ "LM_CONNECT_RESPONSE",
+ "LM_CONNECT_INDICATION",
+
+ "LM_DISCONNECT_INDICATION",
+ "LM_DISCONNECT_REQUEST",
+
+ "LM_DATA_REQUEST",
+ "LM_UDATA_REQUEST",
+ "LM_DATA_INDICATION",
+ "LM_UDATA_INDICATION",
+
+ "LM_WATCHDOG_TIMEOUT",
+
+ /* IrLAP events */
+ "LM_LAP_CONNECT_REQUEST",
+ "LM_LAP_CONNECT_INDICATION",
+ "LM_LAP_CONNECT_CONFIRM",
+ "LM_LAP_DISCONNECT_INDICATION",
+ "LM_LAP_DISCONNECT_REQUEST",
+ "LM_LAP_DISCOVERY_REQUEST",
+ "LM_LAP_DISCOVERY_CONFIRM",
+ "LM_LAP_IDLE_TIMEOUT",
+};
+
+/* LAP Connection control proto declarations */
+static void irlmp_state_standby (struct lap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static void irlmp_state_u_connect(struct lap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static void irlmp_state_active (struct lap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+
+/* LSAP Connection control proto declarations */
+static int irlmp_state_disconnected(struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static int irlmp_state_connect (struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static int irlmp_state_connect_pend(struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static int irlmp_state_dtr (struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static int irlmp_state_setup (struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+static int irlmp_state_setup_pend (struct lsap_cb *, IRLMP_EVENT,
+ struct sk_buff *);
+
+static void (*lap_state[]) (struct lap_cb *, IRLMP_EVENT, struct sk_buff *) =
+{
+ irlmp_state_standby,
+ irlmp_state_u_connect,
+ irlmp_state_active,
+};
+
+static int (*lsap_state[])( struct lsap_cb *, IRLMP_EVENT, struct sk_buff *) =
+{
+ irlmp_state_disconnected,
+ irlmp_state_connect,
+ irlmp_state_connect_pend,
+ irlmp_state_dtr,
+ irlmp_state_setup,
+ irlmp_state_setup_pend
+};
+
+static inline void irlmp_next_lap_state(struct lap_cb *self,
+ IRLMP_STATE state)
+{
+ /*
+ pr_debug("%s(), LMP LAP = %s\n", __func__, irlmp_state[state]);
+ */
+ self->lap_state = state;
+}
+
+static inline void irlmp_next_lsap_state(struct lsap_cb *self,
+ LSAP_STATE state)
+{
+ /*
+ IRDA_ASSERT(self != NULL, return;);
+ pr_debug("%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]);
+ */
+ self->lsap_state = state;
+}
+
+/* Do connection control events */
+int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ pr_debug("%s(), EVENT = %s, STATE = %s\n",
+ __func__, irlmp_event[event], irlsap_state[self->lsap_state]);
+
+ return (*lsap_state[self->lsap_state]) (self, event, skb);
+}
+
+/*
+ * Function do_lap_event (event, skb, info)
+ *
+ * Do IrLAP control events
+ *
+ */
+void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ pr_debug("%s(), EVENT = %s, STATE = %s\n", __func__,
+ irlmp_event[event],
+ irlmp_state[self->lap_state]);
+
+ (*lap_state[self->lap_state]) (self, event, skb);
+}
+
+void irlmp_discovery_timer_expired(void *data)
+{
+ /* We always cleanup the log (active & passive discovery) */
+ irlmp_do_expiry();
+
+ irlmp_do_discovery(sysctl_discovery_slots);
+
+ /* Restart timer */
+ irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout * HZ);
+}
+
+void irlmp_watchdog_timer_expired(void *data)
+{
+ struct lsap_cb *self = (struct lsap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+
+ irlmp_do_lsap_event(self, LM_WATCHDOG_TIMEOUT, NULL);
+}
+
+void irlmp_idle_timer_expired(void *data)
+{
+ struct lap_cb *self = (struct lap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ irlmp_do_lap_event(self, LM_LAP_IDLE_TIMEOUT, NULL);
+}
+
+/*
+ * Send an event on all LSAPs attached to this LAP.
+ */
+static inline void
+irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin,
+ IRLMP_EVENT event)
+{
+ struct lsap_cb *lsap;
+ struct lsap_cb *lsap_next;
+
+ /* Note : this function uses the new hashbin_find_next()
+ * function, instead of the old hashbin_get_next().
+ * This makes sure that we are always pointing one lsap
+ * ahead, so that if the current lsap is removed as the
+ * result of sending the event, we don't care.
+ * Also, as we store the context ourselves, if an enumeration
+ * of the same lsap hashbin happens as the result of sending the
+ * event, we don't care.
+ * The only problem is if the next lsap is removed. In that case,
+ * hashbin_find_next() will return NULL and we will abort the
+ * enumeration. - Jean II */
+
+ /* Also : we don't accept any skb as input. We can *NOT* pass
+ * the same skb to multiple clients safely; we would need to
+ * skb_clone() it. - Jean II */
+
+ lsap = (struct lsap_cb *) hashbin_get_first(lsap_hashbin);
+
+ while (NULL != hashbin_find_next(lsap_hashbin,
+ (long) lsap,
+ NULL,
+ (void *) &lsap_next) ) {
+ irlmp_do_lsap_event(lsap, event, NULL);
+ lsap = lsap_next;
+ }
+}
+
+/*********************************************************************
+ *
+ * LAP connection control states
+ *
+ ********************************************************************/
+
+/*
+ * Function irlmp_state_standby (event, skb, info)
+ *
+ * STANDBY, The IrLAP connection does not exist.
+ *
+ */
+static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self->irlap != NULL, return;);
+
+ switch (event) {
+ case LM_LAP_DISCOVERY_REQUEST:
+ /* irlmp_next_station_state( LMP_DISCOVER); */
+
+ irlap_discovery_request(self->irlap, &irlmp->discovery_cmd);
+ break;
+ case LM_LAP_CONNECT_INDICATION:
+ /* It's important to switch state first, to avoid IrLMP thinking
+ * that the link is free, since IrLMP may then start
+ * discovery before the connection is properly set up. DB.
+ */
+ irlmp_next_lap_state(self, LAP_ACTIVE);
+
+ /* Just accept connection TODO, this should be fixed */
+ irlap_connect_response(self->irlap, skb);
+ break;
+ case LM_LAP_CONNECT_REQUEST:
+ pr_debug("%s() LS_CONNECT_REQUEST\n", __func__);
+
+ irlmp_next_lap_state(self, LAP_U_CONNECT);
+
+ /* FIXME: need to set users requested QoS */
+ irlap_connect_request(self->irlap, self->daddr, NULL, 0);
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ pr_debug("%s(), Error LM_LAP_DISCONNECT_INDICATION\n",
+ __func__);
+
+ irlmp_next_lap_state(self, LAP_STANDBY);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlmp_event[event]);
+ break;
+ }
+}
+
+/*
+ * Function irlmp_state_u_connect (event, skb, info)
+ *
+ * U_CONNECT, The layer above has tried to open an LSAP connection but
+ * since the IrLAP connection does not exist, we must first start an
+ * IrLAP connection. We are now waiting for a response from IrLAP.
+ */
+static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ pr_debug("%s(), event=%s\n", __func__, irlmp_event[event]);
+
+ switch (event) {
+ case LM_LAP_CONNECT_INDICATION:
+ /* It's important to switch state first, to avoid IrLMP thinking
+ * that the link is free, since IrLMP may then start
+ * discovery before the connection is properly set up. DB.
+ */
+ irlmp_next_lap_state(self, LAP_ACTIVE);
+
+ /* Just accept connection TODO, this should be fixed */
+ irlap_connect_response(self->irlap, skb);
+
+ /* Tell LSAPs that they can start sending data */
+ irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
+
+ /* Note : by the time we get there (LAP retries and co),
+ * the lsaps may already have gone. This avoids getting stuck
+ * forever in LAP_ACTIVE state - Jean II */
+ if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
+ pr_debug("%s() NO LSAPs !\n", __func__);
+ irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
+ }
+ break;
+ case LM_LAP_CONNECT_REQUEST:
+ /* Already trying to connect */
+ break;
+ case LM_LAP_CONNECT_CONFIRM:
+ /* For all lsap_ce E Associated do LS_Connect_confirm */
+ irlmp_next_lap_state(self, LAP_ACTIVE);
+
+ /* Tell LSAPs that they can start sending data */
+ irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
+
+ /* Note : by the time we get there (LAP retries and co),
+ * the lsaps may already have gone. This avoids getting stuck
+ * forever in LAP_ACTIVE state - Jean II */
+ if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
+ pr_debug("%s() NO LSAPs !\n", __func__);
+ irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
+ }
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ pr_debug("%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__);
+ irlmp_next_lap_state(self, LAP_STANDBY);
+
+ /* Send disconnect event to all LSAPs using this link */
+ irlmp_do_all_lsap_event(self->lsaps,
+ LM_LAP_DISCONNECT_INDICATION);
+ break;
+ case LM_LAP_DISCONNECT_REQUEST:
+ pr_debug("%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__);
+
+ /* One of the LSAPs timed out or was closed; if it was
+ * the last one, try to get out of here - Jean II */
+ if (HASHBIN_GET_SIZE(self->lsaps) <= 1) {
+ irlap_disconnect_request(self->irlap);
+ }
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlmp_event[event]);
+ break;
+ }
+}
+
+/*
+ * Function irlmp_state_active (event, skb, info)
+ *
+ * ACTIVE, IrLAP connection is active
+ *
+ */
+static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ switch (event) {
+ case LM_LAP_CONNECT_REQUEST:
+ pr_debug("%s(), LS_CONNECT_REQUEST\n", __func__);
+
+ /*
+ * IrLAP may have a pending disconnect. We tried to close
+ * IrLAP, but it was postponed because the link was
+ * busy or we were still sending packets. As we now
+ * need it, make sure it stays on. Jean II
+ */
+ irlap_clear_disconnect(self->irlap);
+
+ /*
+ * LAP connection already active, just bounce back! Since we
+ * don't know which LSAP that tried to do this, we have to
+ * notify all LSAPs using this LAP, but that should be safe to
+ * do anyway.
+ */
+ irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
+
+ /* Needed by connect indication */
+ irlmp_do_all_lsap_event(irlmp->unconnected_lsaps,
+ LM_LAP_CONNECT_CONFIRM);
+ /* Keep state */
+ break;
+ case LM_LAP_DISCONNECT_REQUEST:
+ /*
+ * Need to find out if we should close IrLAP or not. If there
+ * is only one LSAP connection left on this link, that LSAP
+ * must be the one that tries to close IrLAP. It will be
+ * removed later and moved to the list of unconnected LSAPs
+ */
+ if (HASHBIN_GET_SIZE(self->lsaps) > 0) {
+ /* Timer value is checked in irsysctl - Jean II */
+ irlmp_start_idle_timer(self, sysctl_lap_keepalive_time * HZ / 1000);
+ } else {
+ /* No more connections, so close IrLAP */
+
+ /* We don't want to change state just yet, because
+ * we want to accurately reflect the real state of
+ * the LAP, not the state we wish it was in,
+ * so that we don't lose LM_LAP_CONNECT_REQUEST.
+ * In some cases, IrLAP won't close the LAP
+ * immediately. For example, it might still be
+ * retrying packets or waiting for the pf bit.
+ * As the LAP always sends a DISCONNECT_INDICATION
+ * in PCLOSE or SCLOSE, just change state on that.
+ * Jean II */
+ irlap_disconnect_request(self->irlap);
+ }
+ break;
+ case LM_LAP_IDLE_TIMEOUT:
+ if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
+ /* Same reasoning as above - keep state */
+ irlap_disconnect_request(self->irlap);
+ }
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ irlmp_next_lap_state(self, LAP_STANDBY);
+
+ /* In some cases, at this point our side has already closed
+ * all lsaps, and we are waiting for the idle_timer to
+ * expire. If another device reconnects immediately, the
+ * idle timer will expire in the middle of the connection
+ * initialisation, screwing up things a lot...
+ * Therefore, we must stop the timer... */
+ irlmp_stop_idle_timer(self);
+
+ /*
+ * Inform all connected LSAP's using this link
+ */
+ irlmp_do_all_lsap_event(self->lsaps,
+ LM_LAP_DISCONNECT_INDICATION);
+
+ /* Force an expiry of the discovery log.
+ * Now that the LAP is free, the system may attempt to
+ * connect to another device. Unfortunately, our entries
+ * are stale. There is a small window (<3s) before the
+ * normal discovery will run and where irlmp_connect_request()
+ * can get the wrong info, so make sure things get
+ * cleaned *NOW* ;-) - Jean II */
+ irlmp_do_expiry();
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlmp_event[event]);
+ break;
+ }
+}
+
+/*********************************************************************
+ *
+ * LSAP connection control states
+ *
+ ********************************************************************/
+
+/*
+ * Function irlmp_state_disconnected (event, skb, info)
+ *
+ * DISCONNECTED
+ *
+ */
+static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ switch (event) {
+#ifdef CONFIG_IRDA_ULTRA
+ case LM_UDATA_INDICATION:
+ /* This is most bizarre. Those packets are aka unreliable
+ * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA.
+ * Why do we pass them as Ultra ??? Jean II */
+ irlmp_connless_data_indication(self, skb);
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
+ case LM_CONNECT_REQUEST:
+ pr_debug("%s(), LM_CONNECT_REQUEST\n", __func__);
+
+ if (self->conn_skb) {
+ net_warn_ratelimited("%s: busy with another request!\n",
+ __func__);
+ return -EBUSY;
+ }
+ /* Don't forget to refcount it (see irlmp_connect_request()) */
+ skb_get(skb);
+ self->conn_skb = skb;
+
+ irlmp_next_lsap_state(self, LSAP_SETUP_PEND);
+
+ /* Start watchdog timer (5 secs for now) */
+ irlmp_start_watchdog_timer(self, 5*HZ);
+
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
+ break;
+ case LM_CONNECT_INDICATION:
+ if (self->conn_skb) {
+ net_warn_ratelimited("%s: busy with another request!\n",
+ __func__);
+ return -EBUSY;
+ }
+ /* Don't forget to refcount it (see irlap_driver_rcv()) */
+ skb_get(skb);
+ self->conn_skb = skb;
+
+ irlmp_next_lsap_state(self, LSAP_CONNECT_PEND);
+
+ /* Start watchdog timer
+ * This is not mentioned in the spec, but there is a rare
+ * race condition that can get the socket stuck.
+ * If we receive this event while our LAP is closing down,
+ * the LM_LAP_CONNECT_REQUEST gets lost and we get stuck in
+ * CONNECT_PEND state forever.
+ * The other cause of getting stuck down there is if the
+ * higher layer never replies to the CONNECT_INDICATION.
+ * Anyway, it makes sense to make sure that we always have
+ * a backup plan. 1 second is plenty (should be immediate).
+ * Jean II */
+ irlmp_start_watchdog_timer(self, 1*HZ);
+
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlmp_state_connect (self, event, skb)
+ *
+ * CONNECT
+ *
+ */
+static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ struct lsap_cb *lsap;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ switch (event) {
+ case LM_CONNECT_RESPONSE:
+ /*
+ * Bind this LSAP to the IrLAP link where the connect was
+ * received
+ */
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self,
+ NULL);
+
+ IRDA_ASSERT(lsap == self, return -1;);
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ IRDA_ASSERT(self->lap->lsaps != NULL, return -1;);
+
+ hashbin_insert(self->lap->lsaps, (irda_queue_t *) self,
+ (long) self, NULL);
+
+ set_bit(0, &self->connected); /* TRUE */
+
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, CONNECT_CNF, skb);
+
+ del_timer(&self->watchdog_timer);
+
+ irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY);
+ break;
+ case LM_WATCHDOG_TIMEOUT:
+ /* May happen, who knows...
+ * Jean II */
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
+
+ /* Disconnect, get out... - Jean II */
+ self->lap = NULL;
+ self->dlsap_sel = LSAP_ANY;
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+ break;
+ default:
+ /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
+ * are *not* yet bound to the IrLAP link. Jean II */
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlmp_state_connect_pend (event, skb, info)
+ *
+ * CONNECT_PEND
+ *
+ */
+static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ switch (event) {
+ case LM_CONNECT_REQUEST:
+ /* Keep state */
+ break;
+ case LM_CONNECT_RESPONSE:
+ pr_debug("%s(), LM_CONNECT_RESPONSE, no indication issued yet\n",
+ __func__);
+ /* Keep state */
+ break;
+ case LM_DISCONNECT_REQUEST:
+ pr_debug("%s(), LM_DISCONNECT_REQUEST, not yet bound to IrLAP connection\n",
+ __func__);
+ /* Keep state */
+ break;
+ case LM_LAP_CONNECT_CONFIRM:
+ pr_debug("%s(), LS_CONNECT_CONFIRM\n", __func__);
+ irlmp_next_lsap_state(self, LSAP_CONNECT);
+
+ tx_skb = self->conn_skb;
+ self->conn_skb = NULL;
+
+ irlmp_connect_indication(self, tx_skb);
+ /* Drop reference count - see irlmp_connect_indication(). */
+ dev_kfree_skb(tx_skb);
+ break;
+ case LM_WATCHDOG_TIMEOUT:
+ /* Will happen in some rare cases because of a race condition.
+ * Just make sure we don't stay there forever...
+ * Jean II */
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
+
+ /* Go back to disconnected mode, keep the socket waiting */
+ self->lap = NULL;
+ self->dlsap_sel = LSAP_ANY;
+ if(self->conn_skb)
+ dev_kfree_skb(self->conn_skb);
+ self->conn_skb = NULL;
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+ break;
+ default:
+ /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
+ * are *not* yet bound to the IrLAP link. Jean II */
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlmp_state_dtr (self, event, skb)
+ *
+ * DATA_TRANSFER_READY
+ *
+ */
+static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ LM_REASON reason;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+
+ switch (event) {
+ case LM_DATA_REQUEST: /* Optimize for the common case */
+ irlmp_send_data_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, FALSE, skb);
+ break;
+ case LM_DATA_INDICATION: /* Optimize for the common case */
+ irlmp_data_indication(self, skb);
+ break;
+ case LM_UDATA_REQUEST:
+ IRDA_ASSERT(skb != NULL, return -1;);
+ irlmp_send_data_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, TRUE, skb);
+ break;
+ case LM_UDATA_INDICATION:
+ irlmp_udata_indication(self, skb);
+ break;
+ case LM_CONNECT_REQUEST:
+ pr_debug("%s(), LM_CONNECT_REQUEST, error, LSAP already connected\n",
+ __func__);
+ /* Keep state */
+ break;
+ case LM_CONNECT_RESPONSE:
+ pr_debug("%s(), LM_CONNECT_RESPONSE, error, LSAP already connected\n",
+ __func__);
+ /* Keep state */
+ break;
+ case LM_DISCONNECT_REQUEST:
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel, self->slsap_sel,
+ DISCONNECT, skb);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+ /* Called only from irlmp_disconnect_request(), will
+ * unbind from LAP over there. Jean II */
+
+ /* Try to close the LAP connection if it's still there */
+ if (self->lap) {
+ pr_debug("%s(), trying to close IrLAP\n",
+ __func__);
+ irlmp_do_lap_event(self->lap,
+ LM_LAP_DISCONNECT_REQUEST,
+ NULL);
+ }
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ reason = irlmp_convert_lap_reason(self->lap->reason);
+
+ irlmp_disconnect_indication(self, reason, NULL);
+ break;
+ case LM_DISCONNECT_INDICATION:
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ IRDA_ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ IRDA_ASSERT(skb != NULL, return -1;);
+ IRDA_ASSERT(skb->len > 3, return -1;);
+ reason = skb->data[3];
+
+ /* Try to close the LAP connection */
+ pr_debug("%s(), trying to close IrLAP\n", __func__);
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+
+ irlmp_disconnect_indication(self, reason, skb);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlmp_state_setup (event, skb, info)
+ *
+ * SETUP, Station Control has set up the underlying IrLAP connection.
+ * An LSAP connection request has been transmitted to the peer
+ * LSAP-Connection Control FSM and we are awaiting reply.
+ */
+static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ LM_REASON reason;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
+
+ switch (event) {
+ case LM_CONNECT_CONFIRM:
+ irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY);
+
+ del_timer(&self->watchdog_timer);
+
+ irlmp_connect_confirm(self, skb);
+ break;
+ case LM_DISCONNECT_INDICATION:
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ IRDA_ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ IRDA_ASSERT(skb != NULL, return -1;);
+ IRDA_ASSERT(skb->len > 3, return -1;);
+ reason = skb->data[3];
+
+ /* Try to close the LAP connection */
+ pr_debug("%s(), trying to close IrLAP\n", __func__);
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+
+ irlmp_disconnect_indication(self, reason, skb);
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ del_timer(&self->watchdog_timer);
+
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ IRDA_ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ reason = irlmp_convert_lap_reason(self->lap->reason);
+
+ irlmp_disconnect_indication(self, reason, skb);
+ break;
+ case LM_WATCHDOG_TIMEOUT:
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
+
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function irlmp_state_setup_pend (event, skb, info)
+ *
+ * SETUP_PEND, An LM_CONNECT_REQUEST has been received from the service
+ * user to set up an LSAP connection. A request has been sent to the
+ * LAP FSM to set up the underlying IrLAP connection, and we
+ * are awaiting confirm.
+ */
+static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
+{
+ struct sk_buff *tx_skb;
+ LM_REASON reason;
+ int ret = 0;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(irlmp != NULL, return -1;);
+
+ switch (event) {
+ case LM_LAP_CONNECT_CONFIRM:
+ IRDA_ASSERT(self->conn_skb != NULL, return -1;);
+
+ tx_skb = self->conn_skb;
+ self->conn_skb = NULL;
+
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, CONNECT_CMD, tx_skb);
+ /* Drop reference count - see irlap_data_request(). */
+ dev_kfree_skb(tx_skb);
+
+ irlmp_next_lsap_state(self, LSAP_SETUP);
+ break;
+ case LM_WATCHDOG_TIMEOUT:
+ pr_debug("%s() : WATCHDOG_TIMEOUT !\n", __func__);
+
+ IRDA_ASSERT(self->lap != NULL, return -1;);
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
+ break;
+ case LM_LAP_DISCONNECT_INDICATION: /* LS_Disconnect.indication */
+ del_timer( &self->watchdog_timer);
+
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
+
+ reason = irlmp_convert_lap_reason(self->lap->reason);
+
+ irlmp_disconnect_indication(self, reason, NULL);
+ break;
+ default:
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
+ break;
+ }
+ return ret;
+}
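To keep the three LAP-level handlers above easy to cross-check, here is an informal summary of the transitions they implement (derived from the code in this file, not a normative table):

	LAP_STANDBY:
	  LM_LAP_DISCOVERY_REQUEST     -> irlap_discovery_request(), stay
	  LM_LAP_CONNECT_INDICATION    -> accept, go LAP_ACTIVE
	  LM_LAP_CONNECT_REQUEST       -> irlap_connect_request(), go LAP_U_CONNECT
	  LM_LAP_DISCONNECT_INDICATION -> error case, stay LAP_STANDBY

	LAP_U_CONNECT:
	  LM_LAP_CONNECT_INDICATION/CONFIRM -> confirm to LSAPs, go LAP_ACTIVE
	  LM_LAP_DISCONNECT_INDICATION      -> notify LSAPs, go LAP_STANDBY
	  LM_LAP_DISCONNECT_REQUEST         -> close IrLAP if (almost) no LSAP is left

	LAP_ACTIVE:
	  LM_LAP_CONNECT_REQUEST       -> re-confirm to all LSAPs, stay
	  LM_LAP_DISCONNECT_REQUEST    -> start idle timer, or close IrLAP if no LSAP is left
	  LM_LAP_IDLE_TIMEOUT          -> close IrLAP if no LSAP is left
	  LM_LAP_DISCONNECT_INDICATION -> notify LSAPs, expire discovery log, go LAP_STANDBY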
diff --git a/drivers/staging/irda/net/irlmp_frame.c b/drivers/staging/irda/net/irlmp_frame.c
new file mode 100644
index 000000000000..38b0f994bc7b
--- /dev/null
+++ b/drivers/staging/irda/net/irlmp_frame.c
@@ -0,0 +1,476 @@
+/*********************************************************************
+ *
+ * Filename: irlmp_frame.c
+ * Version: 0.9
+ * Description: IrLMP frame implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Aug 19 02:09:59 1997
+ * Modified at: Mon Dec 13 13:41:12 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlap.h>
+#include <net/irda/timer.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irlmp_frame.h>
+#include <net/irda/discovery.h>
+
+static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap,
+ __u8 slsap, int status, hashbin_t *);
+
+inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ int expedited, struct sk_buff *skb)
+{
+ skb->data[0] = dlsap;
+ skb->data[1] = slsap;
+
+ if (expedited) {
+ pr_debug("%s(), sending expedited data\n", __func__);
+ irlap_data_request(self->irlap, skb, TRUE);
+ } else
+ irlap_data_request(self->irlap, skb, FALSE);
+}
+
+/*
+ * Function irlmp_send_lcf_pdu (dlsap, slsap, opcode,skb)
+ *
+ * Send Link Control Frame to IrLAP
+ */
+void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ __u8 opcode, struct sk_buff *skb)
+{
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ frame = skb->data;
+
+ frame[0] = dlsap | CONTROL_BIT;
+ frame[1] = slsap;
+
+ frame[2] = opcode;
+
+ if (opcode == DISCONNECT)
+ frame[3] = 0x01; /* Service user request */
+ else
+ frame[3] = 0x00; /* rsvd */
+
+ irlap_data_request(self->irlap, skb, FALSE);
+}
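As laid out by the assignments above, every LM control frame handed to IrLAP starts with the same 4-byte header; an informal sketch of the layout:

	frame[0] : destination LSAP selector, with CONTROL_BIT set
	frame[1] : source LSAP selector
	frame[2] : opcode (CONNECT_CMD, CONNECT_CNF, DISCONNECT, ...)
	frame[3] : 0x01 ("service user request") for DISCONNECT, 0x00 (reserved) otherwise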
+
+/*
+ * Function irlmp_input (skb)
+ *
+ * Used by IrLAP to pass received data frames to IrLMP layer
+ *
+ */
+void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
+ int unreliable)
+{
+ struct lsap_cb *lsap;
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address */
+ __u8 *fp;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ IRDA_ASSERT(skb->len > 2, return;);
+
+ fp = skb->data;
+
+ /*
+ * The next statements may be confusing, but we do this so that
+ * the destination LSAP of the received frame is the source LSAP in our view
+ */
+ slsap_sel = fp[0] & LSAP_MASK;
+ dlsap_sel = fp[1];
+
+ /*
+ * Check if this is an incoming connection, since we must deal with
+ * it in a different way than other established connections.
+ */
+ if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
+ pr_debug("%s(), incoming connection, source LSAP=%d, dest LSAP=%d\n",
+ __func__, slsap_sel, dlsap_sel);
+
+ /* Try to find LSAP among the unconnected LSAPs */
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
+ irlmp->unconnected_lsaps);
+
+ /* Maybe LSAP was already connected, so try one more time */
+ if (!lsap) {
+ pr_debug("%s(), incoming connection for LSAP already connected\n",
+ __func__);
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
+ self->lsaps);
+ }
+ } else
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
+ self->lsaps);
+
+ if (lsap == NULL) {
+ pr_debug("IrLMP, Sorry, no LSAP for received frame!\n");
+ pr_debug("%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
+ __func__, slsap_sel, dlsap_sel);
+ if (fp[0] & CONTROL_BIT) {
+ pr_debug("%s(), received control frame %02x\n",
+ __func__, fp[2]);
+ } else {
+ pr_debug("%s(), received data frame\n", __func__);
+ }
+ return;
+ }
+
+ /*
+ * Check if we received a control frame
+ */
+ if (fp[0] & CONTROL_BIT) {
+ switch (fp[2]) {
+ case CONNECT_CMD:
+ lsap->lap = self;
+ irlmp_do_lsap_event(lsap, LM_CONNECT_INDICATION, skb);
+ break;
+ case CONNECT_CNF:
+ irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
+ break;
+ case DISCONNECT:
+ pr_debug("%s(), Disconnect indication!\n",
+ __func__);
+ irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
+ skb);
+ break;
+ case ACCESSMODE_CMD:
+ pr_debug("Access mode cmd not implemented!\n");
+ break;
+ case ACCESSMODE_CNF:
+ pr_debug("Access mode cnf not implemented!\n");
+ break;
+ default:
+ pr_debug("%s(), Unknown control frame %02x\n",
+ __func__, fp[2]);
+ break;
+ }
+ } else if (unreliable) {
+ /* Optimize and bypass the state machine if possible */
+ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
+ irlmp_udata_indication(lsap, skb);
+ else
+ irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
+ } else {
+ /* Optimize and bypass the state machine if possible */
+ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
+ irlmp_data_indication(lsap, skb);
+ else
+ irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb);
+ }
+}
+
+/*
+ * Function irlmp_link_unitdata_indication (self, skb)
+ *
+ *
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
+{
+ struct lsap_cb *lsap;
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address */
+ __u8 pid; /* Protocol identifier */
+ __u8 *fp;
+ unsigned long flags;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ IRDA_ASSERT(skb->len > 2, return;);
+
+ fp = skb->data;
+
+ /*
+ * The next statements may be confusing, but we do this so that
+ * the destination LSAP of the received frame is the source LSAP in our view
+ */
+ slsap_sel = fp[0] & LSAP_MASK;
+ dlsap_sel = fp[1];
+ pid = fp[2];
+
+ if (pid & 0x80) {
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
+ return;
+ }
+
+ /* Check if frame is addressed to the connectionless LSAP */
+ if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
+ pr_debug("%s(), dropping frame!\n", __func__);
+ return;
+ }
+
+ /* Search the connectionless LSAP */
+ spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+ lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
+ while (lsap != NULL) {
+ /*
+ * Check if source LSAP and dest LSAP selectors and PID match.
+ */
+ if ((lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == dlsap_sel) &&
+ (lsap->pid == pid))
+ {
+ break;
+ }
+ lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps);
+ }
+ spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
+
+ if (lsap)
+ irlmp_connless_data_indication(lsap, skb);
+ else {
+ pr_debug("%s(), found no matching LSAP!\n", __func__);
+ }
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irlmp_link_disconnect_indication (reason, userdata)
+ *
+ * IrLAP has disconnected
+ *
+ */
+void irlmp_link_disconnect_indication(struct lap_cb *lap,
+ struct irlap_cb *irlap,
+ LAP_REASON reason,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(lap != NULL, return;);
+ IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
+
+ lap->reason = reason;
+ lap->daddr = DEV_ADDR_ANY;
+
+ /* FIXME: must do something with the skb if any */
+
+ /*
+ * Inform station state machine
+ */
+ irlmp_do_lap_event(lap, LM_LAP_DISCONNECT_INDICATION, NULL);
+}
+
+/*
+ * Function irlmp_link_connect_indication (qos)
+ *
+ * Incoming LAP connection!
+ *
+ */
+void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
+ __u32 daddr, struct qos_info *qos,
+ struct sk_buff *skb)
+{
+ /* Copy QoS settings for this session */
+ self->qos = qos;
+
+ /* Update destination device address */
+ self->daddr = daddr;
+ IRDA_ASSERT(self->saddr == saddr, return;);
+
+ irlmp_do_lap_event(self, LM_LAP_CONNECT_INDICATION, skb);
+}
+
+/*
+ * Function irlmp_link_connect_confirm (qos)
+ *
+ * LAP connection confirmed!
+ *
+ */
+void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
+ struct sk_buff *skb)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ IRDA_ASSERT(qos != NULL, return;);
+
+ /* No need to use the skb for now */
+
+ /* Copy QoS settings for this session */
+ self->qos = qos;
+
+ irlmp_do_lap_event(self, LM_LAP_CONNECT_CONFIRM, NULL);
+}
+
+/*
+ * Function irlmp_link_discovery_indication (self, log)
+ *
+ * Device is discovering us
+ *
+ * It's not an answer to our own discoveries, just another device trying
+ * to perform discovery, but we don't want to miss the opportunity
+ * to exploit this information, because :
+ * o We may not actively perform discovery (just passive discovery)
+ * o This type of discovery is much more reliable. In some cases, it
+ * seems that less than 50% of our discoveries get an answer, while
+ * we always get ~100% of these.
+ * o Faster discovery: it statistically divides the time between discovery
+ * events by 2 (important for the latency aspect and user feel)
+ * o Even if we do active discovery, the other node might not
+ * answer our discoveries (ex: Palm). The Palm will just perform
+ * one active discovery and connect directly to us.
+ *
+ * However, when both devices discover each other, they might attempt to
+ * connect to each other following the discovery event, and it would create
+ * collisions on the medium (SNRM battle).
+ * The "fix" for that is to disable all connection requests in IrLAP
+ * for 100ms after a discovery indication by setting the media_busy flag.
+ * Previously, we used to postpone the event which was quite ugly. Now
+ * that IrLAP takes care of this problem, just pass the event up...
+ *
+ * Jean II
+ */
+void irlmp_link_discovery_indication(struct lap_cb *self,
+ discovery_t *discovery)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ /* Add to main log, cleanup */
+ irlmp_add_discovery(irlmp->cachelog, discovery);
+
+ /* Just handle it the same way as a discovery confirm,
+ * bypass the LM_LAP state machine (see below) */
+ irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE);
+}
+
+/*
+ * Function irlmp_link_discovery_confirm (self, log)
+ *
+ * Called by IrLAP with a list of discoveries after the discovery
+ * request has been carried out. A NULL log is received if IrLAP
+ * was unable to carry out the discovery request
+ *
+ */
+void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ /* Add to main log, cleanup */
+ irlmp_add_discovery_log(irlmp->cachelog, log);
+
+ /* Propagate event to various LSAPs registered for it.
+ * We bypass the LM_LAP state machine because
+ * 1) We do it regardless of the LM_LAP state
+ * 2) It doesn't affect the LM_LAP state
+ * 3) Faster, slimmer, simpler, ...
+ * Jean II */
+ irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_ACTIVE);
+}
+
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+static inline void irlmp_update_cache(struct lap_cb *lap,
+ struct lsap_cb *lsap)
+{
+ /* Prevent concurrent read to get garbage */
+ lap->cache.valid = FALSE;
+ /* Update cache entry */
+ lap->cache.dlsap_sel = lsap->dlsap_sel;
+ lap->cache.slsap_sel = lsap->slsap_sel;
+ lap->cache.lsap = lsap;
+ lap->cache.valid = TRUE;
+}
+#endif
+
+/*
+ * Function irlmp_find_handle (self, dlsap_sel, slsap_sel, status, queue)
+ *
+ * Find handle associated with destination and source LSAP
+ *
+ * Any IrDA connection (LSAP/TSAP) is uniquely identified by
+ * 3 parameters, the local lsap, the remote lsap and the remote address.
+ * We may initiate multiple connections to the same remote service
+ * (they will have different local lsap), a remote device may initiate
+ * multiple connections to the same local service (they will have
+ * different remote lsap), or multiple devices may connect to the same
+ * service and may use the same remote lsap (and they will have
+ * different remote addresses).
+ * So, where is the remote address? Each LAP connection is made with
+ * a single remote device, so it implies a specific remote address.
+ * Jean II
+ */
+static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
+ __u8 slsap_sel, int status,
+ hashbin_t *queue)
+{
+ struct lsap_cb *lsap;
+ unsigned long flags;
+
+ /*
+ * Optimize for the common case. We assume that the last frame
+ * received is in the same connection as the last one, so check in
+ * cache first to avoid the linear search
+ */
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ if ((self->cache.valid) &&
+ (self->cache.slsap_sel == slsap_sel) &&
+ (self->cache.dlsap_sel == dlsap_sel))
+ {
+ return self->cache.lsap;
+ }
+#endif
+
+ spin_lock_irqsave(&queue->hb_spinlock, flags);
+
+ lsap = (struct lsap_cb *) hashbin_get_first(queue);
+ while (lsap != NULL) {
+ /*
+ * If this is an incoming connection, then the destination
+ * LSAP selector may have been specified as LM_ANY so that
+ * any client can connect. In that case we only need to check
+ * if the source LSAP (in our view!) match!
+ */
+ if ((status == CONNECT_CMD) &&
+ (lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == LSAP_ANY)) {
+ /* This is where the dest lsap sel is set on incoming
+ * lsaps */
+ lsap->dlsap_sel = dlsap_sel;
+ break;
+ }
+ /*
+ * Check if source LSAP and dest LSAP selectors match.
+ */
+ if ((lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == dlsap_sel))
+ break;
+
+ lsap = (struct lsap_cb *) hashbin_get_next(queue);
+ }
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ if(lsap)
+ irlmp_update_cache(self, lsap);
+#endif
+ spin_unlock_irqrestore(&queue->hb_spinlock, flags);
+
+ /* Return what we've found or NULL */
+ return lsap;
+}
diff --git a/drivers/staging/irda/net/irmod.c b/drivers/staging/irda/net/irmod.c
new file mode 100644
index 000000000000..4319f4ff66b0
--- /dev/null
+++ b/drivers/staging/irda/net/irmod.c
@@ -0,0 +1,199 @@
+/*********************************************************************
+ *
+ * Filename: irmod.c
+ * Version: 0.9
+ * Description: IrDA stack main entry points
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Dec 15 13:55:39 1997
+ * Modified at: Wed Jan 5 15:12:41 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * This file contains the main entry points of the IrDA stack.
+ * They are in this file and not af_irda.c because some developers
+ * are using the IrDA stack without the socket API (compiling out
+ * af_irda.c).
+ * Jean II
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h> /* notify_t */
+#include <net/irda/irlap.h> /* irlap_init */
+#include <net/irda/irlmp.h> /* irlmp_init */
+#include <net/irda/iriap.h> /* iriap_init */
+#include <net/irda/irttp.h> /* irttp_init */
+#include <net/irda/irda_device.h> /* irda_device_init */
+
+/* Packet type handler.
+ * Tell the kernel how IrDA packets should be handled.
+ */
+static struct packet_type irda_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_IRDA),
+ .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */
+};
+
+/*
+ * Function irda_notify_init (notify)
+ *
+ * Used for initializing the notify structure
+ *
+ */
+void irda_notify_init(notify_t *notify)
+{
+ notify->data_indication = NULL;
+ notify->udata_indication = NULL;
+ notify->connect_confirm = NULL;
+ notify->connect_indication = NULL;
+ notify->disconnect_indication = NULL;
+ notify->flow_indication = NULL;
+ notify->status_indication = NULL;
+ notify->instance = NULL;
+ strlcpy(notify->name, "Unknown", sizeof(notify->name));
+}
+EXPORT_SYMBOL(irda_notify_init);
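A minimal sketch of the intended usage: zero the callback block first, then override only what the layer implements (the layer name and the `self` instance pointer below are hypothetical placeholders):

	/* Sketch only: how a layer typically fills in its notify block. */
	static void demo_setup_notify(notify_t *notify, void *self)
	{
		irda_notify_init(notify);	/* every callback starts out NULL */
		notify->instance = self;	/* handed back on each callback */
		strlcpy(notify->name, "demo-layer", sizeof(notify->name));
		/* ...then hook only the *_indication / *_confirm callbacks the
		 * layer actually implements before registering with IrLMP/IrTTP. */
	}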
+
+/*
+ * Function irda_init (void)
+ *
+ * Protocol stack initialisation entry point.
+ * Initialise the various components of the IrDA stack
+ */
+static int __init irda_init(void)
+{
+ int ret = 0;
+
+ /* Lower layer of the stack */
+ irlmp_init();
+ irlap_init();
+
+ /* Driver/dongle support */
+ irda_device_init();
+
+ /* Higher layers of the stack */
+ iriap_init();
+ irttp_init();
+ ret = irsock_init();
+ if (ret < 0)
+ goto out_err_1;
+
+ /* Add IrDA packet type (Start receiving packets) */
+ dev_add_pack(&irda_packet_type);
+
+ /* External APIs */
+#ifdef CONFIG_PROC_FS
+ irda_proc_register();
+#endif
+#ifdef CONFIG_SYSCTL
+ ret = irda_sysctl_register();
+ if (ret < 0)
+ goto out_err_2;
+#endif
+
+ ret = irda_nl_register();
+ if (ret < 0)
+ goto out_err_3;
+
+ return 0;
+
+ out_err_3:
+#ifdef CONFIG_SYSCTL
+ irda_sysctl_unregister();
+ out_err_2:
+#endif
+#ifdef CONFIG_PROC_FS
+ irda_proc_unregister();
+#endif
+
+ /* Remove IrDA packet type (stop receiving packets) */
+ dev_remove_pack(&irda_packet_type);
+
+ /* Remove higher layers */
+ irsock_cleanup();
+ out_err_1:
+ irttp_cleanup();
+ iriap_cleanup();
+
+ /* Remove lower layers */
+ irda_device_cleanup();
+ irlap_cleanup(); /* Must be done before irlmp_cleanup()! DB */
+
+ /* Remove middle layer */
+ irlmp_cleanup();
+
+
+ return ret;
+}
+
+/*
+ * Function irda_cleanup (void)
+ *
+ * Protocol stack cleanup/removal entry point.
+ * Cleanup the various components of the IrDA stack
+ */
+static void __exit irda_cleanup(void)
+{
+ /* Remove External APIs */
+ irda_nl_unregister();
+
+#ifdef CONFIG_SYSCTL
+ irda_sysctl_unregister();
+#endif
+#ifdef CONFIG_PROC_FS
+ irda_proc_unregister();
+#endif
+
+ /* Remove IrDA packet type (stop receiving packets) */
+ dev_remove_pack(&irda_packet_type);
+
+ /* Remove higher layers */
+ irsock_cleanup();
+ irttp_cleanup();
+ iriap_cleanup();
+
+ /* Remove lower layers */
+ irda_device_cleanup();
+ irlap_cleanup(); /* Must be done before irlmp_cleanup()! DB */
+
+ /* Remove middle layer */
+ irlmp_cleanup();
+}
+
+/*
+ * The IrDA stack must be initialised *before* drivers get initialised,
+ * and *before* higher protocols (IrLAN/IrCOMM/IrNET) get initialised,
+ * otherwise bad things will happen (hashbins will be NULL for example).
+ * Those modules are at module_init()/device_initcall() level.
+ *
+ * On the other hand, it needs to be initialised *after* the basic
+ * networking, the /proc/net filesystem and sysctl module. Those are
+ * currently initialised in .../init/main.c (before initcalls).
+ * Also, IrDA drivers need to be initialised *after* the random number
+ * generator (main stack and higher layer init don't need it anymore).
+ *
+ * Jean II
+ */
+device_initcall(irda_init);
+module_exit(irda_cleanup);
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> & Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("The Linux IrDA Protocol Stack");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_IRDA);
diff --git a/drivers/staging/irda/net/irnet/Kconfig b/drivers/staging/irda/net/irnet/Kconfig
new file mode 100644
index 000000000000..28c557f0fdd2
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/Kconfig
@@ -0,0 +1,13 @@
+config IRNET
+ tristate "IrNET protocol"
+ depends on IRDA && PPP
+ help
+ Say Y here if you want to build support for the IrNET protocol.
+ To compile it as a module, choose M here: the module will be
+ called irnet. IrNET is a PPP driver, so you will also need a
+ working PPP subsystem (driver, daemon and config)...
+
+ IrNET is an alternate way to transfer TCP/IP traffic over IrDA. It
+ uses synchronous PPP over a set of point-to-point IrDA sockets. You
+ can use it between Linux machines or with W2k.
+
diff --git a/drivers/staging/irda/net/irnet/Makefile b/drivers/staging/irda/net/irnet/Makefile
new file mode 100644
index 000000000000..61c365c8a2a0
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux IrDA IrNET protocol layer.
+#
+
+obj-$(CONFIG_IRNET) += irnet.o
+
+irnet-y := irnet_ppp.o irnet_irda.o
diff --git a/drivers/staging/irda/net/irnet/irnet.h b/drivers/staging/irda/net/irnet/irnet.h
new file mode 100644
index 000000000000..9d451f8ed47a
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/irnet.h
@@ -0,0 +1,522 @@
+/*
+ * IrNET protocol module : Synchronous PPP over an IrDA socket.
+ *
+ * Jean II - HPL `00 - <jt@hpl.hp.com>
+ *
+ * This file contains definitions and declarations global to the IrNET module,
+ * all grouped in one place...
+ * This file is a *private* header, so other modules don't want to know
+ * what's in there...
+ *
+ * Note : as with most of the Linux kernel, this module is available
+ * under the GNU General Public License (GPL).
+ */
+
+#ifndef IRNET_H
+#define IRNET_H
+
+/************************** DOCUMENTATION ***************************/
+/*
+ * What is IrNET
+ * -------------
+ * IrNET is a protocol for carrying TCP/IP traffic between two
+ * IrDA peers in an efficient fashion. It is a thin layer, passing PPP
+ * packets to IrTTP and vice versa. It uses PPP in synchronous mode,
+ * because IrTTP offers a reliable sequenced packet service (as opposed
+ * to a byte stream). In fact, you could see IrNET as carrying TCP/IP
+ * in an IrDA socket, using PPP to provide the glue.
+ *
+ * The main difference with traditional PPP over IrCOMM is that we
+ * avoid the framing and serial emulation which are a performance
+ * bottleneck. It also allows multipoint communications in a sensible
+ * fashion.
+ *
+ * The main difference with IrLAN is that we use PPP for the link
+ * management, which is more standard, interoperable and flexible than
+ * the IrLAN protocol. For example, PPP adds authentication,
+ * encryption, compression, header compression and automated routing
+ * setup. And, as IrNET lets PPP do the hard work, the implementation
+ * is much simpler than IrLAN's.
+ *
+ * The Linux implementation
+ * ------------------------
+ * IrNET is written on top of the Linux-IrDA stack, and interfaces with
+ * the generic Linux PPP driver. Because IrNET depends on recent
+ * changes to the PPP driver interface, IrNET will work only with very
+ * recent kernels (2.3.99-pre6 and up).
+ *
+ * The present implementation offers the following features :
+ * o simple user interface using pppd
+ * o efficient implementation (interface directly to PPP and IrTTP)
+ * o addressing (you can specify the name of the IrNET recipient)
+ * o multipoint operation (limited by IrLAP specification)
+ * o information in /proc/net/irda/irnet
+ * o IrNET events on /dev/irnet (for user space daemon)
+ * o IrNET daemon (irnetd) to automatically handle incoming requests
+ * o Windows 2000 compatibility (tested, but needs more work)
+ * Currently missing :
+ * o Lots of testing (that's your job)
+ * o Connection retries (may be too hard to do)
+ * o Check pppd persist mode
+ * o User space daemon (to automatically handle incoming requests)
+ *
+ * The setup is not currently the easiest, but this should get much
+ * better once everything is integrated...
+ *
+ * Acknowledgements
+ * ----------------
+ * This module is based on :
+ * o The PPP driver (ppp_synctty/ppp_generic) by Paul Mackerras
+ * o The IrLAN protocol (irlan_common/XXX) by Dag Brattli
+ * o The IrSock interface (af_irda) by Dag Brattli
+ * o Some other bits from the kernel and my drivers...
+ * Infinite thanks to those brave souls for providing the infrastructure
+ * upon which IrNET is built.
+ *
+ * Thanks to all my colleagues in HP for helping me. In particular,
+ * thanks to Salil Pradhan and Bill Serra for W2k testing...
+ * Thanks to Luiz Magalhaes for irnetd and much testing...
+ *
+ * Thanks to Alan Cox for answering lots of my stupid questions, and
+ * to Paul Mackerras for answering my questions on how to best integrate
+ * IrNET and pppd.
+ *
+ * Jean II
+ *
+ * Note on some implementations choices...
+ * ------------------------------------
+ * 1) Direct interface vs tty/socket
+ * I could have used a tty interface to hook to ppp and use the full
+ * socket API to connect to IrDA. The code would have been easier to
+ * maintain, and maybe the code would have been smaller...
+ * Instead, we hook directly to ppp_generic and to IrTTP, which makes
+ * things more complicated...
+ *
+ * The first reason is flexibility : this allows us to create IrNET
+ * instances on demand (no /dev/ircommX crap) and to allow linkname
+ * specification on the pppd command line...
+ *
+ * The second reason is speed optimisation. If you look closely at the
+ * transmit and receive paths, you will notice that they are "super lean"
+ * (that's why they look ugly), with no function calls and as little data
+ * copying and modification as I could manage...
+ *
+ * 2) irnetd in user space
+ * irnetd is implemented in user space, which is necessary to call pppd.
+ * This also gives maximum benefit in terms of flexibility and customisability,
+ * and allows offering the event channel, useful for other stuff like debug.
+ *
+ * On the other hand, this requires a loose coordination between the
+ * present module and irnetd. One critical area is how incoming requests
+ * are handled.
+ * When irnet receives an incoming request, it sends an event to irnetd and
+ * drops the incoming IrNET socket.
+ * irnetd starts a pppd instance, which creates a new IrNET socket. This new
+ * socket is then connected in the originating node to the pppd instance.
+ * At this point, in the originating node, the first socket is closed.
+ *
+ * I admit, this is a bit messy and wastes some resources. The alternative
+ * is caching incoming sockets, and that's also quite messy and wastes
+ * resources.
+ * We also make connection time slower. For example, on a 115 kb/s link it
+ * adds 60ms to the connection time (770 ms). However, this is still less than
+ * the time it takes to fire up pppd on my P133...
+ *
+ *
+ * History :
+ * -------
+ *
+ * v1 - 15.5.00 - Jean II
+ * o Basic IrNET (hook to ppp_generic & IrTTP - incl. multipoint)
+ * o control channel on /dev/irnet (set name/address)
+ * o event channel on /dev/irnet (for user space daemon)
+ *
+ * v2 - 5.6.00 - Jean II
+ * o Enable DROP_NOT_READY to avoid PPP timeouts & other weirdness...
+ * o Add DISCONNECT_TO event and rename DISCONNECT_FROM.
+ * o Set official device number allocation on /dev/irnet
+ *
+ * v3 - 30.8.00 - Jean II
+ * o Update to latest Linux-IrDA changes :
+ * - queue_t => irda_queue_t
+ * o Update to ppp-2.4.0 :
+ * - move irda_irnet_connect from PPPIOCATTACH to TIOCSETD
+ * o Add EXPIRE event (depend on new IrDA-Linux patch)
+ * o Switch from `hashbin_remove' to `hashbin_remove_this' to fix
+ * a multilink bug... (depend on new IrDA-Linux patch)
+ * o fix a self->daddr to self->raddr in irda_irnet_connect to fix
+ * another multilink bug (darn !)
+ * o Remove LINKNAME_IOCTL cruft
+ *
+ * v3b - 31.8.00 - Jean II
+ * o Dump discovery log at event channel startup
+ *
+ * v4 - 28.9.00 - Jean II
+ * o Fix interaction between poll/select and dump discovery log
+ * o Add IRNET_BLOCKED_LINK event (depend on new IrDA-Linux patch)
+ * o Add IRNET_NOANSWER_FROM event (mostly to help support)
+ * o Release flow control in disconnect_indication
+ * o Block packets while connecting (speed up connections)
+ *
+ * v5 - 11.01.01 - Jean II
+ * o Init self->max_header_size, just in case...
+ * o Set up ap->chan.hdrlen, to get zero copy on tx side working.
+ * o avoid tx->ttp->flow->ppp->tx->... loop, by checking flow state
+ * Thanks to Christian Gennerat for finding this bug !
+ * ---
+ * o Declare the proper MTU/MRU that we can support
+ * (but PPP doesn't read the MTU value :-()
+ * o Declare hashbin HB_NOLOCK instead of HB_LOCAL to avoid
+ * disabling and enabling irq twice
+ *
+ * v6 - 31.05.01 - Jean II
+ * o Print source address in Found, Discovery, Expiry & Request events
+ * o Print requested source address in /proc/net/irnet
+ * o Change control channel input. Allow multiple commands in one line.
+ * o Add saddr command to change ap->rsaddr (and use that in IrDA)
+ * ---
+ * o Make the IrDA connection procedure totally asynchronous.
+ * Heavy rewrite of the IAS query code and the whole connection
+ * procedure. Now, irnet_connect() no longer needs to be called from
+ * a process context...
+ * o Enable IrDA connect retries in ppp_irnet_send(). The good thing
+ * is that IrDA connect retries are directly driven by PPP LCP
+ * retries (we retry for each LCP packet), so that everything
+ * is transparently controlled from pppd lcp-max-configure.
+ * o Add ttp_connect flag to prevent re-entry into the connect procedure
+ * o Test and fixups to eliminate side effects of retries
+ *
+ * v7 - 22.08.01 - Jean II
+ * o Cleanup : Change "saddr = 0x0" to "saddr = DEV_ADDR_ANY"
+ * o Fix bug in BLOCK_WHEN_CONNECT introduced in v6 : due to the
+ * asynchronous IAS query, self->tsap is NULL when PPP sends the
+ * first packet. This was preventing "connect-delay 0" from working.
+ * Change the test in ppp_irnet_send() to self->ttp_connect.
+ *
+ * v8 - 1.11.01 - Jean II
+ * o Tighten the use of self->ttp_connect and self->ttp_open to
+ * prevent various race conditions.
+ * o Avoid leaking discovery log and skb
+ * o Replace "self" with "server" in irnet_connect_indication() to
+ * better detect cut'n'paste error ;-)
+ *
+ * v9 - 29.11.01 - Jean II
+ * o Fix event generation in disconnect indication that I broke in v8
+ * It was always generating "No-Answer" because I was testing ttp_open
+ * just after clearing it. *blush*.
+ * o Use newly created irttp_listen() to fix potential crash when LAP
+ * destroyed before irnet module removed.
+ *
+ * v10 - 4.3.2 - Jean II
+ * o When receiving a disconnect indication, don't reenable the
+ * PPP Tx queue, this will trigger a reconnect. Instead, close
+ * the channel, which will kill pppd...
+ *
+ * v11 - 20.3.02 - Jean II
+ * o Oops ! v10 fix disabled IrNET retries and passive behaviour.
+ * Better fix in irnet_disconnect_indication() :
+ * - if connected, kill pppd via hangup.
+ * - if not connected, reenable ppp Tx, which trigger IrNET retry.
+ *
+ * v12 - 10.4.02 - Jean II
+ * o Fix race condition in irnet_connect_indication().
+ * If the socket was already trying to connect, drop old connection
+ * and use new one only if acting as primary. See comments.
+ *
+ * v13 - 30.5.02 - Jean II
+ * o Update module init code
+ *
+ * v14 - 20.2.03 - Jean II
+ * o Add discovery hint bits in the control channel.
+ * o Remove obsolete MOD_INC/DEC_USE_COUNT in favor of .owner
+ *
+ * v15 - 7.4.03 - Jean II
+ * o Replace spin_lock_irqsave() with spin_lock_bh() so that we can
+ * use ppp_unit_number(). It's probably also better overall...
+ * o Disable call to ppp_unregister_channel(), because we can't do it.
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/capability.h>
+#include <linux/ctype.h> /* isspace() */
+#include <linux/string.h> /* skip_spaces() */
+#include <linux/uaccess.h>
+#include <linux/init.h>
+
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/ppp_channel.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irttp.h>
+#include <net/irda/discovery.h>
+
+/***************************** OPTIONS *****************************/
+/*
+ * Define or undefine these to compile in or leave out some optional
+ * parts of the IrNET driver...
+ * Note : the present defaults make sense, play with that at your
+ * own risk...
+ */
+/* IrDA side of the business... */
+#define DISCOVERY_NOMASK /* To enable W2k compatibility... */
+#define ADVERTISE_HINT /* Advertise IrLAN hint bit */
+#define ALLOW_SIMULT_CONNECT /* This seems to work, cross fingers... */
+#define DISCOVERY_EVENTS /* Query the discovery log to post events */
+#define INITIAL_DISCOVERY /* Dump current discovery log as events */
+#undef STREAM_COMPAT /* Not needed - potentially messy */
+#undef CONNECT_INDIC_KICK /* Might mess IrDA, not needed */
+#undef FAIL_SEND_DISCONNECT /* Might mess IrDA, not needed */
+#undef PASS_CONNECT_PACKETS /* Not needed ? Safe */
+#undef MISSING_PPP_API /* Stuff I wish I could do */
+
+/* PPP side of the business */
+#define BLOCK_WHEN_CONNECT /* Block packets when connecting */
+#define CONNECT_IN_SEND /* Retry IrDA connection procedure */
+#undef FLUSH_TO_PPP /* Not sure about this one, let's play safe */
+#undef SECURE_DEVIRNET /* Bah... */
+
+/****************************** DEBUG ******************************/
+
+/*
+ * This set of flags enables and disables all the various warning,
+ * error and debug messages of this driver.
+ * Each section can be enabled and disabled independently.
+ */
+/* In the PPP part */
+#define DEBUG_CTRL_TRACE 0 /* Control channel */
+#define DEBUG_CTRL_INFO 0 /* various info */
+#define DEBUG_CTRL_ERROR 1 /* problems */
+#define DEBUG_FS_TRACE 0 /* filesystem callbacks */
+#define DEBUG_FS_INFO 0 /* various info */
+#define DEBUG_FS_ERROR 1 /* problems */
+#define DEBUG_PPP_TRACE 0 /* PPP related functions */
+#define DEBUG_PPP_INFO 0 /* various info */
+#define DEBUG_PPP_ERROR 1 /* problems */
+#define DEBUG_MODULE_TRACE 0 /* module insertion/removal */
+#define DEBUG_MODULE_ERROR 1 /* problems */
+
+/* In the IrDA part */
+#define DEBUG_IRDA_SR_TRACE 0 /* IRDA subroutines */
+#define DEBUG_IRDA_SR_INFO 0 /* various info */
+#define DEBUG_IRDA_SR_ERROR 1 /* problems */
+#define DEBUG_IRDA_SOCK_TRACE 0 /* IRDA main socket functions */
+#define DEBUG_IRDA_SOCK_INFO 0 /* various info */
+#define DEBUG_IRDA_SOCK_ERROR 1 /* problems */
+#define DEBUG_IRDA_SERV_TRACE 0 /* The IrNET server */
+#define DEBUG_IRDA_SERV_INFO 0 /* various info */
+#define DEBUG_IRDA_SERV_ERROR 1 /* problems */
+#define DEBUG_IRDA_TCB_TRACE 0 /* IRDA IrTTP callbacks */
+#define DEBUG_IRDA_CB_INFO 0 /* various info */
+#define DEBUG_IRDA_CB_ERROR 1 /* problems */
+#define DEBUG_IRDA_OCB_TRACE 0 /* IRDA other callbacks */
+#define DEBUG_IRDA_OCB_INFO 0 /* various info */
+#define DEBUG_IRDA_OCB_ERROR 1 /* problems */
+
+#define DEBUG_ASSERT 0 /* Verify all assertions */
+
+/*
+ * These are the macros we are using to actually print the debug
+ * statements. Don't look at them, they're ugly...
+ *
+ * One of the tricks is that, as the DEBUG_XXX are constant, the
+ * compiler will optimise away the if() in all cases.
+ */
+/* All error messages (will show up in the normal logs) */
+#define DERROR(dbg, format, args...) \
+ {if(DEBUG_##dbg) \
+ printk(KERN_INFO "irnet: %s(): " format, __func__ , ##args);}
+
+/* Normal debug message (will show up in /var/log/debug) */
+#define DEBUG(dbg, format, args...) \
+ {if(DEBUG_##dbg) \
+ printk(KERN_DEBUG "irnet: %s(): " format, __func__ , ##args);}
+
+/* Entering a function (trace) */
+#define DENTER(dbg, format, args...) \
+ {if(DEBUG_##dbg) \
+ printk(KERN_DEBUG "irnet: -> %s" format, __func__ , ##args);}
+
+/* Entering and exiting a function in one go (trace) */
+#define DPASS(dbg, format, args...) \
+ {if(DEBUG_##dbg) \
+ printk(KERN_DEBUG "irnet: <>%s" format, __func__ , ##args);}
+
+/* Exiting a function (trace) */
+#define DEXIT(dbg, format, args...) \
+ {if(DEBUG_##dbg) \
+ printk(KERN_DEBUG "irnet: <-%s()" format, __func__ , ##args);}
+
+/* Exit a function with debug */
+#define DRETURN(ret, dbg, args...) \
+ {DEXIT(dbg, ": " args);\
+ return ret; }
+
+/* Exit a function on failed condition */
+#define DABORT(cond, ret, dbg, args...) \
+ {if(cond) {\
+ DERROR(dbg, args);\
+ return ret; }}
+
+/* Invalid assertion, print out an error and exit... */
+#define DASSERT(cond, ret, dbg, args...) \
+ {if((DEBUG_ASSERT) && !(cond)) {\
+ DERROR(dbg, "Invalid assertion: " args);\
+ return ret; }}
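+
+/*
+ * Illustration (a minimal sketch, not part of the original comment) :
+ * with DEBUG_PPP_TRACE set to 0, a call such as
+ *
+ *	DENTER(PPP_TRACE, "(ap=0x%p)\n", ap);
+ *
+ * expands to {if(0) printk(...);} and is removed entirely by the
+ * compiler, while setting the flag to 1 turns it back into a KERN_DEBUG
+ * printk prefixed with "irnet: -> <function name>".
+ */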
+
+/************************ CONSTANTS & MACROS ************************/
+
+/* Paranoia */
+#define IRNET_MAGIC 0xB00754
+
+/* Number of control events in the control channel buffer... */
+#define IRNET_MAX_EVENTS 8 /* Should be more than enough... */
+
+/****************************** TYPES ******************************/
+
+/*
+ * This is the main structure where we store all the data pertaining to
+ * one instance of irnet.
+ * Note : in irnet functions, a pointer to this structure is usually called
+ * "ap" or "self". If the code is borrowed from the IrDA stack, it tends
+ * to be called "self", and if it is borrowed from the PPP driver it is
+ * "ap". Apart from that, it's exactly the same structure ;-)
+ */
+typedef struct irnet_socket
+{
+ /* ------------------- Instance management ------------------- */
+ /* We manage a linked list of IrNET socket instances */
+ irda_queue_t q; /* Must be first - for hashbin */
+ int magic; /* Paranoia */
+
+ /* --------------------- FileSystem part --------------------- */
+ /* "pppd" interact directly with us on a /dev/ file */
+ struct file * file; /* File descriptor of this instance */
+ /* TTY stuff - to keep "pppd" happy */
+ struct ktermios termios; /* Various tty flags */
+ /* Stuff for the control channel */
+ int event_index; /* Last read in the event log */
+
+ /* ------------------------- PPP part ------------------------- */
+ /* We interface directly to the ppp_generic driver in the kernel */
+ int ppp_open; /* registered with ppp_generic */
+ struct ppp_channel chan; /* Interface to generic ppp layer */
+
+ int mru; /* Max size of PPP payload */
+ u32 xaccm[8]; /* Asynchronous character map (just */
+ u32 raccm; /* to please pppd - dummy) */
+ unsigned int flags; /* PPP flags (compression, ...) */
+ unsigned int rbits; /* Unused receive flags ??? */
+ struct work_struct disconnect_work; /* Process context disconnection */
+ /* ------------------------ IrTTP part ------------------------ */
+ /* We create a pseudo "socket" over the IrDA transport */
+ unsigned long ttp_open; /* Set when IrTTP is ready */
+ unsigned long ttp_connect; /* Set when IrTTP is connecting */
+ struct tsap_cb * tsap; /* IrTTP instance (the connection) */
+
+ char rname[NICKNAME_MAX_LEN + 1];
+ /* IrDA nickname of destination */
+ __u32 rdaddr; /* Requested peer IrDA address */
+ __u32 rsaddr; /* Requested local IrDA address */
+ __u32 daddr; /* actual peer IrDA address */
+ __u32 saddr; /* my local IrDA address */
+ __u8 dtsap_sel; /* Remote TSAP selector */
+ __u8 stsap_sel; /* Local TSAP selector */
+
+ __u32 max_sdu_size_rx;/* Socket parameters used for IrTTP */
+ __u32 max_sdu_size_tx;
+ __u32 max_data_size;
+ __u8 max_header_size;
+ LOCAL_FLOW tx_flow; /* State of the Tx path in IrTTP */
+
+ /* ------------------- IrLMP and IrIAS part ------------------- */
+ /* Used for IrDA Discovery and socket name resolution */
+ void * ckey; /* IrLMP client handle */
+ __u16 mask; /* Hint bits mask (filter discov.)*/
+ int nslots; /* Number of slots for discovery */
+
+ struct iriap_cb * iriap; /* Used to query remote IAS */
+ int errno; /* status of the IAS query */
+
+ /* -------------------- Discovery log part -------------------- */
+ /* Used by initial discovery on the control channel
+ * and by irnet_discover_daddr_and_lsap_sel() */
+ struct irda_device_info *discoveries; /* Copy of the discovery log */
+ int disco_index; /* Last read in the discovery log */
+ int disco_number; /* Size of the discovery log */
+
+ struct mutex lock;
+
+} irnet_socket;
+
+/*
+ * These are the various events that we will generate on the control channel
+ */
+typedef enum irnet_event
+{
+ IRNET_DISCOVER, /* New IrNET node discovered */
+ IRNET_EXPIRE, /* IrNET node expired */
+ IRNET_CONNECT_TO, /* IrNET socket has connected to other node */
+ IRNET_CONNECT_FROM, /* Other node has connected to IrNET socket */
+ IRNET_REQUEST_FROM, /* Non satisfied connection request */
+ IRNET_NOANSWER_FROM, /* Failed connection request */
+ IRNET_BLOCKED_LINK, /* Link (IrLAP) is blocked for > 3s */
+ IRNET_DISCONNECT_FROM, /* IrNET socket has disconnected */
+ IRNET_DISCONNECT_TO /* Closing IrNET socket */
+} irnet_event;
+
+/*
+ * This is the storage for an event and its arguments
+ */
+typedef struct irnet_log
+{
+ irnet_event event;
+ int unit;
+ __u32 saddr;
+ __u32 daddr;
+ char name[NICKNAME_MAX_LEN + 1]; /* 21 + 1 */
+ __u16_host_order hints; /* Discovery hint bits */
+} irnet_log;
+
+/*
+ * This is the storage for all events and related stuff...
+ */
+typedef struct irnet_ctrl_channel
+{
+ irnet_log log[IRNET_MAX_EVENTS]; /* Event log */
+ int index; /* Current index in log */
+ spinlock_t spinlock; /* Serialize access to the event log */
+ wait_queue_head_t rwait; /* processes blocked on read (or poll) */
+} irnet_ctrl_channel;
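+
+/*
+ * Note (a rough sketch of how the pieces above fit together, as far as
+ * this patch shows) : the log[] array is used as a small ring buffer.
+ * The producer, irnet_post_event() in irnet_irda.c, fills log[index],
+ * then advances index modulo IRNET_MAX_EVENTS and wakes rwait. Each
+ * control-channel reader keeps its own irnet_socket.event_index and is
+ * expected to catch up to irnet_events.index; the reader side itself
+ * lives in irnet_ppp.c.
+ */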
+
+/**************************** PROTOTYPES ****************************/
+/*
+ * Global functions of the IrNET module
+ * Note : we also list here functions called from one file to the other.
+ */
+
+/* -------------------------- IRDA PART -------------------------- */
+int irda_irnet_create(irnet_socket *); /* Initialise an IrNET socket */
+int irda_irnet_connect(irnet_socket *); /* Try to connect over IrDA */
+void irda_irnet_destroy(irnet_socket *); /* Teardown an IrNET socket */
+int irda_irnet_init(void); /* Initialise IrDA part of IrNET */
+void irda_irnet_cleanup(void); /* Teardown IrDA part of IrNET */
+
+/**************************** VARIABLES ****************************/
+
+/* Control channel stuff - allocated in irnet_irda.h */
+extern struct irnet_ctrl_channel irnet_events;
+
+#endif /* IRNET_H */
diff --git a/drivers/staging/irda/net/irnet/irnet_irda.c b/drivers/staging/irda/net/irnet/irnet_irda.c
new file mode 100644
index 000000000000..e390bceeb2f8
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/irnet_irda.c
@@ -0,0 +1,1885 @@
+/*
+ * IrNET protocol module : Synchronous PPP over an IrDA socket.
+ *
+ * Jean II - HPL `00 - <jt@hpl.hp.com>
+ *
+ * This file implements the IrDA interface of IrNET.
+ * Basically, we sit on top of IrTTP. We set up IrTTP, IrIAS properly,
+ * and exchange frames with IrTTP.
+ */
+
+#include "irnet_irda.h" /* Private header */
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+/*
+ * PPP disconnect work: we need to make sure we're in
+ * process context when calling ppp_unregister_channel().
+ */
+static void irnet_ppp_disconnect(struct work_struct *work)
+{
+ irnet_socket * self =
+ container_of(work, irnet_socket, disconnect_work);
+
+ if (self == NULL)
+ return;
+ /*
+ * If we were connected, cleanup & close the PPP
+ * channel, which will kill pppd (hangup) and the rest.
+ */
+ if (self->ppp_open && !self->ttp_open && !self->ttp_connect) {
+ ppp_unregister_channel(&self->chan);
+ self->ppp_open = 0;
+ }
+}
+
+/************************* CONTROL CHANNEL *************************/
+/*
+ * When ppp is not active, /dev/irnet acts as a control channel.
+ * Writing to it allows setting up the IrDA destination of the IrNET channel,
+ * and any application may read events happening on IrNET...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Post an event to the control channel...
+ * Put the event in the log, and then wake all processes blocked on read
+ * so they can read the log...
+ */
+static void
+irnet_post_event(irnet_socket * ap,
+ irnet_event event,
+ __u32 saddr,
+ __u32 daddr,
+ char * name,
+ __u16 hints)
+{
+ int index; /* In the log */
+
+ DENTER(CTRL_TRACE, "(ap=0x%p, event=%d, daddr=%08x, name=``%s'')\n",
+ ap, event, daddr, name);
+
+ /* Protect this section via spinlock.
+ * Note : as we are the only event producer, we only need to exclude
+ * ourselves when touching the log, which is nice and easy.
+ */
+ spin_lock_bh(&irnet_events.spinlock);
+
+ /* Copy the event in the log */
+ index = irnet_events.index;
+ irnet_events.log[index].event = event;
+ irnet_events.log[index].daddr = daddr;
+ irnet_events.log[index].saddr = saddr;
+ /* Try to copy IrDA nickname */
+ if(name)
+ strcpy(irnet_events.log[index].name, name);
+ else
+ irnet_events.log[index].name[0] = '\0';
+ /* Copy hints */
+ irnet_events.log[index].hints.word = hints;
+ /* Try to get ppp unit number */
+ if((ap != (irnet_socket *) NULL) && (ap->ppp_open))
+ irnet_events.log[index].unit = ppp_unit_number(&ap->chan);
+ else
+ irnet_events.log[index].unit = -1;
+
+ /* Increment the index
+ * Note that we increment the index only after the event is written,
+ * to make sure that the readers don't get garbage... */
+ irnet_events.index = (index + 1) % IRNET_MAX_EVENTS;
+
+ DEBUG(CTRL_INFO, "New event index is %d\n", irnet_events.index);
+
+ /* Spin lock end */
+ spin_unlock_bh(&irnet_events.spinlock);
+
+ /* Now : wake up everybody waiting for events... */
+ wake_up_interruptible_all(&irnet_events.rwait);
+
+ DEXIT(CTRL_TRACE, "\n");
+}
+
+/************************* IRDA SUBROUTINES *************************/
+/*
+ * These are a bunch of subroutines called from other functions
+ * down there, mostly common code or to improve readability...
+ *
+ * Note : we duplicate quite heavily some routines of af_irda.c,
+ * because our input structure (self) is quite different
+ * (struct irnet instead of struct irda_sock), which makes sharing
+ * the same code impossible (at least, without templates).
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_open_tsap (self)
+ *
+ * Open local Transport Service Access Point (TSAP)
+ *
+ * Create an IrTTP instance for us and set all the IrTTP callbacks.
+ */
+static inline int
+irnet_open_tsap(irnet_socket * self)
+{
+ notify_t notify; /* Callback structure */
+
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ DABORT(self->tsap != NULL, -EBUSY, IRDA_SR_ERROR, "Already busy !\n");
+
+ /* Initialize IrTTP callbacks to be used by the IrDA stack */
+ irda_notify_init(&notify);
+ notify.connect_confirm = irnet_connect_confirm;
+ notify.connect_indication = irnet_connect_indication;
+ notify.disconnect_indication = irnet_disconnect_indication;
+ notify.data_indication = irnet_data_indication;
+ /*notify.udata_indication = NULL;*/
+ notify.flow_indication = irnet_flow_indication;
+ notify.status_indication = irnet_status_indication;
+ notify.instance = self;
+ strlcpy(notify.name, IRNET_NOTIFY_NAME, sizeof(notify.name));
+
+ /* Open an IrTTP instance */
+ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
+ &notify);
+ DABORT(self->tsap == NULL, -ENOMEM,
+ IRDA_SR_ERROR, "Unable to allocate TSAP !\n");
+
+ /* Remember which TSAP selector we actually got */
+ self->stsap_sel = self->tsap->stsap_sel;
+
+ DEXIT(IRDA_SR_TRACE, " - tsap=0x%p, sel=0x%X\n",
+ self->tsap, self->stsap_sel);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_ias_to_tsap (self, result, value)
+ *
+ * Examine an IAS object and extract TSAP
+ *
+ * We do an IAP query to find the TSAP associated with the IrNET service.
+ * When IrIAP passes us the result of the query, this function looks at
+ * the return values to check for failures and extracts the TSAP if
+ * possible.
+ * It also deallocates the IAS value.
+ * On failure, the error code is stored in self->errno.
+ * Returns the remote TSAP selector, or 0 on failure.
+ */
+static inline __u8
+irnet_ias_to_tsap(irnet_socket * self,
+ int result,
+ struct ias_value * value)
+{
+ __u8 dtsap_sel = 0; /* TSAP we are looking for */
+
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ /* By default, no error */
+ self->errno = 0;
+
+ /* Check if request succeeded */
+ switch(result)
+ {
+ /* Standard errors : service not available */
+ case IAS_CLASS_UNKNOWN:
+ case IAS_ATTRIB_UNKNOWN:
+ DEBUG(IRDA_SR_INFO, "IAS object doesn't exist ! (%d)\n", result);
+ self->errno = -EADDRNOTAVAIL;
+ break;
+
+ /* Other errors, most likely IrDA stack failure */
+ default :
+ DEBUG(IRDA_SR_INFO, "IAS query failed ! (%d)\n", result);
+ self->errno = -EHOSTUNREACH;
+ break;
+
+ /* Success : we got what we wanted */
+ case IAS_SUCCESS:
+ break;
+ }
+
+ /* Check what was returned to us */
+ if(value != NULL)
+ {
+ /* What type of argument have we got ? */
+ switch(value->type)
+ {
+ case IAS_INTEGER:
+ DEBUG(IRDA_SR_INFO, "result=%d\n", value->t.integer);
+ if(value->t.integer != -1)
+ /* Get the remote TSAP selector */
+ dtsap_sel = value->t.integer;
+ else
+ self->errno = -EADDRNOTAVAIL;
+ break;
+ default:
+ self->errno = -EADDRNOTAVAIL;
+ DERROR(IRDA_SR_ERROR, "bad type ! (0x%X)\n", value->type);
+ break;
+ }
+
+ /* Cleanup */
+ irias_delete_value(value);
+ }
+ else /* value == NULL */
+ {
+ /* Nothing returned to us - usually result != SUCCESS */
+ if(!(self->errno))
+ {
+ DERROR(IRDA_SR_ERROR,
+ "IrDA bug : result == SUCCESS && value == NULL\n");
+ self->errno = -EHOSTUNREACH;
+ }
+ }
+ DEXIT(IRDA_SR_TRACE, "\n");
+
+ /* Return the TSAP */
+ return dtsap_sel;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_find_lsap_sel (self)
+ *
+ * Try to lookup LSAP selector in remote LM-IAS
+ *
+ * Basically, we start an IAP query; the query is asynchronous, and when it
+ * completes, IrIAP calls us back in irnet_getvalue_confirm(), where we can
+ * examine the result of the query...
+ * Note that in some cases, the query fails even before this function returns,
+ * creating some races...
+ */
+static inline int
+irnet_find_lsap_sel(irnet_socket * self)
+{
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ /* This should not happen */
+ DABORT(self->iriap, -EBUSY, IRDA_SR_ERROR, "busy with a previous query.\n");
+
+ /* Create an IAP instance, will be closed in irnet_getvalue_confirm() */
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irnet_getvalue_confirm);
+
+ /* Treat unexpected signals as disconnect */
+ self->errno = -EHOSTUNREACH;
+
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(self->iriap, self->rsaddr, self->daddr,
+ IRNET_SERVICE_NAME, IRNET_IAS_VALUE);
+
+ /* The above request is non-blocking.
+ * After a while, IrDA will call us back in irnet_getvalue_confirm()
+ * We will then call irnet_ias_to_tsap() and finish the
+ * connection procedure */
+
+ DEXIT(IRDA_SR_TRACE, "\n");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_connect_tsap (self)
+ *
+ * Initialise the TTP socket and initiate TTP connection
+ *
+ */
+static inline int
+irnet_connect_tsap(irnet_socket * self)
+{
+ int err;
+
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ /* Open a local TSAP (an IrTTP instance) */
+ err = irnet_open_tsap(self);
+ if(err != 0)
+ {
+ clear_bit(0, &self->ttp_connect);
+ DERROR(IRDA_SR_ERROR, "connect aborted!\n");
+ return err;
+ }
+
+ /* Connect to remote device */
+ err = irttp_connect_request(self->tsap, self->dtsap_sel,
+ self->rsaddr, self->daddr, NULL,
+ self->max_sdu_size_rx, NULL);
+ if(err != 0)
+ {
+ clear_bit(0, &self->ttp_connect);
+ DERROR(IRDA_SR_ERROR, "connect aborted!\n");
+ return err;
+ }
+
+ /* The above call is non-blocking.
+ * After a while, the IrDA stack will either call us back in
+ * irnet_connect_confirm() or irnet_disconnect_indication()
+ * See you there ;-) */
+
+ DEXIT(IRDA_SR_TRACE, "\n");
+ return err;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_discover_next_daddr (self)
+ *
+ * Query the IrNET TSAP of the next device in the log.
+ *
+ * Used in the TSAP discovery procedure.
+ */
+static inline int
+irnet_discover_next_daddr(irnet_socket * self)
+{
+ /* Close the last instance of IrIAP, and open a new one.
+ * We can't reuse the IrIAP instance in the IrIAP callback */
+ if(self->iriap)
+ {
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
+ /* Create a new IAP instance */
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irnet_discovervalue_confirm);
+ if(self->iriap == NULL)
+ return -ENOMEM;
+
+ /* Next discovery - before the call to avoid races */
+ self->disco_index++;
+
+ /* Check if we have one more address to try */
+ if(self->disco_index < self->disco_number)
+ {
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(self->iriap,
+ self->discoveries[self->disco_index].saddr,
+ self->discoveries[self->disco_index].daddr,
+ IRNET_SERVICE_NAME, IRNET_IAS_VALUE);
+ /* The above request is non-blocking.
+ * After a while, IrDA will call us back in irnet_discovervalue_confirm()
+ * We will then call irnet_ias_to_tsap() and come back here again... */
+ return 0;
+ }
+ else
+ return 1;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_discover_daddr_and_lsap_sel (self)
+ *
+ * This tries to find a device with the requested service.
+ *
+ * Initiate a TSAP discovery procedure.
+ * It basically looks into the discovery log. For each address in the list,
+ * it queries the LM-IAS of the device to find out whether that device offers
+ * the requested service.
+ * If there is more than one node supporting the service, we complain
+ * to the user (it should move devices around).
+ * If we find one node which has the requested TSAP, we connect to it.
+ *
+ * This function just starts the whole procedure. It requests the discovery
+ * log and submits the first IAS query.
+ * The bulk of the job is handled in irnet_discovervalue_confirm().
+ *
+ * Note : this procedure fails if there is more than one device in range
+ * on the same dongle, because IrLMP doesn't disconnect the LAP when the
+ * last LSAP is closed. Moreover, we would need to wait for the LAP
+ * disconnection...
+ */
+static inline int
+irnet_discover_daddr_and_lsap_sel(irnet_socket * self)
+{
+ int ret;
+
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ /* Ask lmp for the current discovery log */
+ self->discoveries = irlmp_get_discoveries(&self->disco_number, self->mask,
+ DISCOVERY_DEFAULT_SLOTS);
+
+ /* Check if we got any results */
+ if(self->discoveries == NULL)
+ {
+ self->disco_number = -1;
+ clear_bit(0, &self->ttp_connect);
+ DRETURN(-ENETUNREACH, IRDA_SR_INFO, "No Cachelog...\n");
+ }
+ DEBUG(IRDA_SR_INFO, "Got the log (0x%p), size is %d\n",
+ self->discoveries, self->disco_number);
+
+ /* Start with the first discovery */
+ self->disco_index = -1;
+ self->daddr = DEV_ADDR_ANY;
+
+ /* This will fail if the log is empty - this is non-blocking */
+ ret = irnet_discover_next_daddr(self);
+ if(ret)
+ {
+ /* Close IAP */
+ if(self->iriap)
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* Cleanup our copy of the discovery log */
+ kfree(self->discoveries);
+ self->discoveries = NULL;
+
+ clear_bit(0, &self->ttp_connect);
+ DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");
+ }
+
+ /* Follow me in irnet_discovervalue_confirm() */
+
+ DEXIT(IRDA_SR_TRACE, "\n");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_dname_to_daddr (self)
+ *
+ * Convert an IrDA nickname to a valid IrDA address
+ *
+ * It basically looks into the discovery log until there is a match.
+ */
+static inline int
+irnet_dname_to_daddr(irnet_socket * self)
+{
+ struct irda_device_info *discoveries; /* Copy of the discovery log */
+ int number; /* Number of nodes in the log */
+ int i;
+
+ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);
+
+ /* Ask lmp for the current discovery log */
+ discoveries = irlmp_get_discoveries(&number, 0xffff,
+ DISCOVERY_DEFAULT_SLOTS);
+ /* Check if we got any results */
+ if(discoveries == NULL)
+ DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");
+
+ /*
+ * Now, check all discovered devices (if any) and look for the
+ * one whose nickname matches the name the client asked for...
+ */
+ for(i = 0; i < number; i++)
+ {
+ /* Does the name match ? */
+ if(!strncmp(discoveries[i].info, self->rname, NICKNAME_MAX_LEN))
+ {
+ /* Yes !!! Get it.. */
+ self->daddr = discoveries[i].daddr;
+ DEBUG(IRDA_SR_INFO, "discovered device ``%s'' at address 0x%08x.\n",
+ self->rname, self->daddr);
+ kfree(discoveries);
+ DEXIT(IRDA_SR_TRACE, "\n");
+ return 0;
+ }
+ }
+ /* No luck ! */
+ DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname);
+ kfree(discoveries);
+ return -EADDRNOTAVAIL;
+}
+
+
+/************************* SOCKET ROUTINES *************************/
+/*
+ * These are the main operations on IrNET sockets, basically to create
+ * and destroy IrNET sockets. These are called from the PPP part...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Create an IrNET instance : just initialise some parameters...
+ */
+int
+irda_irnet_create(irnet_socket * self)
+{
+ DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);
+
+ self->magic = IRNET_MAGIC; /* Paranoia */
+
+ self->ttp_open = 0; /* Prevent higher layer from accessing IrTTP */
+ self->ttp_connect = 0; /* Not connecting yet */
+ self->rname[0] = '\0'; /* May be set via control channel */
+ self->rdaddr = DEV_ADDR_ANY; /* May be set via control channel */
+ self->rsaddr = DEV_ADDR_ANY; /* May be set via control channel */
+ self->daddr = DEV_ADDR_ANY; /* Until we get connected */
+ self->saddr = DEV_ADDR_ANY; /* Until we get connected */
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+
+ /* Register as a client with IrLMP */
+ self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
+#ifdef DISCOVERY_NOMASK
+ self->mask = 0xffff; /* For W2k compatibility */
+#else /* DISCOVERY_NOMASK */
+ self->mask = irlmp_service_to_hint(S_LAN);
+#endif /* DISCOVERY_NOMASK */
+ self->tx_flow = FLOW_START; /* Flow control from IrTTP */
+
+ INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
+
+ DEXIT(IRDA_SOCK_TRACE, "\n");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Connect to the other side :
+ * o convert device name to an address
+ * o find the socket number (dlsap)
+ * o Establish the connection
+ *
+ * Note : We no longer mimic af_irda. The IAS query for finding the TSAP
+ * is done asynchronously, like the TTP connection. This allows us to
+ * call this function from any context (not only process).
+ * The downside is that following what's happening in there is tricky
+ * because it involves various functions all over the place...
+ */
+int
+irda_irnet_connect(irnet_socket * self)
+{
+ int err;
+
+ DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);
+
+ /* Check if we are already trying to connect.
+ * Because irda_irnet_connect() can be called directly by pppd as well as
+ * by packet retries in ppp_generic, and because connecting may take time
+ * and we may race with irnet_connect_indication(), we need to be careful here... */
+ if(test_and_set_bit(0, &self->ttp_connect))
+ DRETURN(-EBUSY, IRDA_SOCK_INFO, "Already connecting...\n");
+ if((self->iriap != NULL) || (self->tsap != NULL))
+ DERROR(IRDA_SOCK_ERROR, "Socket not cleaned up...\n");
+
+ /* Insert ourselves in the hashbin so that the IrNET server can find us.
+ * Notes : 4th arg is string of 32 char max and must be null terminated
+ * When 4th arg is used (string), 3rd arg isn't (int)
+ * Can't re-insert (MUST remove first) so check for that... */
+ if((irnet_server.running) && (self->q.q_next == NULL))
+ {
+ spin_lock_bh(&irnet_server.spinlock);
+ hashbin_insert(irnet_server.list, (irda_queue_t *) self, 0, self->rname);
+ spin_unlock_bh(&irnet_server.spinlock);
+ DEBUG(IRDA_SOCK_INFO, "Inserted ``%s'' in hashbin...\n", self->rname);
+ }
+
+ /* If we don't have anything (no address, no name) */
+ if((self->rdaddr == DEV_ADDR_ANY) && (self->rname[0] == '\0'))
+ {
+ /* Try to find a suitable address */
+ if((err = irnet_discover_daddr_and_lsap_sel(self)) != 0)
+ DRETURN(err, IRDA_SOCK_INFO, "auto-connect failed!\n");
+ /* In most cases, the call above is non-blocking */
+ }
+ else
+ {
+ /* If we have only the name (no address), try to get an address */
+ if(self->rdaddr == DEV_ADDR_ANY)
+ {
+ if((err = irnet_dname_to_daddr(self)) != 0)
+ DRETURN(err, IRDA_SOCK_INFO, "name connect failed!\n");
+ }
+ else
+ /* Use the requested destination address */
+ self->daddr = self->rdaddr;
+
+ /* Query remote LM-IAS to find LSAP selector */
+ irnet_find_lsap_sel(self);
+ /* The above call is non blocking */
+ }
+
+ /* At this point, we are waiting for the IrDA stack to call us back,
+ * or we have already failed.
+ * We will finish the connection procedure in irnet_connect_tsap().
+ */
+ DEXIT(IRDA_SOCK_TRACE, "\n");
+ return 0;
+}
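+
+/*
+ * Asynchronous call flow of the connect procedure, as a rough sketch
+ * pieced together from the comments above and below (the callbacks are
+ * invoked later by the IrDA stack, not called directly from here) :
+ *
+ *	irda_irnet_connect()
+ *	  -> irnet_find_lsap_sel() / irnet_discover_daddr_and_lsap_sel()
+ *	       ... IAS query completes ...
+ *	  -> irnet_getvalue_confirm() / irnet_discovervalue_confirm()
+ *	       -> irnet_ias_to_tsap()
+ *	       -> irnet_connect_tsap()
+ *	            ... IrTTP connect completes ...
+ *	  -> irnet_connect_confirm() or irnet_disconnect_indication()
+ */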
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_irnet_destroy(self)
+ *
+ * Destroy irnet instance
+ *
+ * Note : this needs to be called from a process context.
+ */
+void
+irda_irnet_destroy(irnet_socket * self)
+{
+ DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);
+ if(self == NULL)
+ return;
+
+ /* Remove ourselves from hashbin (if we are queued in hashbin)
+ * Note : `irnet_server.running' protects us from calls in hashbin_delete() */
+ if((irnet_server.running) && (self->q.q_next != NULL))
+ {
+ struct irnet_socket * entry;
+ DEBUG(IRDA_SOCK_INFO, "Removing from hash..\n");
+ spin_lock_bh(&irnet_server.spinlock);
+ entry = hashbin_remove_this(irnet_server.list, (irda_queue_t *) self);
+ self->q.q_next = NULL;
+ spin_unlock_bh(&irnet_server.spinlock);
+ DASSERT(entry == self, , IRDA_SOCK_ERROR, "Can't remove from hash.\n");
+ }
+
+ /* If we were connected, post a message */
+ if(test_bit(0, &self->ttp_open))
+ {
+ /* Note : as the disconnect comes from ppp_generic, the unit number
+ * doesn't exist anymore when we post the event, so we need to pass
+ * NULL as the first arg... */
+ irnet_post_event(NULL, IRNET_DISCONNECT_TO,
+ self->saddr, self->daddr, self->rname, 0);
+ }
+
+ /* Prevent various IrDA callbacks from messing up things
+ * Need to be first */
+ clear_bit(0, &self->ttp_connect);
+
+ /* Prevent higher layer from accessing IrTTP */
+ clear_bit(0, &self->ttp_open);
+
+ /* Unregister with IrLMP */
+ irlmp_unregister_client(self->ckey);
+
+ /* Unregister with LM-IAS */
+ if(self->iriap)
+ {
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
+
+ /* Clean up any leftover discoveries from a connection attempt or the control channel */
+ if(self->discoveries != NULL)
+ {
+ /* Cleanup our copy of the discovery log */
+ kfree(self->discoveries);
+ self->discoveries = NULL;
+ }
+
+ /* Close our IrTTP connection */
+ if(self->tsap)
+ {
+ DEBUG(IRDA_SOCK_INFO, "Closing our TTP connection.\n");
+ irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+ self->stsap_sel = 0;
+
+ DEXIT(IRDA_SOCK_TRACE, "\n");
+}
+
+
+/************************** SERVER SOCKET **************************/
+/*
+ * The IrNET service is composed of one server socket and a variable
+ * number of regular IrNET sockets. The server socket is supposed to
+ * handle incoming connections and redirect them to one of the IrNET sockets.
+ * It's a superset of the regular IrNET socket, but has a very distinct
+ * behaviour...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_daddr_to_dname (self)
+ *
+ * Convert an IrDA address to an IrDA nickname
+ *
+ * It basically looks into the discovery log until there is a match.
+ */
+static inline int
+irnet_daddr_to_dname(irnet_socket * self)
+{
+ struct irda_device_info *discoveries; /* Copy of the discovery log */
+ int number; /* Number of nodes in the log */
+ int i;
+
+ DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);
+
+ /* Ask lmp for the current discovery log */
+ discoveries = irlmp_get_discoveries(&number, 0xffff,
+ DISCOVERY_DEFAULT_SLOTS);
+ /* Check if we got any results */
+ if (discoveries == NULL)
+ DRETURN(-ENETUNREACH, IRDA_SERV_INFO, "Cachelog empty...\n");
+
+ /* Now, check all discovered devices (if any) */
+ for(i = 0; i < number; i++)
+ {
+ /* Does the address match ? */
+ if(discoveries[i].daddr == self->daddr)
+ {
+ /* Yes !!! Get it.. */
+ strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
+ self->rname[sizeof(self->rname) - 1] = '\0';
+ DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
+ self->daddr, self->rname);
+ kfree(discoveries);
+ DEXIT(IRDA_SERV_TRACE, "\n");
+ return 0;
+ }
+ }
+ /* No luck ! */
+ DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr);
+ kfree(discoveries);
+ return -EADDRNOTAVAIL;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_find_socket (self)
+ *
+ * Find the correct IrNET socket
+ *
+ * Looks into the list of IrNET sockets and finds one with the right
+ * properties...
+ */
+static inline irnet_socket *
+irnet_find_socket(irnet_socket * self)
+{
+ irnet_socket * new = (irnet_socket *) NULL;
+ int err;
+
+ DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);
+
+ /* Get the addresses of the requester */
+ self->daddr = irttp_get_daddr(self->tsap);
+ self->saddr = irttp_get_saddr(self->tsap);
+
+ /* Try to get the IrDA nickname of the requester */
+ err = irnet_daddr_to_dname(self);
+
+ /* Protect access to the instance list */
+ spin_lock_bh(&irnet_server.spinlock);
+
+ /* So now, try to get a socket having specifically
+ * requested that nickname */
+ if(err == 0)
+ {
+ new = (irnet_socket *) hashbin_find(irnet_server.list,
+ 0, self->rname);
+ if(new)
+ DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches rname ``%s''.\n",
+ new, new->rname);
+ }
+
+ /* If no name matches, try to find a socket by the destination address */
+ /* It can be either the requested destination address (set via the
+ * control channel), or the current destination address if the
+ * socket is in the middle of a connection request */
+ if(new == (irnet_socket *) NULL)
+ {
+ new = (irnet_socket *) hashbin_get_first(irnet_server.list);
+ while(new !=(irnet_socket *) NULL)
+ {
+ /* Does it have the same address ? */
+ if((new->rdaddr == self->daddr) || (new->daddr == self->daddr))
+ {
+ /* Yes !!! Get it.. */
+ DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches daddr %#08x.\n",
+ new, self->daddr);
+ break;
+ }
+ new = (irnet_socket *) hashbin_get_next(irnet_server.list);
+ }
+ }
+
+ /* If we don't have any socket, get the first unconnected socket */
+ if(new == (irnet_socket *) NULL)
+ {
+ new = (irnet_socket *) hashbin_get_first(irnet_server.list);
+ while(new !=(irnet_socket *) NULL)
+ {
+ /* Is it available ? */
+ if(!(test_bit(0, &new->ttp_open)) && (new->rdaddr == DEV_ADDR_ANY) &&
+ (new->rname[0] == '\0') && (new->ppp_open))
+ {
+ /* Yes !!! Get it.. */
+ DEBUG(IRDA_SERV_INFO, "Socket 0x%p is free.\n",
+ new);
+ break;
+ }
+ new = (irnet_socket *) hashbin_get_next(irnet_server.list);
+ }
+ }
+
+ /* Spin lock end */
+ spin_unlock_bh(&irnet_server.spinlock);
+
+ DEXIT(IRDA_SERV_TRACE, " - new = 0x%p\n", new);
+ return new;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_connect_socket (server, new)
+ *
+ * Connect an incoming connection to the socket
+ *
+ */
+static inline int
+irnet_connect_socket(irnet_socket * server,
+ irnet_socket * new,
+ struct qos_info * qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size)
+{
+ DENTER(IRDA_SERV_TRACE, "(server=0x%p, new=0x%p)\n",
+ server, new);
+
+ /* Now attach up the new socket */
+ new->tsap = irttp_dup(server->tsap, new);
+ DABORT(new->tsap == NULL, -1, IRDA_SERV_ERROR, "dup failed!\n");
+
+ /* Set up all the relevant parameters on the new socket */
+ new->stsap_sel = new->tsap->stsap_sel;
+ new->dtsap_sel = new->tsap->dtsap_sel;
+ new->saddr = irttp_get_saddr(new->tsap);
+ new->daddr = irttp_get_daddr(new->tsap);
+
+ new->max_header_size = max_header_size;
+ new->max_sdu_size_tx = max_sdu_size;
+ new->max_data_size = max_sdu_size;
+#ifdef STREAM_COMPAT
+ /* If we want to receive "stream sockets" */
+ if(max_sdu_size == 0)
+ new->max_data_size = irttp_get_max_seg_size(new->tsap);
+#endif /* STREAM_COMPAT */
+
+ /* Clean up the original one to keep it in listen state */
+ irttp_listen(server->tsap);
+
+ /* Send a connection response on the new socket */
+ irttp_connect_response(new->tsap, new->max_sdu_size_rx, NULL);
+
+ /* Allow PPP to send its junk over the new socket... */
+ set_bit(0, &new->ttp_open);
+
+ /* Not connecting anymore, and clean up last possible remains
+ * of connection attempts on the socket */
+ clear_bit(0, &new->ttp_connect);
+ if(new->iriap)
+ {
+ iriap_close(new->iriap);
+ new->iriap = NULL;
+ }
+ if(new->discoveries != NULL)
+ {
+ kfree(new->discoveries);
+ new->discoveries = NULL;
+ }
+
+#ifdef CONNECT_INDIC_KICK
+ /* As currently we don't block packets in ppp_irnet_send() while passive,
+ * this is not really needed...
+ * Also, not doing it gives IrDA a chance to finish the setup properly
+ * before being swamped with packets... */
+ ppp_output_wakeup(&new->chan);
+#endif /* CONNECT_INDIC_KICK */
+
+ /* Notify the control channel */
+ irnet_post_event(new, IRNET_CONNECT_FROM,
+ new->saddr, new->daddr, server->rname, 0);
+
+ DEXIT(IRDA_SERV_TRACE, "\n");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_disconnect_server (self, skb)
+ *
+ * Clean up the server socket when the incoming connection aborts
+ *
+ */
+static inline void
+irnet_disconnect_server(irnet_socket * self,
+ struct sk_buff *skb)
+{
+ DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);
+
+ /* Put the received packet in the black hole */
+ kfree_skb(skb);
+
+#ifdef FAIL_SEND_DISCONNECT
+ /* Tell the other party we don't want to be connected */
+ /* Hum... Is it the right thing to do ? And do we need to send
+ * a connect response before ? It looks ok without this... */
+ irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+#endif /* FAIL_SEND_DISCONNECT */
+
+ /* Notify the control channel (see irnet_find_socket()) */
+ irnet_post_event(NULL, IRNET_REQUEST_FROM,
+ self->saddr, self->daddr, self->rname, 0);
+
+ /* Clean up the server to keep it in listen state */
+ irttp_listen(self->tsap);
+
+ DEXIT(IRDA_SERV_TRACE, "\n");
+}
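+
+/*
+ * Server-side flow, as a rough sketch of how the helpers above are meant
+ * to fit together (presumably dispatched from the connect indication
+ * callback registered in irnet_open_tsap(), which is further down in this
+ * file) :
+ *
+ *	incoming IrTTP connection on the server TSAP
+ *	  -> irnet_find_socket()       pick a matching or free IrNET socket
+ *	  -> irnet_connect_socket()    dup the TSAP and answer, if one was found
+ *	  -> irnet_disconnect_server() otherwise : post IRNET_REQUEST_FROM
+ *	                               and go back to listen
+ */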
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_setup_server (void)
+ *
+ * Create an IrTTP server and set it up...
+ *
+ * Register the IrLAN hint bit, create an IrTTP instance for us,
+ * set all the IrTTP callbacks and create an IrIAS entry...
+ */
+static inline int
+irnet_setup_server(void)
+{
+ __u16 hints;
+
+ DENTER(IRDA_SERV_TRACE, "()\n");
+
+ /* Initialise the regular socket part of the server */
+ irda_irnet_create(&irnet_server.s);
+
+ /* Open a local TSAP (an IrTTP instance) for the server */
+ irnet_open_tsap(&irnet_server.s);
+
+ /* PPP part setup */
+ irnet_server.s.ppp_open = 0;
+ irnet_server.s.chan.private = NULL;
+ irnet_server.s.file = NULL;
+
+ /* Get the hint bit corresponding to IrLAN */
+ /* Note : we overload the IrLAN hint bit. As it is only a "hint", and as
+ * we provide roughly the same functionality as IrLAN, this is ok.
+ * In fact, the situation is similar to JetSend overloading the Obex hint
+ */
+ hints = irlmp_service_to_hint(S_LAN);
+
+#ifdef ADVERTISE_HINT
+ /* Register with IrLMP as a service (advertise our hint bit) */
+ irnet_server.skey = irlmp_register_service(hints);
+#endif /* ADVERTISE_HINT */
+
+ /* Register with LM-IAS (so that people can connect to us) */
+ irnet_server.ias_obj = irias_new_object(IRNET_SERVICE_NAME, jiffies);
+ irias_add_integer_attrib(irnet_server.ias_obj, IRNET_IAS_VALUE,
+ irnet_server.s.stsap_sel, IAS_KERNEL_ATTR);
+ irias_insert_object(irnet_server.ias_obj);
+
+#ifdef DISCOVERY_EVENTS
+ /* Tell IrLMP we want to be notified of newly discovered nodes */
+ irlmp_update_client(irnet_server.s.ckey, hints,
+ irnet_discovery_indication, irnet_expiry_indication,
+ (void *) &irnet_server.s);
+#endif
+
+ DEXIT(IRDA_SERV_TRACE, " - self=0x%p\n", &irnet_server.s);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_destroy_server (void)
+ *
+ * Destroy the IrTTP server...
+ *
+ * Reverse of the previous function...
+ */
+static inline void
+irnet_destroy_server(void)
+{
+ DENTER(IRDA_SERV_TRACE, "()\n");
+
+#ifdef ADVERTISE_HINT
+ /* Unregister with IrLMP */
+ irlmp_unregister_service(irnet_server.skey);
+#endif /* ADVERTISE_HINT */
+
+ /* Unregister with LM-IAS */
+ if(irnet_server.ias_obj)
+ irias_delete_object(irnet_server.ias_obj);
+
+ /* Cleanup the socket part */
+ irda_irnet_destroy(&irnet_server.s);
+
+ DEXIT(IRDA_SERV_TRACE, "\n");
+}
+
+
+/************************ IRDA-TTP CALLBACKS ************************/
+/*
+ * When we create an IrTTP instance, we pass it a set of callbacks
+ * that IrTTP will call in case of various events.
+ * We take care of those events here.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_data_indication (instance, sap, skb)
+ *
+ * Received some data from TinyTP. Strip the PPP framing and pass it to the generic PPP layer
+ *
+ */
+static int
+irnet_data_indication(void * instance,
+ void * sap,
+ struct sk_buff *skb)
+{
+ irnet_socket * ap = (irnet_socket *) instance;
+ unsigned char * p;
+ int code = 0;
+
+ DENTER(IRDA_TCB_TRACE, "(self/ap=0x%p, skb=0x%p)\n",
+ ap, skb);
+ DASSERT(skb != NULL, 0, IRDA_CB_ERROR, "skb is NULL !!!\n");
+
+ /* Check if ppp is ready to receive our packet */
+ if(!ap->ppp_open)
+ {
+ DERROR(IRDA_CB_ERROR, "PPP not ready, dropping packet...\n");
+ /* When we return error, TTP will need to requeue the skb and
+ * will stop the sender. IrTTP will stall until we send it a
+ * flow control request... */
+ return -ENOMEM;
+ }
+
+ /* strip address/control field if present */
+ p = skb->data;
+ if((p[0] == PPP_ALLSTATIONS) && (p[1] == PPP_UI))
+ {
+ /* chop off address/control */
+ if(skb->len < 3)
+ goto err_exit;
+ p = skb_pull(skb, 2);
+ }
+
+ /* decompress protocol field if compressed */
+ if(p[0] & 1)
+ {
+ /* protocol is compressed */
+ *(u8 *)skb_push(skb, 1) = 0;
+ }
+ else
+ if(skb->len < 2)
+ goto err_exit;
+
+ /* pass to generic ppp layer */
+ /* Note : how do I know whether ppp can accept the packet or not ? This is
+ * essential if I want to manage flow control smoothly... */
+ ppp_input(&ap->chan, skb);
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+ return 0;
+
+ err_exit:
+ DERROR(IRDA_CB_ERROR, "Packet too small, dropping...\n");
+ kfree_skb(skb);
+ ppp_input_error(&ap->chan, code);
+ return 0; /* Don't return an error code, only for flow control... */
+}
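+
+/*
+ * Byte-level sketch of the framing handling above (illustrative values
+ * only, standard PPP framing) :
+ *
+ *	skb data : ff 03 00 21 45 ...	address/control field present
+ *	  -> skb_pull(skb, 2) drops the ff 03 (PPP_ALLSTATIONS/PPP_UI) bytes
+ *	skb data : 21 45 ...		protocol field compressed (LSB set)
+ *	  -> skb_push(skb, 1) = 0 restores the full 00 21 (IP) protocol field
+ *
+ * Either way, ppp_input() always sees an uncompressed two-byte protocol
+ * field at the head of the skb.
+ */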
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_disconnect_indication (instance, sap, reason, skb)
+ *
+ * The connection has been closed. Check the reason to find out why
+ *
+ * Note : there are many cases where we come here :
+ * o attempted to connect, timeout
+ * o connected, link is broken, LAP has timeout
+ * o connected, other side close the link
+ * o connection request on the server not handled
+ */
+static void
+irnet_disconnect_indication(void * instance,
+ void * sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
+ irnet_socket * self = (irnet_socket *) instance;
+ int test_open;
+ int test_connect;
+
+ DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n");
+
+ /* Don't care about it, but let's not leak it */
+ if(skb)
+ dev_kfree_skb(skb);
+
+ /* Prevent higher layer from accessing IrTTP */
+ test_open = test_and_clear_bit(0, &self->ttp_open);
+ /* Not connecting anymore...
+ * (note : TSAP is open, so IAP callbacks are no longer pending...) */
+ test_connect = test_and_clear_bit(0, &self->ttp_connect);
+
+ /* If both self->ttp_open and self->ttp_connect were already cleared, it means
+ * that we have a race condition with irda_irnet_destroy() or
+ * irnet_connect_indication(), so don't mess up tsap...
+ */
+ if(!(test_open || test_connect))
+ {
+ DERROR(IRDA_CB_ERROR, "Race condition detected...\n");
+ return;
+ }
+
+ /* If we were active, notify the control channel */
+ if(test_open)
+ irnet_post_event(self, IRNET_DISCONNECT_FROM,
+ self->saddr, self->daddr, self->rname, 0);
+ else
+ /* If we were trying to connect, notify the control channel */
+ if((self->tsap) && (self != &irnet_server.s))
+ irnet_post_event(self, IRNET_NOANSWER_FROM,
+ self->saddr, self->daddr, self->rname, 0);
+
+ /* Close our IrTTP connection, cleanup tsap */
+ if((self->tsap) && (self != &irnet_server.s))
+ {
+ DEBUG(IRDA_CB_INFO, "Closing our TTP connection.\n");
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+ /* Cleanup the socket in case we want to reconnect in ppp_output_wakeup() */
+ self->stsap_sel = 0;
+ self->daddr = DEV_ADDR_ANY;
+ self->tx_flow = FLOW_START;
+
+ /* Deal with the ppp instance if it's still alive */
+ if(self->ppp_open)
+ {
+ if(test_open)
+ {
+ /* ppp_unregister_channel() wants a user context. */
+ schedule_work(&self->disconnect_work);
+ }
+ else
+ {
+	  /* If we were trying to connect, flush (drain) the ppp_generic
+	   * Tx queue (most often we have blocked it), which will
+	   * trigger another attempt to connect. If we are passive,
+	   * this will empty the Tx queue after the last try. */
+ ppp_output_wakeup(&self->chan);
+ }
+ }
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_connect_confirm (instance, sap, qos, max_sdu_size, max_header_size, skb)
+ *
+ *    Connection has been confirmed by the remote device
+ *
+ */
+static void
+irnet_connect_confirm(void * instance,
+ void * sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ irnet_socket * self = (irnet_socket *) instance;
+
+ DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);
+
+ /* Check if socket is closing down (via irda_irnet_destroy()) */
+ if(! test_bit(0, &self->ttp_connect))
+ {
+ DERROR(IRDA_CB_ERROR, "Socket no longer connecting. Ouch !\n");
+ return;
+ }
+
+ /* How much header space do we need to reserve */
+ self->max_header_size = max_header_size;
+
+ /* IrTTP max SDU size in transmit direction */
+ self->max_sdu_size_tx = max_sdu_size;
+ self->max_data_size = max_sdu_size;
+#ifdef STREAM_COMPAT
+ if(max_sdu_size == 0)
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+#endif /* STREAM_COMPAT */
+
+ /* At this point, IrLMP has assigned our source address */
+ self->saddr = irttp_get_saddr(self->tsap);
+
+ /* Allow higher layer to access IrTTP */
+ set_bit(0, &self->ttp_open);
+ clear_bit(0, &self->ttp_connect); /* Not racy, IrDA traffic is serial */
+  /* Give a kick in the ass of ppp_generic so that it sends us some data */
+ ppp_output_wakeup(&self->chan);
+
+ /* Check size of received packet */
+ if(skb->len > 0)
+ {
+#ifdef PASS_CONNECT_PACKETS
+ DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n");
+ /* Try to pass it to PPP */
+ irnet_data_indication(instance, sap, skb);
+#else /* PASS_CONNECT_PACKETS */
+ DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n");
+ kfree_skb(skb); /* Note : will be optimised with other kfree... */
+#endif /* PASS_CONNECT_PACKETS */
+ }
+ else
+ kfree_skb(skb);
+
+ /* Notify the control channel */
+ irnet_post_event(self, IRNET_CONNECT_TO,
+ self->saddr, self->daddr, self->rname, 0);
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_flow_indication (instance, sap, flow)
+ *
+ * Used by TinyTP to tell us if it can accept more data or not
+ *
+ */
+static void
+irnet_flow_indication(void * instance,
+ void * sap,
+ LOCAL_FLOW flow)
+{
+ irnet_socket * self = (irnet_socket *) instance;
+ LOCAL_FLOW oldflow = self->tx_flow;
+
+ DENTER(IRDA_TCB_TRACE, "(self=0x%p, flow=%d)\n", self, flow);
+
+ /* Update our state */
+ self->tx_flow = flow;
+
+  /* Check what IrTTP wants us to do... */
+ switch(flow)
+ {
+ case FLOW_START:
+ DEBUG(IRDA_CB_INFO, "IrTTP wants us to start again\n");
+ /* Check if we really need to wake up PPP */
+ if(oldflow == FLOW_STOP)
+ ppp_output_wakeup(&self->chan);
+ else
+ DEBUG(IRDA_CB_INFO, "But we were already transmitting !!!\n");
+ break;
+ case FLOW_STOP:
+ DEBUG(IRDA_CB_INFO, "IrTTP wants us to slow down\n");
+ break;
+ default:
+ DEBUG(IRDA_CB_INFO, "Unknown flow command!\n");
+ break;
+ }
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_status_indication (instance, link, lock)
+ *
+ * Link (IrLAP) status report.
+ *
+ */
+static void
+irnet_status_indication(void * instance,
+ LINK_STATUS link,
+ LOCK_STATUS lock)
+{
+ irnet_socket * self = (irnet_socket *) instance;
+
+ DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n");
+
+ /* We can only get this event if we are connected */
+ switch(link)
+ {
+ case STATUS_NO_ACTIVITY:
+ irnet_post_event(self, IRNET_BLOCKED_LINK,
+ self->saddr, self->daddr, self->rname, 0);
+ break;
+ default:
+ DEBUG(IRDA_CB_INFO, "Unknown status...\n");
+ }
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_connect_indication (instance, sap, qos, max_sdu_size, max_header_size, skb)
+ *
+ * Incoming connection
+ *
+ * In theory, this function is called only on the server socket.
+ * Some other node is attempting to connect to the IrNET service, and has
+ * sent a connection request on our server socket.
+ * We just redirect the connection to the relevant IrNET socket.
+ *
+ * Note : we also make sure that between two IrNET nodes, there can
+ * exist only one IrNET connection.
+ */
+static void
+irnet_connect_indication(void * instance,
+ void * sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ irnet_socket * server = &irnet_server.s;
+ irnet_socket * new = (irnet_socket *) NULL;
+
+ DENTER(IRDA_TCB_TRACE, "(server=0x%p)\n", server);
+ DASSERT(instance == &irnet_server, , IRDA_CB_ERROR,
+ "Invalid instance (0x%p) !!!\n", instance);
+ DASSERT(sap == irnet_server.s.tsap, , IRDA_CB_ERROR, "Invalid sap !!!\n");
+
+ /* Try to find the most appropriate IrNET socket */
+ new = irnet_find_socket(server);
+
+  /* After all this hard work, do we have a socket ? */
+ if(new == (irnet_socket *) NULL)
+ {
+ DEXIT(IRDA_CB_INFO, ": No socket waiting for this connection.\n");
+ irnet_disconnect_server(server, skb);
+ return;
+ }
+
+ /* Is the socket already busy ? */
+ if(test_bit(0, &new->ttp_open))
+ {
+ DEXIT(IRDA_CB_INFO, ": Socket already connected.\n");
+ irnet_disconnect_server(server, skb);
+ return;
+ }
+
+ /* The following code is a bit tricky, so need comments ;-)
+ */
+ /* If ttp_connect is set, the socket is trying to connect to the other
+   * end and may have sent an IrTTP connection request and is waiting for
+ * a connection response (that may never come).
+ * Now, the pain is that the socket may have opened a tsap and is
+ * waiting on it, while the other end is trying to connect to it on
+ * another tsap.
+ * Because IrNET can be peer to peer, we need to workaround this.
+ * Furthermore, the way the irnetd script is implemented, the
+ * target will create a second IrNET connection back to the
+ * originator and expect the originator to bind this new connection
+ * to the original PPPD instance.
+ * And of course, if we don't use irnetd, we can have a race when
+   * both sides try to connect simultaneously, which could leave both
+   * connections half-closed (yuck).
+ * Conclusions :
+ * 1) The "originator" must accept the new connection and get rid
+ * of the old one so that irnetd works
+ * 2) One side must deny the new connection to avoid races,
+   *	but both sides must agree on which side it is...
+ * Most often, the originator is primary at the LAP layer.
+ * Jean II
+ */
+ /* Now, let's look at the way I wrote the test...
+   * We need to clear the ttp_connect flag atomically to prevent
+   * irnet_disconnect_indication() from messing up the tsap we are going to close.
+ * We want to clear the ttp_connect flag only if we close the tsap,
+ * otherwise we will never close it, so we need to check for primary
+ * *before* doing the test on the flag.
+ * And of course, ALLOW_SIMULT_CONNECT can disable this entirely...
+ * Jean II
+ */
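+  /* Illustrative summary of the test below :
+   *	ALLOW_SIMULT_CONNECT + we are primary + ttp_connect was set
+   *		-> drop our own pending attempt and accept the incoming one
+   *	otherwise, ttp_connect still set or tsap already open
+   *		-> reject the incoming connection (race / busy)
+   *	otherwise
+   *		-> the socket is idle, just hook it up below
+   */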
+
+ /* Socket already connecting ? On primary ? */
+ if(0
+#ifdef ALLOW_SIMULT_CONNECT
+ || ((irttp_is_primary(server->tsap) == 1) && /* primary */
+ (test_and_clear_bit(0, &new->ttp_connect)))
+#endif /* ALLOW_SIMULT_CONNECT */
+ )
+ {
+ DERROR(IRDA_CB_ERROR, "Socket already connecting, but going to reuse it !\n");
+
+ /* Cleanup the old TSAP if necessary - IrIAP will be cleaned up later */
+ if(new->tsap != NULL)
+ {
+ /* Close the old connection the new socket was attempting,
+ * so that we can hook it up to the new connection.
+ * It's now safe to do it... */
+ irttp_close_tsap(new->tsap);
+ new->tsap = NULL;
+ }
+ }
+ else
+ {
+ /* Three options :
+ * 1) socket was not connecting or connected : ttp_connect should be 0.
+ * 2) we don't want to connect the socket because we are secondary or
+ * ALLOW_SIMULT_CONNECT is undefined. ttp_connect should be 1.
+ * 3) we are half way in irnet_disconnect_indication(), and it's a
+ * nice race condition... Fortunately, we can detect that by checking
+ * if tsap is still alive. On the other hand, we can't be in
+ * irda_irnet_destroy() otherwise we would not have found this
+ * socket in the hashbin.
+ * Jean II */
+ if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL))
+ {
+	  /* Don't mess with this socket, somebody else is in charge... */
+ DERROR(IRDA_CB_ERROR, "Race condition detected, socket in use, abort connect...\n");
+ irnet_disconnect_server(server, skb);
+ return;
+ }
+ }
+
+ /* So : at this point, we have a socket, and it is idle. Good ! */
+ irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size);
+
+ /* Check size of received packet */
+ if(skb->len > 0)
+ {
+#ifdef PASS_CONNECT_PACKETS
+ DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n");
+ /* Try to pass it to PPP */
+ irnet_data_indication(new, new->tsap, skb);
+#else /* PASS_CONNECT_PACKETS */
+ DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n");
+ kfree_skb(skb); /* Note : will be optimised with other kfree... */
+#endif /* PASS_CONNECT_PACKETS */
+ }
+ else
+ kfree_skb(skb);
+
+ DEXIT(IRDA_TCB_TRACE, "\n");
+}
+
+
+/********************** IRDA-IAS/LMP CALLBACKS **********************/
+/*
+ * These are the callbacks called by other layers of the IrDA stack,
+ * mainly LMP for discovery and IAS for name queries.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_getvalue_confirm (result, obj_id, value, priv)
+ *
+ *    Got an answer from the remote LM-IAS, just connect
+ *
+ * This is the reply to an IAS query we were doing to find the TSAP of
+ * the device we want to connect to.
+ * If we have found a valid TSAP, just initiate the TTP connection
+ * on this TSAP.
+ */
+static void
+irnet_getvalue_confirm(int result,
+ __u16 obj_id,
+ struct ias_value *value,
+ void * priv)
+{
+ irnet_socket * self = (irnet_socket *) priv;
+
+ DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n");
+
+ /* Check if already connected (via irnet_connect_socket())
+ * or socket is closing down (via irda_irnet_destroy()) */
+ if(! test_bit(0, &self->ttp_connect))
+ {
+ DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. Ouch !\n");
+ return;
+ }
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* Post process the IAS reply */
+ self->dtsap_sel = irnet_ias_to_tsap(self, result, value);
+
+ /* If error, just go out */
+ if(self->errno)
+ {
+ clear_bit(0, &self->ttp_connect);
+ DERROR(IRDA_OCB_ERROR, "IAS connect failed ! (0x%X)\n", self->errno);
+ return;
+ }
+
+ DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n",
+ self->daddr, self->dtsap_sel);
+
+ /* Start up TTP - non blocking */
+ irnet_connect_tsap(self);
+
+ DEXIT(IRDA_OCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_discovervalue_confirm (result, obj_id, value, priv)
+ *
+ * Handle the TSAP discovery procedure state machine.
+ *    Got an answer from the remote LM-IAS, try the next device
+ *
+ * We are doing a TSAP discovery procedure, and we got an answer to
+ * an IAS query we were doing to find the TSAP on one of the addresses
+ * in the discovery log.
+ *
+ * If we have found a valid TSAP for the first time, save it. If it's
+ * not the first time we found one, complain.
+ *
+ * If we have more addresses in the log, just initiate a new query.
+ * Note that those queries may fail (see irnet_discover_daddr_and_lsap_sel())
+ *
+ * Otherwise, wrap up the procedure (cleanup), check if we have found
+ * any device and connect to it.
+ */
+static void
+irnet_discovervalue_confirm(int result,
+ __u16 obj_id,
+ struct ias_value *value,
+ void * priv)
+{
+ irnet_socket * self = (irnet_socket *) priv;
+ __u8 dtsap_sel; /* TSAP we are looking for */
+
+ DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n");
+
+ /* Check if already connected (via irnet_connect_socket())
+ * or socket is closing down (via irda_irnet_destroy()) */
+ if(! test_bit(0, &self->ttp_connect))
+ {
+ DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. Ouch !\n");
+ return;
+ }
+
+ /* Post process the IAS reply */
+ dtsap_sel = irnet_ias_to_tsap(self, result, value);
+
+ /* Have we got something ? */
+ if(self->errno == 0)
+ {
+ /* We found the requested service */
+ if(self->daddr != DEV_ADDR_ANY)
+ {
+ DERROR(IRDA_OCB_ERROR, "More than one device in range supports IrNET...\n");
+ }
+ else
+ {
+ /* First time we found that one, save it ! */
+ self->daddr = self->discoveries[self->disco_index].daddr;
+ self->dtsap_sel = dtsap_sel;
+ }
+ }
+
+ /* If no failure */
+ if((self->errno == -EADDRNOTAVAIL) || (self->errno == 0))
+ {
+ int ret;
+
+ /* Search the next node */
+ ret = irnet_discover_next_daddr(self);
+ if(!ret)
+ {
+ /* In this case, the above request was non-blocking.
+ * We will return here after a while... */
+ return;
+ }
+ /* In this case, we have processed the last discovery item */
+ }
+
+ /* No more queries to be done (failure or last one) */
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* No more items : remove the log and signal termination */
+ DEBUG(IRDA_OCB_INFO, "Cleaning up log (0x%p)\n",
+ self->discoveries);
+ if(self->discoveries != NULL)
+ {
+ /* Cleanup our copy of the discovery log */
+ kfree(self->discoveries);
+ self->discoveries = NULL;
+ }
+ self->disco_number = -1;
+
+ /* Check out what we found */
+ if(self->daddr == DEV_ADDR_ANY)
+ {
+ self->daddr = DEV_ADDR_ANY;
+ clear_bit(0, &self->ttp_connect);
+ DEXIT(IRDA_OCB_TRACE, ": cannot discover IrNET in any device !!!\n");
+ return;
+ }
+
+ /* We have a valid address - just connect */
+
+ DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n",
+ self->daddr, self->dtsap_sel);
+
+ /* Start up TTP - non blocking */
+ irnet_connect_tsap(self);
+
+ DEXIT(IRDA_OCB_TRACE, "\n");
+}
+
+#ifdef DISCOVERY_EVENTS
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_discovery_indication (discovery)
+ *
+ * Got a discovery indication from IrLMP, post an event
+ *
+ * Note : IrLMP takes care of matching the hint mask for us, and also
+ * checks if it is a "new" node for us...
+ *
+ * As IrLMP filters on the IrLAN hint bit, we get both IrLAN and IrNET
+ * nodes, so it's only at connection time that we will know if the
+ * node supports IrNET, IrLAN or both. The other solution is to check
+ * the PnP ids and service name in IAS.
+ * Note : even if a node supports IrNET (or IrLAN), it's no guarantee
+ * that we will be able to connect to it, the node might already be
+ * busy...
+ *
+ * One last thing : in some cases, this function will trigger duplicate
+ * discovery events. On the other hand, we should catch all
+ * discoveries properly (i.e. not miss one). Filtering duplicates here
+ * is too messy, so we leave that to user space...
+ */
+static void
+irnet_discovery_indication(discinfo_t * discovery,
+ DISCOVERY_MODE mode,
+ void * priv)
+{
+ irnet_socket * self = &irnet_server.s;
+
+ DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR,
+ "Invalid instance (0x%p) !!!\n", priv);
+
+ DEBUG(IRDA_OCB_INFO, "Discovered new IrNET/IrLAN node %s...\n",
+ discovery->info);
+
+ /* Notify the control channel */
+ irnet_post_event(NULL, IRNET_DISCOVER,
+ discovery->saddr, discovery->daddr, discovery->info,
+ get_unaligned((__u16 *)discovery->hints));
+
+ DEXIT(IRDA_OCB_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_expiry_indication (expiry)
+ *
+ *    Got an expiry indication from IrLMP, post an event
+ *
+ * Note : IrLMP takes care of matching the hint mask for us, we only
+ * check if it is a "new" node...
+ */
+static void
+irnet_expiry_indication(discinfo_t * expiry,
+ DISCOVERY_MODE mode,
+ void * priv)
+{
+ irnet_socket * self = &irnet_server.s;
+
+ DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
+ DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR,
+ "Invalid instance (0x%p) !!!\n", priv);
+
+ DEBUG(IRDA_OCB_INFO, "IrNET/IrLAN node %s expired...\n",
+ expiry->info);
+
+ /* Notify the control channel */
+ irnet_post_event(NULL, IRNET_EXPIRE,
+ expiry->saddr, expiry->daddr, expiry->info,
+ get_unaligned((__u16 *)expiry->hints));
+
+ DEXIT(IRDA_OCB_TRACE, "\n");
+}
+#endif /* DISCOVERY_EVENTS */
+
+
+/*********************** PROC ENTRY CALLBACKS ***********************/
+/*
+ * We create an entry in the /proc filesystem, and here we take care
+ * of that...
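+ *
+ * A purely illustrative dump of /proc/net/irda/irnet (hypothetical values) :
+ *	IrNET server - IrDA state: running, stsap_sel: 01, dtsap_sel: 00
+ *	IrNET socket 0 - Requested IrDA name: "MyPeer", daddr: ffffffff, saddr: 00000000
+ *	 PPP state: registered, unit: ppp0, channel: 1, mru: 1500
+ *	 IrDA state: connected, daddr: 5ff8c002, stsap_sel: 02, dtsap_sel: 6a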
+ */
+
+#ifdef CONFIG_PROC_FS
+static int
+irnet_proc_show(struct seq_file *m, void *v)
+{
+ irnet_socket * self;
+ char * state;
+ int i = 0;
+
+ /* Get the IrNET server information... */
+ seq_printf(m, "IrNET server - ");
+ seq_printf(m, "IrDA state: %s, ",
+ (irnet_server.running ? "running" : "dead"));
+ seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel);
+ seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel);
+
+ /* Do we need to continue ? */
+ if(!irnet_server.running)
+ return 0;
+
+ /* Protect access to the instance list */
+ spin_lock_bh(&irnet_server.spinlock);
+
+ /* Get the sockets one by one... */
+ self = (irnet_socket *) hashbin_get_first(irnet_server.list);
+ while(self != NULL)
+ {
+ /* Start printing info about the socket. */
+ seq_printf(m, "\nIrNET socket %d - ", i++);
+
+ /* First, get the requested configuration */
+ seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname);
+ seq_printf(m, "daddr: %08x, ", self->rdaddr);
+ seq_printf(m, "saddr: %08x\n", self->rsaddr);
+
+ /* Second, get all the PPP info */
+ seq_printf(m, " PPP state: %s",
+ (self->ppp_open ? "registered" : "unregistered"));
+ if(self->ppp_open)
+ {
+ seq_printf(m, ", unit: ppp%d",
+ ppp_unit_number(&self->chan));
+ seq_printf(m, ", channel: %d",
+ ppp_channel_index(&self->chan));
+ seq_printf(m, ", mru: %d",
+ self->mru);
+ /* Maybe add self->flags ? Later... */
+ }
+
+ /* Then, get all the IrDA specific info... */
+ if(self->ttp_open)
+ state = "connected";
+ else
+ if(self->tsap != NULL)
+ state = "connecting";
+ else
+ if(self->iriap != NULL)
+ state = "searching";
+ else
+ if(self->ttp_connect)
+ state = "weird";
+ else
+ state = "idle";
+ seq_printf(m, "\n IrDA state: %s, ", state);
+ seq_printf(m, "daddr: %08x, ", self->daddr);
+ seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel);
+ seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel);
+
+ /* Next socket, please... */
+ self = (irnet_socket *) hashbin_get_next(irnet_server.list);
+ }
+
+ /* Spin lock end */
+ spin_unlock_bh(&irnet_server.spinlock);
+
+ return 0;
+}
+
+static int irnet_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irnet_proc_show, NULL);
+}
+
+static const struct file_operations irnet_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = irnet_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+
+/********************** CONFIGURATION/CLEANUP **********************/
+/*
+ * Initialisation and teardown of the IrDA part, called at module
+ * insertion and removal...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Prepare the IrNET layer for operation...
+ */
+int __init
+irda_irnet_init(void)
+{
+ int err = 0;
+
+ DENTER(MODULE_TRACE, "()\n");
+
+ /* Pure paranoia - should be redundant */
+ memset(&irnet_server, 0, sizeof(struct irnet_root));
+
+ /* Setup start of irnet instance list */
+ irnet_server.list = hashbin_new(HB_NOLOCK);
+ DABORT(irnet_server.list == NULL, -ENOMEM,
+ MODULE_ERROR, "Can't allocate hashbin!\n");
+ /* Init spinlock for instance list */
+ spin_lock_init(&irnet_server.spinlock);
+
+ /* Initialise control channel */
+ init_waitqueue_head(&irnet_events.rwait);
+ irnet_events.index = 0;
+ /* Init spinlock for event logging */
+ spin_lock_init(&irnet_events.spinlock);
+
+#ifdef CONFIG_PROC_FS
+ /* Add a /proc file for irnet infos */
+ proc_create("irnet", 0, proc_irda, &irnet_proc_fops);
+#endif /* CONFIG_PROC_FS */
+
+ /* Setup the IrNET server */
+ err = irnet_setup_server();
+
+ if(!err)
+    /* We are now operational... */
+ irnet_server.running = 1;
+
+ DEXIT(MODULE_TRACE, "\n");
+ return err;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Cleanup at exit...
+ */
+void __exit
+irda_irnet_cleanup(void)
+{
+ DENTER(MODULE_TRACE, "()\n");
+
+ /* We are no longer there... */
+ irnet_server.running = 0;
+
+#ifdef CONFIG_PROC_FS
+ /* Remove our /proc file */
+ remove_proc_entry("irnet", proc_irda);
+#endif /* CONFIG_PROC_FS */
+
+ /* Remove our IrNET server from existence */
+ irnet_destroy_server();
+
+ /* Remove all instances of IrNET socket still present */
+ hashbin_delete(irnet_server.list, (FREE_FUNC) irda_irnet_destroy);
+
+ DEXIT(MODULE_TRACE, "\n");
+}
diff --git a/drivers/staging/irda/net/irnet/irnet_irda.h b/drivers/staging/irda/net/irnet/irnet_irda.h
new file mode 100644
index 000000000000..3e408952a3f1
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/irnet_irda.h
@@ -0,0 +1,178 @@
+/*
+ * IrNET protocol module : Synchronous PPP over an IrDA socket.
+ *
+ * Jean II - HPL `00 - <jt@hpl.hp.com>
+ *
+ * This file contains all definitions and declarations necessary for the
+ * IRDA part of the IrNET module (dealing with IrTTP, IrIAS and co).
+ * This file is a private header, so other modules don't want to know
+ * what's in there...
+ */
+
+#ifndef IRNET_IRDA_H
+#define IRNET_IRDA_H
+
+/***************************** INCLUDES *****************************/
+/* Please add other headers in irnet.h */
+
+#include "irnet.h" /* Module global include */
+
+/************************ CONSTANTS & MACROS ************************/
+
+/*
+ * Name of the service (socket name) used by IrNET
+ */
+/* IAS object name (or part of it) */
+#define IRNET_SERVICE_NAME "IrNetv1"
+/* IAS attribute holding our TSAP selector (queried by the peer) */
+#define IRNET_IAS_VALUE "IrDA:TinyTP:LsapSel"
+/* LMP notify name for client (only for /proc/net/irda/irlmp) */
+#define IRNET_NOTIFY_NAME "IrNET socket"
+/* LMP notify name for server (only for /proc/net/irda/irlmp) */
+#define IRNET_NOTIFY_NAME_SERV "IrNET server"
+
+/****************************** TYPES ******************************/
+
+/*
+ * This is the main structure where we store all the data pertaining to
+ * the IrNET server (listen for connection requests) and the root
+ * of the IrNET socket list
+ */
+typedef struct irnet_root
+{
+ irnet_socket s; /* To pretend we are a client... */
+
+ /* Generic stuff */
+ int magic; /* Paranoia */
+ int running; /* Are we operational ? */
+
+ /* Link list of all IrNET instances opened */
+ hashbin_t * list;
+ spinlock_t spinlock; /* Serialize access to the list */
+ /* Note : the way hashbin has been designed is absolutely not
+   * reentrant, beware... So, we blindly protect everything with the spinlock */
+
+ /* Handle for the hint bit advertised in IrLMP */
+ void * skey;
+
+ /* Server socket part */
+ struct ias_object * ias_obj; /* Our service name + lsap in IAS */
+
+} irnet_root;
+
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- CONTROL CHANNEL ----------------------- */
+static void
+ irnet_post_event(irnet_socket *,
+ irnet_event,
+ __u32,
+ __u32,
+ char *,
+ __u16);
+/* ----------------------- IRDA SUBROUTINES ----------------------- */
+static inline int
+ irnet_open_tsap(irnet_socket *);
+static inline __u8
+ irnet_ias_to_tsap(irnet_socket *,
+ int,
+ struct ias_value *);
+static inline int
+ irnet_find_lsap_sel(irnet_socket *);
+static inline int
+ irnet_connect_tsap(irnet_socket *);
+static inline int
+ irnet_discover_next_daddr(irnet_socket *);
+static inline int
+ irnet_discover_daddr_and_lsap_sel(irnet_socket *);
+static inline int
+ irnet_dname_to_daddr(irnet_socket *);
+/* ------------------------ SERVER SOCKET ------------------------ */
+static inline int
+ irnet_daddr_to_dname(irnet_socket *);
+static inline irnet_socket *
+ irnet_find_socket(irnet_socket *);
+static inline int
+ irnet_connect_socket(irnet_socket *,
+ irnet_socket *,
+ struct qos_info *,
+ __u32,
+ __u8);
+static inline void
+ irnet_disconnect_server(irnet_socket *,
+ struct sk_buff *);
+static inline int
+ irnet_setup_server(void);
+static inline void
+ irnet_destroy_server(void);
+/* ---------------------- IRDA-TTP CALLBACKS ---------------------- */
+static int
+ irnet_data_indication(void *, /* instance */
+ void *, /* sap */
+ struct sk_buff *);
+static void
+ irnet_disconnect_indication(void *,
+ void *,
+ LM_REASON,
+ struct sk_buff *);
+static void
+ irnet_connect_confirm(void *,
+ void *,
+ struct qos_info *,
+ __u32,
+ __u8,
+ struct sk_buff *);
+static void
+ irnet_flow_indication(void *,
+ void *,
+ LOCAL_FLOW);
+static void
+ irnet_status_indication(void *,
+ LINK_STATUS,
+ LOCK_STATUS);
+static void
+ irnet_connect_indication(void *,
+ void *,
+ struct qos_info *,
+ __u32,
+ __u8,
+ struct sk_buff *);
+/* -------------------- IRDA-IAS/LMP CALLBACKS -------------------- */
+static void
+ irnet_getvalue_confirm(int,
+ __u16,
+ struct ias_value *,
+ void *);
+static void
+ irnet_discovervalue_confirm(int,
+ __u16,
+ struct ias_value *,
+ void *);
+#ifdef DISCOVERY_EVENTS
+static void
+ irnet_discovery_indication(discinfo_t *,
+ DISCOVERY_MODE,
+ void *);
+static void
+ irnet_expiry_indication(discinfo_t *,
+ DISCOVERY_MODE,
+ void *);
+#endif
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * The IrNET server. Listen to connection requests and co...
+ */
+static struct irnet_root irnet_server;
+
+/* Control channel stuff (note : extern) */
+struct irnet_ctrl_channel irnet_events;
+
+/* The /proc/net/irda directory, defined elsewhere... */
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *proc_irda;
+#endif /* CONFIG_PROC_FS */
+
+#endif /* IRNET_IRDA_H */
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c
new file mode 100644
index 000000000000..7025dcb853d0
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.c
@@ -0,0 +1,1189 @@
+/*
+ * IrNET protocol module : Synchronous PPP over an IrDA socket.
+ *
+ * Jean II - HPL `00 - <jt@hpl.hp.com>
+ *
+ * This file implements the PPP interface and the /dev/irnet character device.
+ * The PPP interface hooks into the ppp_generic module, handles all our
+ * relationship to the PPP code in the kernel (and by extension to pppd),
+ * and exchanges PPP frames with this module (send/receive).
+ * The /dev/irnet device is used primarily for two purposes :
+ * 1) as a stub for pppd (the ppp daemon), so that we can appropriately
+ * generate PPP sessions (we pretend we are a tty).
+ * 2) as a control channel (write commands, read events)
+ */
+
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "irnet_ppp.h" /* Private header */
+/* Please put other headers in irnet.h - Thanks */
+
+/* Generic PPP callbacks (to call us) */
+static const struct ppp_channel_ops irnet_ppp_ops = {
+ .start_xmit = ppp_irnet_send,
+ .ioctl = ppp_irnet_ioctl
+};
+
+/************************* CONTROL CHANNEL *************************/
+/*
+ * When a pppd instance is not active on /dev/irnet, it acts as a control
+ * channel.
+ * Writing allows setting up the IrDA destination of the IrNET channel,
+ * and any application may read the events happening in IrNET...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write is used to send a command to configure an IrNET channel
+ * before it is opened by pppd. The syntax is : "command argument"
+ * Currently there are only two defined commands :
+ * o name : set the requested IrDA nickname of the IrNET peer.
+ * o addr : set the requested IrDA address of the IrNET peer.
+ * Note : the code is crude, but effective...
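+ * Example (hypothetical peer name and address), before attaching pppd :
+ *	echo "name MyPeer,daddr 5ff8c002" > /dev/irnet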
+ */
+static inline ssize_t
+irnet_ctrl_write(irnet_socket * ap,
+ const char __user *buf,
+ size_t count)
+{
+ char command[IRNET_MAX_COMMAND];
+ char * start; /* Current command being processed */
+ char * next; /* Next command to process */
+ int length; /* Length of current command */
+
+ DENTER(CTRL_TRACE, "(ap=0x%p, count=%zd)\n", ap, count);
+
+ /* Check for overflow... */
+ DABORT(count >= IRNET_MAX_COMMAND, -ENOMEM,
+ CTRL_ERROR, "Too much data !!!\n");
+
+ /* Get the data in the driver */
+ if(copy_from_user(command, buf, count))
+ {
+ DERROR(CTRL_ERROR, "Invalid user space pointer.\n");
+ return -EFAULT;
+ }
+
+  /* Safely terminate the string */
+ command[count] = '\0';
+ DEBUG(CTRL_INFO, "Command line received is ``%s'' (%zd).\n",
+ command, count);
+
+  /* Check every command in the command line */
+ next = command;
+ while(next != NULL)
+ {
+ /* Look at the next command */
+ start = next;
+
+ /* Scrap whitespaces before the command */
+ start = skip_spaces(start);
+
+ /* ',' is our command separator */
+ next = strchr(start, ',');
+ if(next)
+ {
+ *next = '\0'; /* Terminate command */
+ length = next - start; /* Length */
+ next++; /* Skip the '\0' */
+ }
+ else
+ length = strlen(start);
+
+ DEBUG(CTRL_INFO, "Found command ``%s'' (%d).\n", start, length);
+
+      /* Check if we recognised one of the known commands
+ * We can't use "switch" with strings, so hack with "continue" */
+
+ /* First command : name -> Requested IrDA nickname */
+ if(!strncmp(start, "name", 4))
+ {
+	  /* Copy the name only if it is included and not "any" */
+ if((length > 5) && (strcmp(start + 5, "any")))
+ {
+ /* Strip out trailing whitespaces */
+ while(isspace(start[length - 1]))
+ length--;
+
+ DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5,
+ -EINVAL, CTRL_ERROR, "Invalid nickname.\n");
+
+ /* Copy the name for later reuse */
+ memcpy(ap->rname, start + 5, length - 5);
+ ap->rname[length - 5] = '\0';
+ }
+ else
+ ap->rname[0] = '\0';
+ DEBUG(CTRL_INFO, "Got rname = ``%s''\n", ap->rname);
+
+ /* Restart the loop */
+ continue;
+ }
+
+ /* Second command : addr, daddr -> Requested IrDA destination address
+ * Also process : saddr -> Requested IrDA source address */
+ if((!strncmp(start, "addr", 4)) ||
+ (!strncmp(start, "daddr", 5)) ||
+ (!strncmp(start, "saddr", 5)))
+ {
+ __u32 addr = DEV_ADDR_ANY;
+
+	  /* Copy the address only if it is included and not "any" */
+ if((length > 5) && (strcmp(start + 5, "any")))
+ {
+ char * begp = start + 5;
+ char * endp;
+
+	      /* Scrap whitespace before the argument */
+ begp = skip_spaces(begp);
+
+ /* Convert argument to a number (last arg is the base) */
+ addr = simple_strtoul(begp, &endp, 16);
+ /* Has it worked ? (endp should be start + length) */
+ DABORT(endp <= (start + 5), -EINVAL,
+ CTRL_ERROR, "Invalid address.\n");
+ }
+ /* Which type of address ? */
+ if(start[0] == 's')
+ {
+ /* Save it */
+ ap->rsaddr = addr;
+ DEBUG(CTRL_INFO, "Got rsaddr = %08x\n", ap->rsaddr);
+ }
+ else
+ {
+ /* Save it */
+ ap->rdaddr = addr;
+ DEBUG(CTRL_INFO, "Got rdaddr = %08x\n", ap->rdaddr);
+ }
+
+ /* Restart the loop */
+ continue;
+ }
+
+ /* Other possible command : connect N (number of retries) */
+
+ /* No command matched -> Failed... */
+ DABORT(1, -EINVAL, CTRL_ERROR, "Not a recognised IrNET command.\n");
+ }
+
+ /* Success : we have parsed all commands successfully */
+ return count;
+}
+
+#ifdef INITIAL_DISCOVERY
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_get_discovery_log (self)
+ *
+ *    Query the content of the discovery log if not already done
+ *
+ * This function queries the current content of the discovery log
+ * at the startup of the event channel and saves it in the internal struct.
+ */
+static void
+irnet_get_discovery_log(irnet_socket * ap)
+{
+ __u16 mask = irlmp_service_to_hint(S_LAN);
+
+ /* Ask IrLMP for the current discovery log */
+ ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask,
+ DISCOVERY_DEFAULT_SLOTS);
+
+  /* Check if we got some results */
+ if(ap->discoveries == NULL)
+ ap->disco_number = -1;
+
+ DEBUG(CTRL_INFO, "Got the log (0x%p), size is %d\n",
+ ap->discoveries, ap->disco_number);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irnet_read_discovery_log (self, event)
+ *
+ *    Read the content of the discovery log
+ *
+ * This function dumps the current content of the discovery log
+ * at the startup of the event channel.
+ * Returns 1 if it wrote an event on the control channel...
+ *
+ * State of the ap->disco_XXX variables :
+ * Socket creation : discoveries = NULL ; disco_index = 0 ; disco_number = 0
+ * While reading : discoveries = ptr ; disco_index = X ; disco_number = Y
+ * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1
+ */
+static inline int
+irnet_read_discovery_log(irnet_socket *ap, char *event, int buf_size)
+{
+ int done_event = 0;
+
+ DENTER(CTRL_TRACE, "(ap=0x%p, event=0x%p)\n",
+ ap, event);
+
+ /* Test if we have some work to do or we have already finished */
+ if(ap->disco_number == -1)
+ {
+ DEBUG(CTRL_INFO, "Already done\n");
+ return 0;
+ }
+
+ /* Test if it's the first time and therefore we need to get the log */
+ if(ap->discoveries == NULL)
+ irnet_get_discovery_log(ap);
+
+  /* Check if we have more items to dump */
+ if(ap->disco_index < ap->disco_number)
+ {
+ /* Write an event */
+ snprintf(event, buf_size,
+ "Found %08x (%s) behind %08x {hints %02X-%02X}\n",
+ ap->discoveries[ap->disco_index].daddr,
+ ap->discoveries[ap->disco_index].info,
+ ap->discoveries[ap->disco_index].saddr,
+ ap->discoveries[ap->disco_index].hints[0],
+ ap->discoveries[ap->disco_index].hints[1]);
+ DEBUG(CTRL_INFO, "Writing discovery %d : %s\n",
+ ap->disco_index, ap->discoveries[ap->disco_index].info);
+
+ /* We have an event */
+ done_event = 1;
+ /* Next discovery */
+ ap->disco_index++;
+ }
+
+  /* Check if we have processed the last item */
+ if(ap->disco_index >= ap->disco_number)
+ {
+ /* No more items : remove the log and signal termination */
+ DEBUG(CTRL_INFO, "Cleaning up log (0x%p)\n",
+ ap->discoveries);
+ if(ap->discoveries != NULL)
+ {
+ /* Cleanup our copy of the discovery log */
+ kfree(ap->discoveries);
+ ap->discoveries = NULL;
+ }
+ ap->disco_number = -1;
+ }
+
+ return done_event;
+}
+#endif /* INITIAL_DISCOVERY */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read is used to get IrNET events
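+ * Each read returns at most one formatted text event, for example
+ * (hypothetical values) :
+ *	Discovered 5ff8c002 (MyPeer) behind 5ff8c001 {hints 04-20}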
+ */
+static inline ssize_t
+irnet_ctrl_read(irnet_socket * ap,
+ struct file * file,
+ char __user * buf,
+ size_t count)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ char event[75];
+ ssize_t ret = 0;
+
+ DENTER(CTRL_TRACE, "(ap=0x%p, count=%zd)\n", ap, count);
+
+#ifdef INITIAL_DISCOVERY
+ /* Check if we have read the log */
+ if (irnet_read_discovery_log(ap, event, sizeof(event)))
+ {
+ count = min(strlen(event), count);
+ if (copy_to_user(buf, event, count))
+ {
+ DERROR(CTRL_ERROR, "Invalid user space pointer.\n");
+ return -EFAULT;
+ }
+
+ DEXIT(CTRL_TRACE, "\n");
+ return count;
+ }
+#endif /* INITIAL_DISCOVERY */
+
+ /* Put ourselves on the wait queue to be woken up */
+ add_wait_queue(&irnet_events.rwait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ for(;;)
+ {
+      /* If there are unread events */
+ ret = 0;
+ if(ap->event_index != irnet_events.index)
+ break;
+ ret = -EAGAIN;
+ if(file->f_flags & O_NONBLOCK)
+ break;
+ ret = -ERESTARTSYS;
+ if(signal_pending(current))
+ break;
+ /* Yield and wait to be woken up */
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&irnet_events.rwait, &wait);
+
+  /* Did we get it ? */
+ if(ret != 0)
+ {
+ /* No, return the error code */
+ DEXIT(CTRL_TRACE, " - ret %zd\n", ret);
+ return ret;
+ }
+
+ /* Which event is it ? */
+ switch(irnet_events.log[ap->event_index].event)
+ {
+ case IRNET_DISCOVER:
+ snprintf(event, sizeof(event),
+ "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].saddr,
+ irnet_events.log[ap->event_index].hints.byte[0],
+ irnet_events.log[ap->event_index].hints.byte[1]);
+ break;
+ case IRNET_EXPIRE:
+ snprintf(event, sizeof(event),
+ "Expired %08x (%s) behind %08x {hints %02X-%02X}\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].saddr,
+ irnet_events.log[ap->event_index].hints.byte[0],
+ irnet_events.log[ap->event_index].hints.byte[1]);
+ break;
+ case IRNET_CONNECT_TO:
+ snprintf(event, sizeof(event), "Connected to %08x (%s) on ppp%d\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].unit);
+ break;
+ case IRNET_CONNECT_FROM:
+ snprintf(event, sizeof(event), "Connection from %08x (%s) on ppp%d\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].unit);
+ break;
+ case IRNET_REQUEST_FROM:
+ snprintf(event, sizeof(event), "Request from %08x (%s) behind %08x\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].saddr);
+ break;
+ case IRNET_NOANSWER_FROM:
+ snprintf(event, sizeof(event), "No-answer from %08x (%s) on ppp%d\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].unit);
+ break;
+ case IRNET_BLOCKED_LINK:
+ snprintf(event, sizeof(event), "Blocked link with %08x (%s) on ppp%d\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].unit);
+ break;
+ case IRNET_DISCONNECT_FROM:
+ snprintf(event, sizeof(event), "Disconnection from %08x (%s) on ppp%d\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name,
+ irnet_events.log[ap->event_index].unit);
+ break;
+ case IRNET_DISCONNECT_TO:
+ snprintf(event, sizeof(event), "Disconnected to %08x (%s)\n",
+ irnet_events.log[ap->event_index].daddr,
+ irnet_events.log[ap->event_index].name);
+ break;
+ default:
+ snprintf(event, sizeof(event), "Bug\n");
+ }
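+  /* Note : each reader keeps its own index into the shared circular event
+   * log, so a reader that lags behind may miss events once the log wraps */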
+ /* Increment our event index */
+ ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS;
+
+ DEBUG(CTRL_INFO, "Event is :%s", event);
+
+ count = min(strlen(event), count);
+ if (copy_to_user(buf, event, count))
+ {
+ DERROR(CTRL_ERROR, "Invalid user space pointer.\n");
+ return -EFAULT;
+ }
+
+ DEXIT(CTRL_TRACE, "\n");
+ return count;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Poll : called when someone does a select on /dev/irnet.
+ * Just check if there are new events...
+ */
+static inline unsigned int
+irnet_ctrl_poll(irnet_socket * ap,
+ struct file * file,
+ poll_table * wait)
+{
+ unsigned int mask;
+
+ DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
+
+ poll_wait(file, &irnet_events.rwait, wait);
+ mask = POLLOUT | POLLWRNORM;
+  /* If there are unread events */
+ if(ap->event_index != irnet_events.index)
+ mask |= POLLIN | POLLRDNORM;
+#ifdef INITIAL_DISCOVERY
+ if(ap->disco_number != -1)
+ {
+ /* Test if it's the first time and therefore we need to get the log */
+ if(ap->discoveries == NULL)
+ irnet_get_discovery_log(ap);
+ /* Recheck */
+ if(ap->disco_number != -1)
+ mask |= POLLIN | POLLRDNORM;
+ }
+#endif /* INITIAL_DISCOVERY */
+
+ DEXIT(CTRL_TRACE, " - mask=0x%X\n", mask);
+ return mask;
+}
+
+
+/*********************** FILESYSTEM CALLBACKS ***********************/
+/*
+ * Implement the usual open, read, write functions that will be called
+ * by the file system when some action is performed on /dev/irnet.
+ * Most of those actions will in fact be performed by "pppd" or
+ * the control channel, we just act as a redirector...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Open : when somebody opens /dev/irnet
+ * We basically create a new instance of irnet and initialise it.
+ */
+static int
+dev_irnet_open(struct inode * inode,
+ struct file * file)
+{
+ struct irnet_socket * ap;
+ int err;
+
+ DENTER(FS_TRACE, "(file=0x%p)\n", file);
+
+#ifdef SECURE_DEVIRNET
+ /* This could (should?) be enforced by the permissions on /dev/irnet. */
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+#endif /* SECURE_DEVIRNET */
+
+ /* Allocate a private structure for this IrNET instance */
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+ DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n");
+
+ /* initialize the irnet structure */
+ ap->file = file;
+
+ /* PPP channel setup */
+ ap->ppp_open = 0;
+ ap->chan.private = ap;
+ ap->chan.ops = &irnet_ppp_ops;
+ ap->chan.mtu = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN);
+ ap->chan.hdrlen = 2 + TTP_MAX_HEADER; /* for A/C + Max IrDA hdr */
+ /* PPP parameters */
+ ap->mru = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN);
+ ap->xaccm[0] = ~0U;
+ ap->xaccm[3] = 0x60000000U;
+ ap->raccm = ~0U;
+
+ /* Setup the IrDA part... */
+ err = irda_irnet_create(ap);
+ if(err)
+ {
+ DERROR(FS_ERROR, "Can't setup IrDA link...\n");
+ kfree(ap);
+
+ return err;
+ }
+
+ /* For the control channel */
+ ap->event_index = irnet_events.index; /* Cancel all past events */
+
+ mutex_init(&ap->lock);
+
+ /* Put our stuff where we will be able to find it later */
+ file->private_data = ap;
+
+ DEXIT(FS_TRACE, " - ap=0x%p\n", ap);
+
+ return 0;
+}
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Close : when somebody closes /dev/irnet
+ * Destroy the instance of /dev/irnet
+ */
+static int
+dev_irnet_close(struct inode * inode,
+ struct file * file)
+{
+ irnet_socket * ap = file->private_data;
+
+ DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
+ file, ap);
+ DABORT(ap == NULL, 0, FS_ERROR, "ap is NULL !!!\n");
+
+ /* Detach ourselves */
+ file->private_data = NULL;
+
+ /* Close IrDA stuff */
+ irda_irnet_destroy(ap);
+
+ /* Disconnect from the generic PPP layer if not already done */
+ if(ap->ppp_open)
+ {
+ DERROR(FS_ERROR, "Channel still registered - deregistering !\n");
+ ap->ppp_open = 0;
+ ppp_unregister_channel(&ap->chan);
+ }
+
+ kfree(ap);
+
+ DEXIT(FS_TRACE, "\n");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Write does nothing.
+ * (we receive packets from ppp_generic through ppp_irnet_send())
+ */
+static ssize_t
+dev_irnet_write(struct file * file,
+ const char __user *buf,
+ size_t count,
+ loff_t * ppos)
+{
+ irnet_socket * ap = file->private_data;
+
+ DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%zd)\n",
+ file, ap, count);
+ DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n");
+
+ /* If we are connected to ppp_generic, let it handle the job */
+ if(ap->ppp_open)
+ return -EAGAIN;
+ else
+ return irnet_ctrl_write(ap, buf, count);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read doesn't do much either.
+ * (pppd polls us, but ultimately reads through /dev/ppp)
+ */
+static ssize_t
+dev_irnet_read(struct file * file,
+ char __user * buf,
+ size_t count,
+ loff_t * ppos)
+{
+ irnet_socket * ap = file->private_data;
+
+ DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%zd)\n",
+ file, ap, count);
+ DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n");
+
+ /* If we are connected to ppp_generic, let it handle the job */
+ if(ap->ppp_open)
+ return -EAGAIN;
+ else
+ return irnet_ctrl_read(ap, file, buf, count);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Poll : called when someone does a select on /dev/irnet
+ */
+static unsigned int
+dev_irnet_poll(struct file * file,
+ poll_table * wait)
+{
+ irnet_socket * ap = file->private_data;
+ unsigned int mask;
+
+ DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
+ file, ap);
+
+ mask = POLLOUT | POLLWRNORM;
+ DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n");
+
+ /* If we are connected to ppp_generic, let it handle the job */
+ if(!ap->ppp_open)
+ mask |= irnet_ctrl_poll(ap, file, wait);
+
+ DEXIT(FS_TRACE, " - mask=0x%X\n", mask);
+ return mask;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * IOCtl : Called when someone does some ioctls on /dev/irnet
+ * This is the way pppd configures and controls us while the PPP
+ * instance is active.
+ */
+static long
+dev_irnet_ioctl(
+ struct file * file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ irnet_socket * ap = file->private_data;
+ int err;
+ int val;
+ void __user *argp = (void __user *)arg;
+
+ DENTER(FS_TRACE, "(file=0x%p, ap=0x%p, cmd=0x%X)\n",
+ file, ap, cmd);
+
+ /* Basic checks... */
+ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n");
+#ifdef SECURE_DEVIRNET
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+#endif /* SECURE_DEVIRNET */
+
+ err = -EFAULT;
+ switch(cmd)
+ {
+ /* Set discipline (should be N_SYNC_PPP or N_TTY) */
+ case TIOCSETD:
+ if(get_user(val, (int __user *)argp))
+ break;
+ if((val == N_SYNC_PPP) || (val == N_PPP))
+ {
+ DEBUG(FS_INFO, "Entering PPP discipline.\n");
+	  /* PPP channel setup (ap->chan is configured in dev_irnet_open()) */
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+ err = ppp_register_channel(&ap->chan);
+ if(err == 0)
+ {
+ /* Our ppp side is active */
+ ap->ppp_open = 1;
+
+ DEBUG(FS_INFO, "Trying to establish a connection.\n");
+ /* Setup the IrDA link now - may fail... */
+ irda_irnet_connect(ap);
+ }
+ else
+ DERROR(FS_ERROR, "Can't setup PPP channel...\n");
+
+ mutex_unlock(&ap->lock);
+ }
+ else
+ {
+ /* In theory, should be N_TTY */
+ DEBUG(FS_INFO, "Exiting PPP discipline.\n");
+ /* Disconnect from the generic PPP layer */
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+ if(ap->ppp_open)
+ {
+ ap->ppp_open = 0;
+ ppp_unregister_channel(&ap->chan);
+ }
+ else
+ DERROR(FS_ERROR, "Channel not registered !\n");
+ err = 0;
+
+ mutex_unlock(&ap->lock);
+ }
+ break;
+
+ /* Query PPP channel and unit number */
+ case PPPIOCGCHAN:
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+ if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
+ (int __user *)argp))
+ err = 0;
+
+ mutex_unlock(&ap->lock);
+ break;
+ case PPPIOCGUNIT:
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+ if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
+ (int __user *)argp))
+ err = 0;
+
+ mutex_unlock(&ap->lock);
+ break;
+
+ /* All these ioctls can be passed both directly and from ppp_generic,
+ * so we just deal with them in one place...
+ */
+ case PPPIOCGFLAGS:
+ case PPPIOCSFLAGS:
+ case PPPIOCGASYNCMAP:
+ case PPPIOCSASYNCMAP:
+ case PPPIOCGRASYNCMAP:
+ case PPPIOCSRASYNCMAP:
+ case PPPIOCGXASYNCMAP:
+ case PPPIOCSXASYNCMAP:
+ case PPPIOCGMRU:
+ case PPPIOCSMRU:
+ DEBUG(FS_INFO, "Standard PPP ioctl.\n");
+ if(!capable(CAP_NET_ADMIN))
+ err = -EPERM;
+ else {
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+ err = ppp_irnet_ioctl(&ap->chan, cmd, arg);
+
+ mutex_unlock(&ap->lock);
+ }
+ break;
+
+ /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */
+ /* Get termios */
+ case TCGETS:
+ DEBUG(FS_INFO, "Get termios.\n");
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+#ifndef TCGETS2
+ if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
+ err = 0;
+#else
+	if(!kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
+ err = 0;
+#endif
+
+ mutex_unlock(&ap->lock);
+ break;
+ /* Set termios */
+ case TCSETSF:
+ DEBUG(FS_INFO, "Set termios.\n");
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+
+#ifndef TCGETS2
+ if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
+ err = 0;
+#else
+ if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
+ err = 0;
+#endif
+
+ mutex_unlock(&ap->lock);
+ break;
+
+ /* Set DTR/RTS */
+ case TIOCMBIS:
+ case TIOCMBIC:
+ /* Set exclusive/non-exclusive mode */
+ case TIOCEXCL:
+ case TIOCNXCL:
+ DEBUG(FS_INFO, "TTY compatibility.\n");
+ err = 0;
+ break;
+
+ case TCGETA:
+ DEBUG(FS_INFO, "TCGETA\n");
+ break;
+
+ case TCFLSH:
+ DEBUG(FS_INFO, "TCFLSH\n");
+    /* Note : this will flush buffers in PPP, so it *must* be done.
+     * We should also worry that we don't accept junk here and that
+     * we get rid of our own buffers. */
+#ifdef FLUSH_TO_PPP
+ if (mutex_lock_interruptible(&ap->lock))
+ return -EINTR;
+ ppp_output_wakeup(&ap->chan);
+ mutex_unlock(&ap->lock);
+#endif /* FLUSH_TO_PPP */
+ err = 0;
+ break;
+
+ case FIONREAD:
+ DEBUG(FS_INFO, "FIONREAD\n");
+ val = 0;
+ if(put_user(val, (int __user *)argp))
+ break;
+ err = 0;
+ break;
+
+ default:
+ DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd);
+ err = -ENOTTY;
+ }
+
+ DEXIT(FS_TRACE, " - err = 0x%X\n", err);
+ return err;
+}
+
+/************************** PPP CALLBACKS **************************/
+/*
+ * These are the functions that the generic PPP driver in the kernel
+ * will call to communicate with us.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Prepare the ppp frame for transmission over the IrDA socket.
+ * We make sure that there is enough header space, and we adjust the PPP header
+ * according to the flags passed by pppd.
+ * This is not a callback, but just a helper function used in ppp_irnet_send()
+ */
+static inline struct sk_buff *
+irnet_prepare_skb(irnet_socket * ap,
+ struct sk_buff * skb)
+{
+ unsigned char * data;
+ int proto; /* PPP protocol */
+ int islcp; /* Protocol == LCP */
+ int needaddr; /* Need PPP address */
+
+ DENTER(PPP_TRACE, "(ap=0x%p, skb=0x%p)\n",
+ ap, skb);
+
+ /* Extract PPP protocol from the frame */
+ data = skb->data;
+ proto = (data[0] << 8) + data[1];
+
+ /* LCP packets with codes between 1 (configure-request)
+ * and 7 (code-reject) must be sent as though no options
+ * have been negotiated. */
+ islcp = (proto == PPP_LCP) && (1 <= data[2]) && (data[2] <= 7);
+
+ /* compress protocol field if option enabled */
+ if((data[0] == 0) && (ap->flags & SC_COMP_PROT) && (!islcp))
+ skb_pull(skb,1);
+
+ /* Check if we need address/control fields */
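+  /* needaddr evaluates to 0 or 2 : the number of bytes (PPP_ALLSTATIONS /
+   * PPP_UI, i.e. FF-03) that will be prepended further down if needed */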
+ needaddr = 2*((ap->flags & SC_COMP_AC) == 0 || islcp);
+
+ /* Is the skb headroom large enough to contain all IrDA-headers? */
+ if((skb_headroom(skb) < (ap->max_header_size + needaddr)) ||
+ (skb_shared(skb)))
+ {
+ struct sk_buff * new_skb;
+
+ DEBUG(PPP_INFO, "Reallocating skb\n");
+
+ /* Create a new skb */
+ new_skb = skb_realloc_headroom(skb, ap->max_header_size + needaddr);
+
+ /* We have to free the original skb anyway */
+ dev_kfree_skb(skb);
+
+ /* Did the realloc succeed ? */
+ DABORT(new_skb == NULL, NULL, PPP_ERROR, "Could not realloc skb\n");
+
+ /* Use the new skb instead */
+ skb = new_skb;
+ }
+
+ /* prepend address/control fields if necessary */
+ if(needaddr)
+ {
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
+ }
+
+ DEXIT(PPP_TRACE, "\n");
+
+ return skb;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Send a packet to the peer over the IrTTP connection.
+ * Returns 1 iff the packet was accepted.
+ * Returns 0 iff packet was not consumed.
+ * If the packet was not accepted, we will call ppp_output_wakeup
+ * at some later time to reactivate flow control in ppp_generic.
+ */
+static int
+ppp_irnet_send(struct ppp_channel * chan,
+ struct sk_buff * skb)
+{
+ irnet_socket * self = (struct irnet_socket *) chan->private;
+ int ret;
+
+ DENTER(PPP_TRACE, "(channel=0x%p, ap/self=0x%p)\n",
+ chan, self);
+
+ /* Check if things are somewhat valid... */
+ DASSERT(self != NULL, 0, PPP_ERROR, "Self is NULL !!!\n");
+
+ /* Check if we are connected */
+ if(!(test_bit(0, &self->ttp_open)))
+ {
+#ifdef CONNECT_IN_SEND
+ /* Let's try to connect one more time... */
+ /* Note : we won't be connected after this call, but we should be
+ * ready for next packet... */
+ /* If we are already connecting, this will fail */
+ irda_irnet_connect(self);
+#endif /* CONNECT_IN_SEND */
+
+ DEBUG(PPP_INFO, "IrTTP not ready ! (%ld-%ld)\n",
+ self->ttp_open, self->ttp_connect);
+
+ /* Note : we can either drop the packet or block the packet.
+ *
+     * Blocking the packet gives us a better connection time,
+     * because by calling ppp_output_wakeup() we can have
+     * ppp_generic resend the LCP request immediately to us,
+     * rather than waiting for one of pppd's periodic LCP
+     * transmissions.
+     *
+     * On the other hand, if we block all packets, all those periodic
+     * transmissions of pppd accumulate in ppp_generic, creating a
+     * backlog of LCP requests. When we eventually connect later on,
+     * we have to transmit all this backlog before the connection can
+     * proceed properly (if we don't time out first).
+     *
+     * The current strategy is as follows :
+     * While we are attempting to connect, we block packets to get
+     * a better connection time.
+     * If we fail to connect, we drain the queue and start dropping packets.
+ */
+#ifdef BLOCK_WHEN_CONNECT
+ /* If we are attempting to connect */
+ if(test_bit(0, &self->ttp_connect))
+ {
+ /* Blocking packet, ppp_generic will retry later */
+ return 0;
+ }
+#endif /* BLOCK_WHEN_CONNECT */
+
+ /* Dropping packet, pppd will retry later */
+ dev_kfree_skb(skb);
+ return 1;
+ }
+
+ /* Check if the queue can accept any packet, otherwise block */
+ if(self->tx_flow != FLOW_START)
+ DRETURN(0, PPP_INFO, "IrTTP queue full (%d skbs)...\n",
+ skb_queue_len(&self->tsap->tx_queue));
+
+ /* Prepare ppp frame for transmission */
+ skb = irnet_prepare_skb(self, skb);
+ DABORT(skb == NULL, 1, PPP_ERROR, "Prepare skb for Tx failed.\n");
+
+ /* Send the packet to IrTTP */
+ ret = irttp_data_request(self->tsap, skb);
+ if(ret < 0)
+ {
+ /*
+ * > IrTTPs tx queue is full, so we just have to
+ * > drop the frame! You might think that we should
+ * > just return -1 and don't deallocate the frame,
+ * > but that is dangerous since it's possible that
+ * > we have replaced the original skb with a new
+ * > one with larger headroom, and that would really
+ * > confuse do_dev_queue_xmit() in dev.c! I have
+ * > tried :-) DB
+ * Correction : we verify the flow control above (self->tx_flow),
+ * so we come here only if IrTTP doesn't like the packet (empty,
+ * too large, IrTTP not connected). In those rare cases, it's ok
+ * to drop it, we don't want to see it here again...
+ * Jean II
+ */
+ DERROR(PPP_ERROR, "IrTTP doesn't like this packet !!! (0x%X)\n", ret);
+      /* irttp_data_request already freed the packet */
+ }
+
+ DEXIT(PPP_TRACE, "\n");
+ return 1; /* Packet has been consumed */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Take care of the ioctls that ppp_generic doesn't want to deal with...
+ * Note : we are also called from dev_irnet_ioctl().
+ */
+static int
+ppp_irnet_ioctl(struct ppp_channel * chan,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ irnet_socket * ap = (struct irnet_socket *) chan->private;
+ int err;
+ int val;
+ u32 accm[8];
+ void __user *argp = (void __user *)arg;
+
+ DENTER(PPP_TRACE, "(channel=0x%p, ap=0x%p, cmd=0x%X)\n",
+ chan, ap, cmd);
+
+ /* Basic checks... */
+ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n");
+
+ err = -EFAULT;
+ switch(cmd)
+ {
+ /* PPP flags */
+ case PPPIOCGFLAGS:
+ val = ap->flags | ap->rbits;
+ if(put_user(val, (int __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSFLAGS:
+ if(get_user(val, (int __user *) argp))
+ break;
+ ap->flags = val & ~SC_RCV_BITS;
+ ap->rbits = val & SC_RCV_BITS;
+ err = 0;
+ break;
+
+ /* Async map stuff - all dummy to please pppd */
+ case PPPIOCGASYNCMAP:
+ if(put_user(ap->xaccm[0], (u32 __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSASYNCMAP:
+ if(get_user(ap->xaccm[0], (u32 __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCGRASYNCMAP:
+ if(put_user(ap->raccm, (u32 __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSRASYNCMAP:
+ if(get_user(ap->raccm, (u32 __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCGXASYNCMAP:
+ if(copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSXASYNCMAP:
+ if(copy_from_user(accm, argp, sizeof(accm)))
+ break;
+ accm[2] &= ~0x40000000U; /* can't escape 0x5e */
+ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
+ memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
+ err = 0;
+ break;
+
+ /* Max PPP frame size */
+ case PPPIOCGMRU:
+ if(put_user(ap->mru, (int __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSMRU:
+ if(get_user(val, (int __user *) argp))
+ break;
+ if(val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+ err = 0;
+ break;
+
+ default:
+ DEBUG(PPP_INFO, "Unsupported ioctl (0x%X)\n", cmd);
+ err = -ENOIOCTLCMD;
+ }
+
+ DEXIT(PPP_TRACE, " - err = 0x%X\n", err);
+ return err;
+}
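+
+/*
+ * For reference, a sketch of how these two callbacks (ppp_irnet_send()
+ * and ppp_irnet_ioctl()) get wired into ppp_generic. The actual
+ * registration happens elsewhere in IrNET, roughly like this :
+ *
+ *	static const struct ppp_channel_ops irnet_ppp_ops = {
+ *		.start_xmit = ppp_irnet_send,
+ *		.ioctl      = ppp_irnet_ioctl,
+ *	};
+ *
+ *	ap->chan.private = ap;
+ *	ap->chan.ops     = &irnet_ppp_ops;
+ *	ap->chan.mtu     = PPP_MRU;
+ *	ppp_register_channel(&ap->chan);
+ */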
+
+/************************** INITIALISATION **************************/
+/*
+ * Module initialisation and all that jazz...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Hook our device callbacks in the filesystem, to connect our code
+ * to /dev/irnet
+ */
+static inline int __init
+ppp_irnet_init(void)
+{
+ int err = 0;
+
+ DENTER(MODULE_TRACE, "()\n");
+
+ /* Allocate ourselves as a minor in the misc range */
+ err = misc_register(&irnet_misc_device);
+
+ DEXIT(MODULE_TRACE, "\n");
+ return err;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Cleanup at exit...
+ */
+static inline void __exit
+ppp_irnet_cleanup(void)
+{
+ DENTER(MODULE_TRACE, "()\n");
+
+ /* De-allocate /dev/irnet minor in misc range */
+ misc_deregister(&irnet_misc_device);
+
+ DEXIT(MODULE_TRACE, "\n");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Module main entry point
+ */
+static int __init
+irnet_init(void)
+{
+ int err;
+
+ /* Initialise both parts... */
+ err = irda_irnet_init();
+ if(!err)
+ err = ppp_irnet_init();
+ return err;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Module exit
+ */
+static void __exit
+irnet_cleanup(void)
+{
+ irda_irnet_cleanup();
+ ppp_irnet_cleanup();
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Module magic
+ */
+module_init(irnet_init);
+module_exit(irnet_cleanup);
+MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("IrNET : Synchronous PPP over IrDA");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(10, 187);
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h
new file mode 100644
index 000000000000..32061442cc8e
--- /dev/null
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.h
@@ -0,0 +1,116 @@
+/*
+ * IrNET protocol module : Synchronous PPP over an IrDA socket.
+ *
+ * Jean II - HPL `00 - <jt@hpl.hp.com>
+ *
+ * This file contains all definitions and declarations necessary for the
+ * PPP part of the IrNET module.
+ * This file is a private header, so other modules don't want to know
+ * what's in there...
+ */
+
+#ifndef IRNET_PPP_H
+#define IRNET_PPP_H
+
+/***************************** INCLUDES *****************************/
+
+#include "irnet.h" /* Module global include */
+#include <linux/miscdevice.h>
+
+/************************ CONSTANTS & MACROS ************************/
+
+/* IrNET control channel stuff */
+#define IRNET_MAX_COMMAND 256 /* Max length of a command line */
+
+/* PPP hardcore stuff */
+
+/* Bits in rbits (PPP flags in irnet struct) */
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+/* Bit numbers in busy */
+#define XMIT_BUSY 0
+#define RECV_BUSY 1
+#define XMIT_WAKEUP 2
+#define XMIT_FULL 3
+
+/* Queue management */
+#define PPPSYNC_MAX_RQLEN 32 /* arbitrary */
+
+/****************************** TYPES ******************************/
+
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- CONTROL CHANNEL ----------------------- */
+static inline ssize_t
+ irnet_ctrl_write(irnet_socket *,
+ const char *,
+ size_t);
+static inline ssize_t
+ irnet_ctrl_read(irnet_socket *,
+ struct file *,
+ char *,
+ size_t);
+static inline unsigned int
+ irnet_ctrl_poll(irnet_socket *,
+ struct file *,
+ poll_table *);
+/* ----------------------- CHARACTER DEVICE ----------------------- */
+static int
+ dev_irnet_open(struct inode *, /* fs callback : open */
+ struct file *),
+ dev_irnet_close(struct inode *,
+ struct file *);
+static ssize_t
+ dev_irnet_write(struct file *,
+ const char __user *,
+ size_t,
+ loff_t *),
+ dev_irnet_read(struct file *,
+ char __user *,
+ size_t,
+ loff_t *);
+static unsigned int
+ dev_irnet_poll(struct file *,
+ poll_table *);
+static long
+ dev_irnet_ioctl(struct file *,
+ unsigned int,
+ unsigned long);
+/* ------------------------ PPP INTERFACE ------------------------ */
+static inline struct sk_buff *
+ irnet_prepare_skb(irnet_socket *,
+ struct sk_buff *);
+static int
+ ppp_irnet_send(struct ppp_channel *,
+ struct sk_buff *);
+static int
+ ppp_irnet_ioctl(struct ppp_channel *,
+ unsigned int,
+ unsigned long);
+
+/**************************** VARIABLES ****************************/
+
+/* Filesystem callbacks (to call us) */
+static const struct file_operations irnet_device_fops =
+{
+ .owner = THIS_MODULE,
+ .read = dev_irnet_read,
+ .write = dev_irnet_write,
+ .poll = dev_irnet_poll,
+ .unlocked_ioctl = dev_irnet_ioctl,
+ .open = dev_irnet_open,
+ .release = dev_irnet_close,
+ .llseek = noop_llseek,
+ /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
+};
+
+/* Structure so that the misc major (drivers/char/misc.c) takes care of us... */
+static struct miscdevice irnet_misc_device =
+{
+ .minor = IRNET_MINOR,
+ .name = "irnet",
+ .fops = &irnet_device_fops
+};
+
+#endif /* IRNET_PPP_H */
diff --git a/drivers/staging/irda/net/irnetlink.c b/drivers/staging/irda/net/irnetlink.c
new file mode 100644
index 000000000000..7fc340e574cf
--- /dev/null
+++ b/drivers/staging/irda/net/irnetlink.c
@@ -0,0 +1,162 @@
+/*
+ * IrDA netlink layer, for stack configuration.
+ *
+ * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
+ *
+ * Partly based on the 802.11 netlink implementation
+ * (see net/wireless/nl80211.c) which is:
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/socket.h>
+#include <linux/irda.h>
+#include <linux/gfp.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/irda/irda.h>
+#include <net/irda/irlap.h>
+#include <net/genetlink.h>
+
+
+
+static struct genl_family irda_nl_family;
+
+static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *info)
+{
+ char * ifname;
+
+ if (!info->attrs[IRDA_NL_ATTR_IFNAME])
+ return NULL;
+
+ ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);
+
+ pr_debug("%s(): Looking for %s\n", __func__, ifname);
+
+ return dev_get_by_name(net, ifname);
+}
+
+static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device * dev;
+ struct irlap_cb * irlap;
+ u32 mode;
+
+ if (!info->attrs[IRDA_NL_ATTR_MODE])
+ return -EINVAL;
+
+ mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);
+
+ pr_debug("%s(): Switching to mode: %d\n", __func__, mode);
+
+ dev = ifname_to_netdev(&init_net, info);
+ if (!dev)
+ return -ENODEV;
+
+ irlap = (struct irlap_cb *)dev->atalk_ptr;
+ if (!irlap) {
+ dev_put(dev);
+ return -ENODEV;
+ }
+
+ irlap->mode = mode;
+
+ dev_put(dev);
+
+ return 0;
+}
+
+static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device * dev;
+ struct irlap_cb * irlap;
+ struct sk_buff *msg;
+ void *hdr;
+ int ret = -ENOBUFS;
+
+ dev = ifname_to_netdev(&init_net, info);
+ if (!dev)
+ return -ENODEV;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ dev_put(dev);
+ return -ENOMEM;
+ }
+
+ irlap = (struct irlap_cb *)dev->atalk_ptr;
+ if (!irlap) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+ &irda_nl_family, 0, IRDA_NL_CMD_GET_MODE);
+ if (hdr == NULL) {
+ ret = -EMSGSIZE;
+ goto err_out;
+ }
+
+ if(nla_put_string(msg, IRDA_NL_ATTR_IFNAME,
+ dev->name))
+ goto err_out;
+
+ if(nla_put_u32(msg, IRDA_NL_ATTR_MODE, irlap->mode))
+ goto err_out;
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+ err_out:
+ nlmsg_free(msg);
+ dev_put(dev);
+
+ return ret;
+}
+
+static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
+ [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ-1 },
+ [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
+};
+
+static const struct genl_ops irda_nl_ops[] = {
+ {
+ .cmd = IRDA_NL_CMD_SET_MODE,
+ .doit = irda_nl_set_mode,
+ .policy = irda_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = IRDA_NL_CMD_GET_MODE,
+ .doit = irda_nl_get_mode,
+ .policy = irda_nl_policy,
+ /* can be retrieved by unprivileged users */
+ },
+
+};
+
+static struct genl_family irda_nl_family __ro_after_init = {
+ .name = IRDA_NL_NAME,
+ .hdrsize = 0,
+ .version = IRDA_NL_VERSION,
+ .maxattr = IRDA_NL_CMD_MAX,
+ .module = THIS_MODULE,
+ .ops = irda_nl_ops,
+ .n_ops = ARRAY_SIZE(irda_nl_ops),
+};
+
+int __init irda_nl_register(void)
+{
+ return genl_register_family(&irda_nl_family);
+}
+
+void irda_nl_unregister(void)
+{
+ genl_unregister_family(&irda_nl_family);
+}
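+
+/*
+ * User-space sketch (libnl-3, not compiled into this module) : querying the
+ * IrLAP mode of a hypothetical "irda0" interface through the genetlink
+ * family registered above. Error handling omitted for brevity.
+ *
+ *	#include <stdio.h>
+ *	#include <netlink/netlink.h>
+ *	#include <netlink/genl/genl.h>
+ *	#include <netlink/genl/ctrl.h>
+ *	#include <linux/irda.h>
+ *
+ *	static int get_mode_cb(struct nl_msg *msg, void *arg)
+ *	{
+ *		struct nlattr *attrs[IRDA_NL_ATTR_MAX + 1];
+ *
+ *		genlmsg_parse(nlmsg_hdr(msg), 0, attrs, IRDA_NL_ATTR_MAX, NULL);
+ *		if (attrs[IRDA_NL_ATTR_MODE])
+ *			printf("mode: %u\n", nla_get_u32(attrs[IRDA_NL_ATTR_MODE]));
+ *		return NL_OK;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct nl_sock *sk = nl_socket_alloc();
+ *		struct nl_msg *msg = nlmsg_alloc();
+ *		int family;
+ *
+ *		genl_connect(sk);
+ *		family = genl_ctrl_resolve(sk, IRDA_NL_NAME);
+ *		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, get_mode_cb, NULL);
+ *
+ *		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+ *			    IRDA_NL_CMD_GET_MODE, IRDA_NL_VERSION);
+ *		nla_put_string(msg, IRDA_NL_ATTR_IFNAME, "irda0");
+ *		nl_send_auto(sk, msg);
+ *		nl_recvmsgs_default(sk);
+ *		return 0;
+ *	}
+ */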
diff --git a/drivers/staging/irda/net/irproc.c b/drivers/staging/irda/net/irproc.c
new file mode 100644
index 000000000000..77cfdde9d82f
--- /dev/null
+++ b/drivers/staging/irda/net/irproc.c
@@ -0,0 +1,96 @@
+/*********************************************************************
+ *
+ * Filename: irproc.c
+ * Version: 1.0
+ * Description: Various entries in the /proc file system
+ * Status: Experimental.
+ * Author: Thomas Davis, <ratbert@radiks.net>
+ * Created at: Sat Feb 21 21:33:24 1998
+ * Modified at: Sun Nov 14 08:54:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999, Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * I, Thomas Davis, provide no warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/net_namespace.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlmp.h>
+
+extern const struct file_operations discovery_seq_fops;
+extern const struct file_operations irlap_seq_fops;
+extern const struct file_operations irlmp_seq_fops;
+extern const struct file_operations irttp_seq_fops;
+extern const struct file_operations irias_seq_fops;
+
+struct irda_entry {
+ const char *name;
+ const struct file_operations *fops;
+};
+
+struct proc_dir_entry *proc_irda;
+EXPORT_SYMBOL(proc_irda);
+
+static const struct irda_entry irda_dirs[] = {
+ {"discovery", &discovery_seq_fops},
+ {"irttp", &irttp_seq_fops},
+ {"irlmp", &irlmp_seq_fops},
+ {"irlap", &irlap_seq_fops},
+ {"irias", &irias_seq_fops},
+};
+
+/*
+ * Function irda_proc_register (void)
+ *
+ * Register irda entry in /proc file system
+ *
+ */
+void __init irda_proc_register(void)
+{
+ int i;
+
+ proc_irda = proc_mkdir("irda", init_net.proc_net);
+ if (proc_irda == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
+ (void) proc_create(irda_dirs[i].name, 0, proc_irda,
+ irda_dirs[i].fops);
+}
+
+/*
+ * Function irda_proc_unregister (void)
+ *
+ * Unregister irda entry in /proc file system
+ *
+ */
+void irda_proc_unregister(void)
+{
+ int i;
+
+ if (proc_irda) {
+ for (i=0; i<ARRAY_SIZE(irda_dirs); i++)
+ remove_proc_entry(irda_dirs[i].name, proc_irda);
+
+ remove_proc_entry("irda", init_net.proc_net);
+ proc_irda = NULL;
+ }
+}
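+
+/*
+ * The *_seq_fops referenced above are defined elsewhere in the stack, next
+ * to the code they expose. Each one follows the usual seq_file pattern; a
+ * minimal single-record sketch (hypothetical "irfoo" entry) would look like :
+ *
+ *	static int irfoo_seq_show(struct seq_file *seq, void *v)
+ *	{
+ *		seq_printf(seq, "nothing to see here\n");
+ *		return 0;
+ *	}
+ *
+ *	static int irfoo_seq_open(struct inode *inode, struct file *file)
+ *	{
+ *		return single_open(file, irfoo_seq_show, NULL);
+ *	}
+ *
+ *	const struct file_operations irfoo_seq_fops = {
+ *		.owner   = THIS_MODULE,
+ *		.open    = irfoo_seq_open,
+ *		.read    = seq_read,
+ *		.llseek  = seq_lseek,
+ *		.release = single_release,
+ *	};
+ */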
+
+
diff --git a/drivers/staging/irda/net/irqueue.c b/drivers/staging/irda/net/irqueue.c
new file mode 100644
index 000000000000..160dc89335e2
--- /dev/null
+++ b/drivers/staging/irda/net/irqueue.c
@@ -0,0 +1,911 @@
+/*********************************************************************
+ *
+ * Filename: irqueue.c
+ * Version: 0.3
+ * Description: General queue implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Jun 9 13:29:31 1998
+ * Modified at: Sun Dec 12 13:48:22 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Thu Jan 4 14:29:10 CET 2001
+ * Modified by: Marc Zyngier <mzyngier@freesurf.fr>
+ *
+ * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
+ * Copyright (C) 1998, Dag Brattli,
+ * All Rights Reserved.
+ *
+ * This code is taken from the Vortex Operating System written by Aage
+ * Kvalnes. Aage has agreed that this code can use the GPL licence,
+ * although he does not use that licence in his own code.
+ *
+ * This copyright does however _not_ include the ELF hash() function,
+ * for which I currently don't know the licence or copyright.
+ * Please inform me if you know.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * NOTE :
+ * There are various problems with this package :
+ * o the hash function for ints is pathetic (but could be changed)
+ * o locking is sometimes suspicious (especially during enumeration)
+ * o most users have only a few elements (== overhead)
+ * o most users never use search, so don't benefit from hashing
+ * Problem already fixed :
+ * o not 64 bit compliant (most users do hashv = (int) self)
+ * o hashbin_remove() is broken => use hashbin_remove_this()
+ * I think most users would be better served by a simple linked list
+ * (like include/linux/list.h) with a global spinlock per list.
+ * Jean II
+ */
+
+/*
+ * Notes on the concurrent access to hashbin and other SMP issues
+ * -------------------------------------------------------------
+ * In the IrDA stack, hashbins are very often a global repository of
+ * information, and are therefore used in a very asynchronous manner
+ * following various events (driver calls, timers, user calls...).
+ * It is therefore highly important to consider the management of
+ * concurrent access to the hashbin and how to guarantee the
+ * consistency of the operations on it.
+ *
+ * First, we need to define the objective of locking :
+ * 1) Protect user data (content pointed by the hashbin)
+ * 2) Protect hashbin structure itself (linked list in each bin)
+ *
+ * OLD LOCKING
+ * -----------
+ *
+ * The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
+ * both inadequate in *both* aspects.
+ * o HB_GLOBAL was using a spinlock for each bin (local locking).
+ * o HB_LOCAL was disabling irq on *all* CPUs, so it used a single
+ * global semaphore.
+ * The problems were :
+ * A) Global irq disabling is no longer supported by the kernel
+ * B) No protection for the hashbin struct global data
+ * o hashbin_delete()
+ * o hb_current
+ * C) No protection for user data in some cases
+ *
+ * A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
+ * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
+ * performance is not satisfactory on SMP setups. Most hashbins were
+ * HB_LOCAL, so (A) definitely needs fixing.
+ * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
+ * locks only the individual bins, it will never be able to lock the
+ * global data, so it can't do (B).
+ * C) Some functions return a pointer to data that is still in the
+ * hashbin :
+ * o hashbin_find()
+ * o hashbin_get_first()
+ * o hashbin_get_next()
+ * As the data is still in the hashbin, it may be changed or freed
+ * while the caller is examining the data. In those cases, locking can't
+ * be done within the hashbin, but must include use of the data within
+ * the caller.
+ * The caller can easily do this with HB_LOCAL (just disable irqs).
+ * However, this is impossible with HB_GLOBAL because the caller has no
+ * way to know the proper bin, so it doesn't know which spinlock to use.
+ *
+ * Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is
+ * fundamentally broken and will never work.
+ *
+ * NEW LOCKING
+ * -----------
+ *
+ * To fix those problems, I've introduced a few changes in the
+ * hashbin locking :
+ * 1) New HB_LOCK scheme
+ * 2) hashbin->hb_spinlock
+ * 3) New hashbin usage policy
+ *
+ * HB_LOCK :
+ * -------
+ * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
+ * and HB_GLOBAL. It uses a single spinlock to protect the whole content
+ * of the hashbin. As it is a single spinlock, it can protect the global
+ * data of the hashbin and not only the bins themselves.
+ * HB_LOCK can only protect some of the hashbin calls, so it only locks
+ * calls that can be made 100% safe and leaves other calls unprotected.
+ * HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
+ * content is always small, contention is not high, so it doesn't matter
+ * much. HB_LOCK is probably faster than HB_LOCAL.
+ *
+ * hashbin->hb_spinlock :
+ * --------------------
+ * The spinlock that HB_LOCK uses is available to the caller, so that
+ * the caller can protect unprotected calls (see below).
+ * If the caller wants to do its own locking entirely (HB_NOLOCK), it
+ * can do so and may safely use this spinlock.
+ * Locking is done like this :
+ * spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ * Releasing the lock :
+ * spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ *
+ * Safe & Protected calls :
+ * ----------------------
+ * The following calls are safe or protected via HB_LOCK :
+ * o hashbin_new() -> safe
+ * o hashbin_delete()
+ * o hashbin_insert()
+ * o hashbin_remove_first()
+ * o hashbin_remove()
+ * o hashbin_remove_this()
+ * o HASHBIN_GET_SIZE() -> atomic
+ *
+ * The following calls only protect the hashbin itself :
+ * o hashbin_lock_find()
+ * o hashbin_find_next()
+ *
+ * Unprotected calls :
+ * -----------------
+ * The following calls need to be protected by the caller :
+ * o hashbin_find()
+ * o hashbin_get_first()
+ * o hashbin_get_next()
+ *
+ * Locking Policy :
+ * --------------
+ * If the hashbin is used only in a single thread of execution
+ * (explicitly or implicitly), you can use HB_NOLOCK.
+ * If the calling module already provides concurrent access protection,
+ * you may use HB_NOLOCK.
+ *
+ * In all other cases, you need to use HB_LOCK and lock the hashbin
+ * every time before calling one of the unprotected calls. You also must
+ * use the pointer returned by the unprotected call within the locked
+ * region.
+ *
+ * Extra care for enumeration :
+ * --------------------------
+ * hashbin_get_first() and hashbin_get_next() use the hashbin to
+ * store the current position, in hb_current.
+ * As long as the hashbin remains locked, this is safe. If you unlock
+ * the hashbin, the current position may change if anybody else modifies
+ * or enumerates the hashbin.
+ * Summary : do the full enumeration while locked.
+ *
+ * Alternatively, you may use hashbin_find_next(). But, this will
+ * be slower, is more complex to use and doesn't protect the hashbin
+ * content. So, care is needed here as well.
+ *
+ * Other issues :
+ * ------------
+ * I believe that we are overdoing it by using spin_lock_irqsave()
+ * and we should use only spin_lock_bh() or similar. But, I don't have
+ * the balls to try it out.
+ * Don't believe that because hashbins are now (somewhat) SMP safe,
+ * the rest of the code is. Higher layers tend to be safest,
+ * but LAP and LMP would need some serious dedicated love.
+ *
+ * Jean II
+ */
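+
+/*
+ * A minimal enumeration sketch following the policy above, for an HB_LOCK
+ * hashbin holding hypothetical "struct foo" entries (whose first member is
+ * an irda_queue_t) and a hypothetical do_something() helper. The lock is
+ * held for the whole walk, so the body must not sleep :
+ *
+ *	unsigned long flags;
+ *	struct foo *entry;
+ *
+ *	spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ *	for (entry = (struct foo *) hashbin_get_first(hashbin);
+ *	     entry != NULL;
+ *	     entry = (struct foo *) hashbin_get_next(hashbin))
+ *		do_something(entry);
+ *	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ */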
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irqueue.h>
+
+/************************ QUEUE SUBROUTINES ************************/
+
+/*
+ * Hashbin
+ */
+#define GET_HASHBIN(x) ( x & HASHBIN_MASK )
+
+/*
+ * Function hash (name)
+ *
+ * This function hashes the input string 'name' using the ELF hash
+ * function for strings.
+ */
+static __u32 hash( const char* name)
+{
+ __u32 h = 0;
+ __u32 g;
+
+ while(*name) {
+ h = (h<<4) + *name++;
+ if ((g = (h & 0xf0000000)))
+ h ^=g>>24;
+ h &=~g;
+ }
+ return h;
+}
+
+/*
+ * Function enqueue_first (queue, proc)
+ *
+ * Insert item first in queue.
+ *
+ */
+static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
+{
+
+ /*
+ * Check if queue is empty.
+ */
+ if ( *queue == NULL ) {
+ /*
+ * Queue is empty. Insert one element into the queue.
+ */
+ element->q_next = element->q_prev = *queue = element;
+
+ } else {
+ /*
+ * Queue is not empty. Insert element into front of queue.
+ */
+ element->q_next = (*queue);
+ (*queue)->q_prev->q_next = element;
+ element->q_prev = (*queue)->q_prev;
+ (*queue)->q_prev = element;
+ (*queue) = element;
+ }
+}
+
+
+/*
+ * Function dequeue (queue)
+ *
+ * Remove first entry in queue
+ *
+ */
+static irda_queue_t *dequeue_first(irda_queue_t **queue)
+{
+ irda_queue_t *ret;
+
+ pr_debug("dequeue_first()\n");
+
+ /*
+ * Set return value
+ */
+ ret = *queue;
+
+ if ( *queue == NULL ) {
+ /*
+ * Queue was empty.
+ */
+ } else if ( (*queue)->q_next == *queue ) {
+ /*
+ * Queue only contained a single element. It will now be
+ * empty.
+ */
+ *queue = NULL;
+ } else {
+ /*
+ * Queue contained several elements. Remove the first one.
+ */
+ (*queue)->q_prev->q_next = (*queue)->q_next;
+ (*queue)->q_next->q_prev = (*queue)->q_prev;
+ *queue = (*queue)->q_next;
+ }
+
+ /*
+ * Return the removed entry (or NULL if the queue was empty).
+ */
+ return ret;
+}
+
+/*
+ * Function dequeue_general (queue, element)
+ *
+ *
+ */
+static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
+{
+ irda_queue_t *ret;
+
+ pr_debug("dequeue_general()\n");
+
+ /*
+ * Set return value
+ */
+ ret = *queue;
+
+ if ( *queue == NULL ) {
+ /*
+ * Queue was empty.
+ */
+ } else if ( (*queue)->q_next == *queue ) {
+ /*
+ * Queue only contained a single element. It will now be
+ * empty.
+ */
+ *queue = NULL;
+
+ } else {
+ /*
+ * Remove specific element.
+ */
+ element->q_prev->q_next = element->q_next;
+ element->q_next->q_prev = element->q_prev;
+ if ( (*queue) == element)
+ (*queue) = element->q_next;
+ }
+
+ /*
+ * Return the removed entry (or NULL if the queue was empty).
+ */
+ return ret;
+}
+
+/************************ HASHBIN MANAGEMENT ************************/
+
+/*
+ * Function hashbin_new (type)
+ *
+ * Create hashbin!
+ *
+ */
+hashbin_t *hashbin_new(int type)
+{
+ hashbin_t* hashbin;
+
+ /*
+ * Allocate new hashbin
+ */
+ hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
+ if (!hashbin)
+ return NULL;
+
+ /*
+ * Initialize structure
+ */
+ hashbin->hb_type = type;
+ hashbin->magic = HB_MAGIC;
+ //hashbin->hb_current = NULL;
+
+ /* Make sure all spinlock's are unlocked */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_init(&hashbin->hb_spinlock);
+ }
+
+ return hashbin;
+}
+EXPORT_SYMBOL(hashbin_new);
+
+
+/*
+ * Function hashbin_delete (hashbin, free_func)
+ *
+ * Destroy hashbin, the free_func can be a user supplied special routine
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+{
+ irda_queue_t* queue;
+ unsigned long flags = 0;
+ int i;
+
+ IRDA_ASSERT(hashbin != NULL, return -1;);
+ IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
+
+ /* Synchronize */
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+
+ /*
+ * Free the entries in the hashbin, TODO: use hashbin_clear when
+ * it has been shown to work
+ */
+ for (i = 0; i < HASHBIN_SIZE; i ++ ) {
+ while (1) {
+ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+ if (!queue)
+ break;
+
+ if (free_func) {
+ if (hashbin->hb_type & HB_LOCK)
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ free_func(queue);
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ }
+ }
+ }
+
+ /* Cleanup local data */
+ hashbin->hb_current = NULL;
+ hashbin->magic = ~HB_MAGIC;
+
+ /* Release lock */
+ if (hashbin->hb_type & HB_LOCK)
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+
+ /*
+ * Free the hashbin structure
+ */
+ kfree(hashbin);
+
+ return 0;
+}
+EXPORT_SYMBOL(hashbin_delete);
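+
+/*
+ * Lifecycle sketch : a hypothetical "struct foo" whose first member is an
+ * irda_queue_t, keyed by its own address (the only simple safe key, see
+ * hashbin_remove() below). hashbin_delete() also frees whatever is left
+ * inside via the supplied free_func :
+ *
+ *	hashbin_t *hb = hashbin_new(HB_LOCK);
+ *	struct foo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ *
+ *	if (hb && obj)
+ *		hashbin_insert(hb, (irda_queue_t *) obj, (long) obj, NULL);
+ *
+ *	hashbin_delete(hb, (FREE_FUNC) kfree);
+ */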
+
+/********************* HASHBIN LIST OPERATIONS *********************/
+
+/*
+ * Function hashbin_insert (hashbin, entry, name)
+ *
+ * Insert an entry into the hashbin
+ *
+ */
+void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
+ const char* name)
+{
+ unsigned long flags = 0;
+ int bin;
+
+ IRDA_ASSERT( hashbin != NULL, return;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);
+
+ /*
+ * Locate hashbin
+ */
+ if ( name )
+ hashv = hash( name );
+ bin = GET_HASHBIN( hashv );
+
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ /*
+ * Store name and key
+ */
+ entry->q_hash = hashv;
+ if ( name )
+ strlcpy( entry->q_name, name, sizeof(entry->q_name));
+
+ /*
+ * Insert new entry first
+ */
+ enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
+ entry);
+ hashbin->hb_size++;
+
+ /* Release lock */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+}
+EXPORT_SYMBOL(hashbin_insert);
+
+/*
+ * Function hashbin_remove_first (hashbin)
+ *
+ * Remove first entry of the hashbin
+ *
+ * Note : this function no longer uses hashbin_remove(), but does things
+ * similar to hashbin_remove_this(), so can be considered safe.
+ * Jean II
+ */
+void *hashbin_remove_first( hashbin_t *hashbin)
+{
+ unsigned long flags = 0;
+ irda_queue_t *entry = NULL;
+
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ entry = hashbin_get_first( hashbin);
+ if ( entry != NULL) {
+ int bin;
+ long hashv;
+ /*
+ * Locate hashbin
+ */
+ hashv = entry->q_hash;
+ bin = GET_HASHBIN( hashv );
+
+ /*
+ * Dequeue the entry...
+ */
+ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
+ entry);
+ hashbin->hb_size--;
+ entry->q_next = NULL;
+ entry->q_prev = NULL;
+
+ /*
+ * Check if this item is the currently selected item, and in
+ * that case we must reset hb_current
+ */
+ if ( entry == hashbin->hb_current)
+ hashbin->hb_current = NULL;
+ }
+
+ /* Release lock */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ return entry;
+}
+
+
+/*
+ * Function hashbin_remove (hashbin, hashv, name)
+ *
+ * Remove entry with the given name
+ *
+ * The use of this function is highly discouraged, because the whole
+ * concept behind hashbin_remove() is broken. In many cases, it's not
+ * possible to guarantee the uniqueness of the index (either hashv or name),
+ * leading to removing the WRONG entry.
+ * The only simple safe use is :
+ * hashbin_remove(hashbin, (int) self, NULL);
+ * In other cases, you must think hard to guarantee the uniqueness of the index.
+ * Jean II
+ */
+void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
+{
+ int bin, found = FALSE;
+ unsigned long flags = 0;
+ irda_queue_t* entry;
+
+ IRDA_ASSERT( hashbin != NULL, return NULL;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
+
+ /*
+ * Locate hashbin
+ */
+ if ( name )
+ hashv = hash( name );
+ bin = GET_HASHBIN( hashv );
+
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ /*
+ * Search for entry
+ */
+ entry = hashbin->hb_queue[ bin ];
+ if ( entry ) {
+ do {
+ /*
+ * Check for key
+ */
+ if ( entry->q_hash == hashv ) {
+ /*
+ * Name compare too?
+ */
+ if ( name ) {
+ if ( strcmp( entry->q_name, name) == 0)
+ {
+ found = TRUE;
+ break;
+ }
+ } else {
+ found = TRUE;
+ break;
+ }
+ }
+ entry = entry->q_next;
+ } while ( entry != hashbin->hb_queue[ bin ] );
+ }
+
+ /*
+ * If entry was found, dequeue it
+ */
+ if ( found ) {
+ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
+ entry);
+ hashbin->hb_size--;
+
+ /*
+ * Check if this item is the currently selected item, and in
+ * that case we must reset hb_current
+ */
+ if ( entry == hashbin->hb_current)
+ hashbin->hb_current = NULL;
+ }
+
+ /* Release lock */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+
+ /* Return */
+ if ( found )
+ return entry;
+ else
+ return NULL;
+
+}
+EXPORT_SYMBOL(hashbin_remove);
+
+/*
+ * Function hashbin_remove_this (hashbin, entry)
+ *
+ * Remove the given entry from the hashbin
+ *
+ * In some cases, the user of the hashbin can't guarantee the uniqueness
+ * of either the hashv or the name.
+ * In those cases, using the above function is guaranteed to cause trouble,
+ * so we use this one instead...
+ * And by the way, it's also faster, because we skip the search phase ;-)
+ */
+void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
+{
+ unsigned long flags = 0;
+ int bin;
+ long hashv;
+
+ IRDA_ASSERT( hashbin != NULL, return NULL;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
+ IRDA_ASSERT( entry != NULL, return NULL;);
+
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ /* Check if valid and not already removed... */
+ if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
+ entry = NULL;
+ goto out;
+ }
+
+ /*
+ * Locate hashbin
+ */
+ hashv = entry->q_hash;
+ bin = GET_HASHBIN( hashv );
+
+ /*
+ * Dequeue the entry...
+ */
+ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
+ entry);
+ hashbin->hb_size--;
+ entry->q_next = NULL;
+ entry->q_prev = NULL;
+
+ /*
+ * Check if this item is the currently selected item, and in
+ * that case we must reset hb_current
+ */
+ if ( entry == hashbin->hb_current)
+ hashbin->hb_current = NULL;
+out:
+ /* Release lock */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ } /* Default is no-lock */
+
+ return entry;
+}
+EXPORT_SYMBOL(hashbin_remove_this);
+
+/*********************** HASHBIN ENUMERATION ***********************/
+
+/*
+ * Function hashbin_find (hashbin, hashv, name)
+ *
+ * Find item with the given hashv or name
+ *
+ */
+void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
+{
+ int bin;
+ irda_queue_t* entry;
+
+ pr_debug("hashbin_find()\n");
+
+ IRDA_ASSERT( hashbin != NULL, return NULL;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
+
+ /*
+ * Locate hashbin
+ */
+ if ( name )
+ hashv = hash( name );
+ bin = GET_HASHBIN( hashv );
+
+ /*
+ * Search for entry
+ */
+ entry = hashbin->hb_queue[ bin];
+ if ( entry ) {
+ do {
+ /*
+ * Check for key
+ */
+ if ( entry->q_hash == hashv ) {
+ /*
+ * Name compare too?
+ */
+ if ( name ) {
+ if ( strcmp( entry->q_name, name ) == 0 ) {
+ return entry;
+ }
+ } else {
+ return entry;
+ }
+ }
+ entry = entry->q_next;
+ } while ( entry != hashbin->hb_queue[ bin ] );
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(hashbin_find);
+
+/*
+ * Function hashbin_lock_find (hashbin, hashv, name)
+ *
+ * Find item with the given hashv or name
+ *
+ * Same, but with spinlock protection...
+ * I call it safe, but it's only safe with respect to the hashbin, not its
+ * content. - Jean II
+ */
+void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
+{
+ unsigned long flags = 0;
+ irda_queue_t* entry;
+
+ /* Synchronize */
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+
+ /*
+ * Search for entry
+ */
+ entry = hashbin_find(hashbin, hashv, name);
+
+ /* Release lock */
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+
+ return entry;
+}
+EXPORT_SYMBOL(hashbin_lock_find);
+
+/*
+ * Function hashbin_find_next (hashbin, hashv, name, pnext)
+ *
+ * Find an item with the given hashv or name, and its successor
+ *
+ * This function allows concurrent enumerations without the
+ * need to lock over the whole session, because the caller keeps the
+ * context of the search. On the other hand, it might fail and return
+ * NULL if the entry is removed. - Jean II
+ */
+void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
+ void ** pnext)
+{
+ unsigned long flags = 0;
+ irda_queue_t* entry;
+
+ /* Synchronize */
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+
+ /*
+ * Search for current entry
+ * This allows us to check whether the current item is still in the
+ * hashbin or has been removed.
+ */
+ entry = hashbin_find(hashbin, hashv, name);
+
+ /*
+ * Trick hashbin_get_next() to return what we want
+ */
+ if(entry) {
+ hashbin->hb_current = entry;
+ *pnext = hashbin_get_next( hashbin );
+ } else
+ *pnext = NULL;
+
+ /* Release lock */
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+
+ return entry;
+}
+
+/*
+ * Function hashbin_get_first (hashbin)
+ *
+ * Get a pointer to the first element in the hashbin; this function must
+ * be called before any calls to hashbin_get_next()!
+ *
+ */
+irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
+{
+ irda_queue_t *entry;
+ int i;
+
+ IRDA_ASSERT( hashbin != NULL, return NULL;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
+
+ if ( hashbin == NULL)
+ return NULL;
+
+ for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
+ entry = hashbin->hb_queue[ i];
+ if ( entry) {
+ hashbin->hb_current = entry;
+ return entry;
+ }
+ }
+ /*
+ * Did not find any item in hashbin
+ */
+ return NULL;
+}
+EXPORT_SYMBOL(hashbin_get_first);
+
+/*
+ * Function hashbin_get_next (hashbin)
+ *
+ * Get next item in hashbin. A series of hashbin_get_next() calls must
+ * be started by a call to hashbin_get_first(). The function returns
+ * NULL when all items have been traversed
+ *
+ * The context of the search is stored within the hashbin, so you must
+ * protect yourself from concurrent enumerations. - Jean II
+ */
+irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
+{
+ irda_queue_t* entry;
+ int bin;
+ int i;
+
+ IRDA_ASSERT( hashbin != NULL, return NULL;);
+ IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
+
+ if ( hashbin->hb_current == NULL) {
+ IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
+ return NULL;
+ }
+ entry = hashbin->hb_current->q_next;
+ bin = GET_HASHBIN( entry->q_hash);
+
+ /*
+ * Make sure that we are not back at the beginning of the queue
+ * again
+ */
+ if ( entry != hashbin->hb_queue[ bin ]) {
+ hashbin->hb_current = entry;
+
+ return entry;
+ }
+
+ /*
+ * Check that this is not the last queue in hashbin
+ */
+ if ( bin >= HASHBIN_SIZE)
+ return NULL;
+
+ /*
+ * Move to next queue in hashbin
+ */
+ bin++;
+ for ( i = bin; i < HASHBIN_SIZE; i++ ) {
+ entry = hashbin->hb_queue[ i];
+ if ( entry) {
+ hashbin->hb_current = entry;
+
+ return entry;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(hashbin_get_next);
diff --git a/drivers/staging/irda/net/irsysctl.c b/drivers/staging/irda/net/irsysctl.c
new file mode 100644
index 000000000000..873da5e7d428
--- /dev/null
+++ b/drivers/staging/irda/net/irsysctl.c
@@ -0,0 +1,258 @@
+/*********************************************************************
+ *
+ * Filename: irsysctl.c
+ * Version: 1.0
+ * Description: Sysctl interface for IrDA
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun May 24 22:12:06 1998
+ * Modified at: Fri Jun 4 02:50:15 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/mm.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h> /* irda_debug */
+#include <net/irda/irlmp.h>
+#include <net/irda/timer.h>
+#include <net/irda/irias_object.h>
+
+extern int sysctl_discovery;
+extern int sysctl_discovery_slots;
+extern int sysctl_discovery_timeout;
+extern int sysctl_slot_timeout;
+extern int sysctl_fast_poll_increase;
+extern char sysctl_devname[];
+extern int sysctl_max_baud_rate;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
+extern int sysctl_max_noreply_time;
+extern int sysctl_warn_noreply_time;
+extern int sysctl_lap_keepalive_time;
+
+extern struct irlmp_cb *irlmp;
+
+/* this is needed for the proc_dointvec_minmax - Jean II */
+static int max_discovery_slots = 16; /* ??? */
+static int min_discovery_slots = 1;
+/* IrLAP 6.13.2 says 25ms to 10+70ms - allow higher since some devices
+ * seem to require it. (from Dag's comment) */
+static int max_slot_timeout = 160;
+static int min_slot_timeout = 20;
+static int max_max_baud_rate = 16000000; /* See qos.c - IrLAP spec */
+static int min_max_baud_rate = 2400;
+static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */
+static int min_min_tx_turn_time;
+static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
+static int min_max_tx_data_size = 64;
+static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */
+static int min_max_tx_window = 1;
+static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
+static int min_max_noreply_time = 3;
+static int max_warn_noreply_time = 3; /* 3s == standard */
+static int min_warn_noreply_time = 1; /* 1s == min WD_TIMER */
+static int max_lap_keepalive_time = 10000; /* 10s */
+static int min_lap_keepalive_time = 100; /* 100us */
+/* For the other sysctls, I've no idea of the range. Maybe Dag could help
+ * us on that - Jean II */
+
+static int do_devname(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dostring(table, write, buffer, lenp, ppos);
+ if (ret == 0 && write) {
+ struct ias_value *val;
+
+ val = irias_new_string_value(sysctl_devname);
+ if (val)
+ irias_object_change_attribute("Device", "DeviceName", val);
+ }
+ return ret;
+}
+
+
+static int do_discovery(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+
+ if (irlmp == NULL)
+ return -ENODEV;
+
+ if (sysctl_discovery)
+ irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout*HZ);
+ else
+ del_timer_sync(&irlmp->discovery_timer);
+
+ return ret;
+}
+
+/* One file */
+static struct ctl_table irda_table[] = {
+ {
+ .procname = "discovery",
+ .data = &sysctl_discovery,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = do_discovery,
+ },
+ {
+ .procname = "devname",
+ .data = sysctl_devname,
+ .maxlen = 65,
+ .mode = 0644,
+ .proc_handler = do_devname,
+ },
+#ifdef CONFIG_IRDA_FAST_RR
+ {
+ .procname = "fast_poll_increase",
+ .data = &sysctl_fast_poll_increase,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
+ {
+ .procname = "discovery_slots",
+ .data = &sysctl_discovery_slots,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_discovery_slots,
+ .extra2 = &max_discovery_slots
+ },
+ {
+ .procname = "discovery_timeout",
+ .data = &sysctl_discovery_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "slot_timeout",
+ .data = &sysctl_slot_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_slot_timeout,
+ .extra2 = &max_slot_timeout
+ },
+ {
+ .procname = "max_baud_rate",
+ .data = &sysctl_max_baud_rate,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_max_baud_rate,
+ .extra2 = &max_max_baud_rate
+ },
+ {
+ .procname = "min_tx_turn_time",
+ .data = &sysctl_min_tx_turn_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_min_tx_turn_time,
+ .extra2 = &max_min_tx_turn_time
+ },
+ {
+ .procname = "max_tx_data_size",
+ .data = &sysctl_max_tx_data_size,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_max_tx_data_size,
+ .extra2 = &max_max_tx_data_size
+ },
+ {
+ .procname = "max_tx_window",
+ .data = &sysctl_max_tx_window,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_max_tx_window,
+ .extra2 = &max_max_tx_window
+ },
+ {
+ .procname = "max_noreply_time",
+ .data = &sysctl_max_noreply_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_max_noreply_time,
+ .extra2 = &max_max_noreply_time
+ },
+ {
+ .procname = "warn_noreply_time",
+ .data = &sysctl_warn_noreply_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_warn_noreply_time,
+ .extra2 = &max_warn_noreply_time
+ },
+ {
+ .procname = "lap_keepalive_time",
+ .data = &sysctl_lap_keepalive_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_lap_keepalive_time,
+ .extra2 = &max_lap_keepalive_time
+ },
+ { }
+};
+
+static struct ctl_table_header *irda_table_header;
+
+/*
+ * Function irda_sysctl_register (void)
+ *
+ * Register our sysctl interface
+ *
+ */
+int __init irda_sysctl_register(void)
+{
+ irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
+ if (!irda_table_header)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Function irda_sysctl_unregister (void)
+ *
+ * Unregister our sysctl interface
+ *
+ */
+void irda_sysctl_unregister(void)
+{
+ unregister_net_sysctl_table(irda_table_header);
+}
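+
+/*
+ * Once registered, the table above shows up under /proc/sys/net/irda/.
+ * A minimal user-space sketch toggling discovery (which ends up in
+ * do_discovery() above) :
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int fd = open("/proc/sys/net/irda/discovery", O_WRONLY);
+ *	if (fd >= 0) {
+ *		write(fd, "1", 1);
+ *		close(fd);
+ *	}
+ */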
+
+
+
diff --git a/drivers/staging/irda/net/irttp.c b/drivers/staging/irda/net/irttp.c
new file mode 100644
index 000000000000..b6ab41d5b3a3
--- /dev/null
+++ b/drivers/staging/irda/net/irttp.c
@@ -0,0 +1,1891 @@
+/*********************************************************************
+ *
+ * Filename: irttp.c
+ * Version: 1.2
+ * Description: Tiny Transport Protocol (TTP) implementation
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:31 1997
+ * Modified at: Wed Jan 5 11:31:27 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/parameters.h>
+#include <net/irda/irttp.h>
+
+static struct irttp_cb *irttp;
+
+static void __irttp_close_tsap(struct tsap_cb *self);
+
+static int irttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static int irttp_udata_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void irttp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *);
+static void irttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 header_size, struct sk_buff *skb);
+static void irttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 header_size, struct sk_buff *skb);
+static void irttp_run_tx_queue(struct tsap_cb *self);
+static void irttp_run_rx_queue(struct tsap_cb *self);
+
+static void irttp_flush_queues(struct tsap_cb *self);
+static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
+static void irttp_todo_expired(unsigned long data);
+static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
+ int get);
+
+static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
+static void irttp_status_indication(void *instance,
+ LINK_STATUS link, LOCK_STATUS lock);
+
+/* Information for parsing parameters in IrTTP */
+static const pi_minor_info_t pi_minor_call_table[] = {
+ { NULL, 0 }, /* 0x00 */
+ { irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
+};
+static const pi_major_info_t pi_major_call_table[] = {
+ { pi_minor_call_table, 2 }
+};
+static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
+
+/************************ GLOBAL PROCEDURES ************************/
+
+/*
+ * Function irttp_init (void)
+ *
+ * Initialize the IrTTP layer. Called by module initialization code
+ *
+ */
+int __init irttp_init(void)
+{
+ irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
+ if (irttp == NULL)
+ return -ENOMEM;
+
+ irttp->magic = TTP_MAGIC;
+
+ irttp->tsaps = hashbin_new(HB_LOCK);
+ if (!irttp->tsaps) {
+ net_err_ratelimited("%s: can't allocate IrTTP hashbin!\n",
+ __func__);
+ kfree(irttp);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Function irttp_cleanup (void)
+ *
+ * Called by module destruction/cleanup code
+ *
+ */
+void irttp_cleanup(void)
+{
+ /* Check for main structure */
+ IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
+
+ /*
+ * Delete hashbin and close all TSAP instances in it
+ */
+ hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);
+
+ irttp->magic = 0;
+
+ /* De-allocate main structure */
+ kfree(irttp);
+
+ irttp = NULL;
+}
+
+/*************************** SUBROUTINES ***************************/
+
+/*
+ * Function irttp_start_todo_timer (self, timeout)
+ *
+ * Start todo timer.
+ *
+ * Made it more efficient and insensitive to race conditions - Jean II
+ */
+static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
+{
+ /* Set new value for timer */
+ mod_timer(&self->todo_timer, jiffies + timeout);
+}
+
+/*
+ * Function irttp_todo_expired (data)
+ *
+ * Todo timer has expired!
+ *
+ * One of the restrictions of the timer is that it is run only on the timer
+ * interrupt, which runs every 10ms. This means that even if you set the timer
+ * with a delay of 0, it may take up to 10ms before it's run.
+ * So, to minimise latency and keep cache fresh, we try to avoid using
+ * it as much as possible.
+ * Note : we can't use tasklets, because they can't be asynchronously
+ * killed (need user context), and we can't guarantee that here...
+ * Jean II
+ */
+static void irttp_todo_expired(unsigned long data)
+{
+ struct tsap_cb *self = (struct tsap_cb *) data;
+
+ /* Check that we still exist */
+ if (!self || self->magic != TTP_TSAP_MAGIC)
+ return;
+
+ pr_debug("%s(instance=%p)\n", __func__, self);
+
+ /* Try to make some progress, especially on Tx side - Jean II */
+ irttp_run_rx_queue(self);
+ irttp_run_tx_queue(self);
+
+ /* Check if time for disconnect */
+ if (test_bit(0, &self->disconnect_pend)) {
+ /* Check if it's possible to disconnect yet */
+ if (skb_queue_empty(&self->tx_queue)) {
+ /* Make sure disconnect is not pending anymore */
+ clear_bit(0, &self->disconnect_pend); /* FALSE */
+
+ /* Note : self->disconnect_skb may be NULL */
+ irttp_disconnect_request(self, self->disconnect_skb,
+ P_NORMAL);
+ self->disconnect_skb = NULL;
+ } else {
+ /* Try again later */
+ irttp_start_todo_timer(self, HZ/10);
+
+ /* No reason to try and close now */
+ return;
+ }
+ }
+
+ /* Check if it's closing time */
+ if (self->close_pend)
+ /* Finish cleanup */
+ irttp_close_tsap(self);
+}
+
+/*
+ * Function irttp_flush_queues (self)
+ *
+ * Flushes (removes) all frames in the transit buffers (tx_queue, rx_queue and rx_fragments)
+ */
+static void irttp_flush_queues(struct tsap_cb *self)
+{
+ struct sk_buff *skb;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ /* Deallocate frames waiting to be sent */
+ while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
+ /* Deallocate received frames */
+ while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
+ /* Deallocate received fragments */
+ while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function irttp_reassemble (self)
+ *
+ * Makes a new (contiguous) skb of all the fragments in the fragment
+ * queue
+ *
+ */
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
+{
+ struct sk_buff *skb, *frag;
+ int n = 0; /* Fragment index */
+
+ IRDA_ASSERT(self != NULL, return NULL;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
+
+ pr_debug("%s(), self->rx_sdu_size=%d\n", __func__,
+ self->rx_sdu_size);
+
+ skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
+ if (!skb)
+ return NULL;
+
+ /*
+ * Need to reserve space for the TTP header in case this skb needs to
+ * be requeued should delivery fail
+ */
+ skb_reserve(skb, TTP_HEADER);
+ skb_put(skb, self->rx_sdu_size);
+
+ /*
+ * Copy all fragments to a new buffer
+ */
+ while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
+ skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
+ n += frag->len;
+
+ dev_kfree_skb(frag);
+ }
+
+ pr_debug("%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
+ __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
+ /* Note : irttp_run_rx_queue() calculates self->rx_sdu_size
+ * by summing the size of all fragments, so we should always
+ * have n == self->rx_sdu_size, except in cases where we
+ * dropped the last fragment (when self->rx_sdu_size exceeds
+ * self->rx_max_sdu_size), where n < self->rx_sdu_size.
+ * Jean II */
+ IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);
+
+ /* Set the new length */
+ skb_trim(skb, n);
+
+ self->rx_sdu_size = 0;
+
+ return skb;
+}
+
+/*
+ * Function irttp_fragment_skb (skb)
+ *
+ * Fragments a frame and queues all the fragments for transmission
+ *
+ */
+static inline void irttp_fragment_skb(struct tsap_cb *self,
+ struct sk_buff *skb)
+{
+ struct sk_buff *frag;
+ __u8 *frame;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ /*
+ * Split frame into a number of segments
+ */
+ while (skb->len > self->max_seg_size) {
+ pr_debug("%s(), fragmenting ...\n", __func__);
+
+ /* Make new segment */
+ frag = alloc_skb(self->max_seg_size+self->max_header_size,
+ GFP_ATOMIC);
+ if (!frag)
+ return;
+
+ skb_reserve(frag, self->max_header_size);
+
+ /* Copy data from the original skb into this fragment. */
+ skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
+ self->max_seg_size);
+
+ /* Insert TTP header, with the more bit set */
+ frame = skb_push(frag, TTP_HEADER);
+ frame[0] = TTP_MORE;
+
+ /* Hide the copied data from the original skb */
+ skb_pull(skb, self->max_seg_size);
+
+ /* Queue fragment */
+ skb_queue_tail(&self->tx_queue, frag);
+ }
+ /* Queue what is left of the original skb */
+ pr_debug("%s(), queuing last segment\n", __func__);
+
+ frame = skb_push(skb, TTP_HEADER);
+ frame[0] = 0x00; /* Clear more bit */
+
+ /* Queue fragment */
+ skb_queue_tail(&self->tx_queue, skb);
+}
+
+/*
+ * Function irttp_param_max_sdu_size (self, param)
+ *
+ * Handle the MaxSduSize parameter in the connect frames, this function
+ * will be called both when this parameter needs to be inserted into, and
+ * extracted from the connect frames
+ */
+static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
+ int get)
+{
+ struct tsap_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->tx_max_sdu_size;
+ else
+ self->tx_max_sdu_size = param->pv.i;
+
+ pr_debug("%s(), MaxSduSize=%d\n", __func__, param->pv.i);
+
+ return 0;
+}
+
+/*************************** CLIENT CALLS ***************************/
+/************************** LMP CALLBACKS **************************/
+/* Everything is happily mixed up. Waiting for next clean up - Jean II */
+
+/*
+ * Initialization, that has to be done on new tsap
+ * instance allocation and on duplication
+ */
+static void irttp_init_tsap(struct tsap_cb *tsap)
+{
+ spin_lock_init(&tsap->lock);
+ init_timer(&tsap->todo_timer);
+
+ skb_queue_head_init(&tsap->rx_queue);
+ skb_queue_head_init(&tsap->tx_queue);
+ skb_queue_head_init(&tsap->rx_fragments);
+}
+
+/*
+ * Function irttp_open_tsap (stsap_sel, credit, notify)
+ *
+ * Create TSAP connection endpoint.
+ */
+struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
+{
+ struct tsap_cb *self;
+ struct lsap_cb *lsap;
+ notify_t ttp_notify;
+
+ IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);
+
+ /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
+ * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
+ * JeanII */
+ if ((stsap_sel != LSAP_ANY) &&
+ ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
+ pr_debug("%s(), invalid tsap!\n", __func__);
+ return NULL;
+ }
+
+ self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ if (self == NULL)
+ return NULL;
+
+ /* Initialize internal objects */
+ irttp_init_tsap(self);
+
+ /* Initialise todo timer */
+ self->todo_timer.data = (unsigned long) self;
+ self->todo_timer.function = &irttp_todo_expired;
+
+ /* Initialize callbacks for IrLMP to use */
+ irda_notify_init(&ttp_notify);
+ ttp_notify.connect_confirm = irttp_connect_confirm;
+ ttp_notify.connect_indication = irttp_connect_indication;
+ ttp_notify.disconnect_indication = irttp_disconnect_indication;
+ ttp_notify.data_indication = irttp_data_indication;
+ ttp_notify.udata_indication = irttp_udata_indication;
+ ttp_notify.flow_indication = irttp_flow_indication;
+ if (notify->status_indication != NULL)
+ ttp_notify.status_indication = irttp_status_indication;
+ ttp_notify.instance = self;
+ strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
+
+ self->magic = TTP_TSAP_MAGIC;
+ self->connected = FALSE;
+
+ /*
+ * Create LSAP at IrLMP layer
+ */
+ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
+ if (lsap == NULL) {
+ pr_debug("%s: unable to allocate LSAP!!\n", __func__);
+ __irttp_close_tsap(self);
+ return NULL;
+ }
+
+ /*
+ * If user specified LSAP_ANY as source TSAP selector, then IrLMP
+ * will replace it with whatever source selector which is free, so
+ * the stsap_sel we have might not be valid anymore
+ */
+ self->stsap_sel = lsap->slsap_sel;
+ pr_debug("%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
+
+ self->notify = *notify;
+ self->lsap = lsap;
+
+ hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);
+
+ if (credit > TTP_RX_MAX_CREDIT)
+ self->initial_credit = TTP_RX_MAX_CREDIT;
+ else
+ self->initial_credit = credit;
+
+ return self;
+}
+EXPORT_SYMBOL(irttp_open_tsap);
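+
+/*
+ * Typical client-side sketch (hypothetical my_* callbacks and self pointer;
+ * the initial credit of 14 is arbitrary and would be capped to
+ * TTP_RX_MAX_CREDIT above if it were too large) :
+ *
+ *	notify_t notify;
+ *
+ *	irda_notify_init(&notify);
+ *	notify.connect_confirm       = my_connect_confirm;
+ *	notify.connect_indication    = my_connect_indication;
+ *	notify.disconnect_indication = my_disconnect_indication;
+ *	notify.data_indication       = my_data_indication;
+ *	notify.instance = self;
+ *	strlcpy(notify.name, "MyClient", sizeof(notify.name));
+ *
+ *	self->tsap = irttp_open_tsap(LSAP_ANY, 14, &notify);
+ *	if (self->tsap == NULL)
+ *		return -ENOMEM;
+ */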
+
+/*
+ * Function irttp_close (handle)
+ *
+ * Remove an instance of a TSAP. This function should only deal with the
+ * deallocation of the TSAP, and resetting of the TSAP's values.
+ *
+ */
+static void __irttp_close_tsap(struct tsap_cb *self)
+{
+ /* First make sure we're connected. */
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ irttp_flush_queues(self);
+
+ del_timer(&self->todo_timer);
+
+ /* This one won't be cleaned up if we are disconnect_pend + close_pend
+ * and we receive a disconnect_indication */
+ if (self->disconnect_skb)
+ dev_kfree_skb(self->disconnect_skb);
+
+ self->connected = FALSE;
+ self->magic = ~TTP_TSAP_MAGIC;
+
+ kfree(self);
+}
+
+/*
+ * Function irttp_close (self)
+ *
+ * Remove TSAP from list of all TSAPs and then deallocate all resources
+ * associated with this TSAP
+ *
+ * Note : because we *free* the tsap structure, it is the responsibility
+ * of the caller to make sure we are called only once and to deal with
+ * possible race conditions. - Jean II
+ */
+int irttp_close_tsap(struct tsap_cb *self)
+{
+ struct tsap_cb *tsap;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+
+ /* Make sure tsap has been disconnected */
+ if (self->connected) {
+ /* Check if disconnect is not pending */
+ if (!test_bit(0, &self->disconnect_pend)) {
+ net_warn_ratelimited("%s: TSAP still connected!\n",
+ __func__);
+ irttp_disconnect_request(self, NULL, P_NORMAL);
+ }
+ self->close_pend = TRUE;
+ irttp_start_todo_timer(self, HZ/10);
+
+ return 0; /* Will be back! */
+ }
+
+ tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);
+
+ IRDA_ASSERT(tsap == self, return -1;);
+
+ /* Close corresponding LSAP */
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
+ self->lsap = NULL;
+ }
+
+ __irttp_close_tsap(self);
+
+ return 0;
+}
+EXPORT_SYMBOL(irttp_close_tsap);
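Teardown order matters less than it might seem, since irttp_close_tsap() copes with a still-connected TSAP by deferring the close to the todo timer, but an explicit disconnect first keeps the peer informed. A hedged client-side sketch (hypothetical helper, not part of the patch):

/* Disconnect (draining or flushing the tx queue depending on priority)
 * and then release the control block. */
static void example_teardown(struct tsap_cb *tsap)
{
	irttp_disconnect_request(tsap, NULL, P_NORMAL);
	irttp_close_tsap(tsap);
}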
+
+/*
+ * Function irttp_udata_request (self, skb)
+ *
+ * Send unreliable data on this TSAP
+ *
+ */
+int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ /* Take shortcut on zero byte packets */
+ if (skb->len == 0) {
+ ret = 0;
+ goto err;
+ }
+
+ /* Check that nothing bad happens */
+ if (!self->connected) {
+ net_warn_ratelimited("%s(), Not connected\n", __func__);
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ if (skb->len > self->max_seg_size) {
+ net_err_ratelimited("%s(), UData is too large for IrLAP!\n",
+ __func__);
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ irlmp_udata_request(self->lsap, skb);
+ self->stats.tx_packets++;
+
+ return 0;
+
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+EXPORT_SYMBOL(irttp_udata_request);
+
+
+/*
+ * Function irttp_data_request (handle, skb)
+ *
+ *    Queue frame for transmission. If SAR is enabled, fragment the frame
+ * and queue the fragments for transmission
+ */
+int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
+{
+ __u8 *frame;
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ pr_debug("%s() : queue len = %d\n", __func__,
+ skb_queue_len(&self->tx_queue));
+
+ /* Take shortcut on zero byte packets */
+ if (skb->len == 0) {
+ ret = 0;
+ goto err;
+ }
+
+ /* Check that nothing bad happens */
+ if (!self->connected) {
+ net_warn_ratelimited("%s: Not connected\n", __func__);
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ /*
+ * Check if SAR is disabled, and the frame is larger than what fits
+ * inside an IrLAP frame
+ */
+ if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
+ net_err_ratelimited("%s: SAR disabled, and data is too large for IrLAP!\n",
+ __func__);
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ /*
+ * Check if SAR is enabled, and the frame is larger than the
+ * TxMaxSduSize
+ */
+ if ((self->tx_max_sdu_size != 0) &&
+ (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
+ (skb->len > self->tx_max_sdu_size)) {
+ net_err_ratelimited("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
+ __func__);
+ ret = -EMSGSIZE;
+ goto err;
+ }
+ /*
+ * Check if transmit queue is full
+ */
+ if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
+ /*
+ * Give it a chance to empty itself
+ */
+ irttp_run_tx_queue(self);
+
+ /* Drop packet. This error code should trigger the caller
+ * to resend the data in the client code - Jean II */
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ /* Queue frame, or queue frame segments */
+ if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
+ /* Queue frame */
+ IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
+ frame = skb_push(skb, TTP_HEADER);
+ frame[0] = 0x00; /* Clear more bit */
+
+ skb_queue_tail(&self->tx_queue, skb);
+ } else {
+ /*
+		 * Fragment the frame; this function will also queue the
+		 * fragments. We don't care that the transmit queue may be
+		 * overfilled by all the segments for a little while.
+ */
+ irttp_fragment_skb(self, skb);
+ }
+
+ /* Check if we can accept more data from client */
+ if ((!self->tx_sdu_busy) &&
+ (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
+ /* Tx queue filling up, so stop client. */
+ if (self->notify.flow_indication) {
+ self->notify.flow_indication(self->notify.instance,
+ self, FLOW_STOP);
+ }
+ /* self->tx_sdu_busy is the state of the client.
+ * Update state after notifying client to avoid
+ * race condition with irttp_flow_indication().
+		 * If the queue empties itself after our test but before
+ * we set the flag, we will fix ourselves below in
+ * irttp_run_tx_queue().
+ * Jean II */
+ self->tx_sdu_busy = TRUE;
+ }
+
+ /* Try to make some progress */
+ irttp_run_tx_queue(self);
+
+ return 0;
+
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+EXPORT_SYMBOL(irttp_data_request);
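A hedged sending sketch (helper name and GFP flag are assumptions, not part of the patch): the caller is responsible for reserving headroom for the TTP, LMP and LAP headers, using the max_header_size value reported through connect_confirm()/connect_indication().

/* Queue one SDU on a connected TSAP; irttp_data_request() consumes the
 * skb on both the success and the error paths. */
static int example_send(struct tsap_cb *tsap, const void *data, size_t len,
			unsigned int max_header_size)
{
	struct sk_buff *skb;

	skb = alloc_skb(max_header_size + len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, max_header_size);	/* room for TTP + LMP + LAP */
	skb_put_data(skb, data, len);

	return irttp_data_request(tsap, skb);
}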
+
+/*
+ * Function irttp_run_tx_queue (self)
+ *
+ * Transmit packets queued for transmission (if possible)
+ *
+ */
+static void irttp_run_tx_queue(struct tsap_cb *self)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ int n;
+
+ pr_debug("%s() : send_credit = %d, queue_len = %d\n",
+ __func__,
+ self->send_credit, skb_queue_len(&self->tx_queue));
+
+ /* Get exclusive access to the tx queue, otherwise don't touch it */
+ if (irda_lock(&self->tx_queue_lock) == FALSE)
+ return;
+
+ /* Try to send out frames as long as we have credits
+ * and as long as LAP is not full. If LAP is full, it will
+ * poll us through irttp_flow_indication() - Jean II */
+ while ((self->send_credit > 0) &&
+ (!irlmp_lap_tx_queue_full(self->lsap)) &&
+ (skb = skb_dequeue(&self->tx_queue))) {
+ /*
+ * Since we can transmit and receive frames concurrently,
+ * the code below is a critical region and we must assure that
+ * nobody messes with the credits while we update them.
+ */
+ spin_lock_irqsave(&self->lock, flags);
+
+ n = self->avail_credit;
+ self->avail_credit = 0;
+
+ /* Only room for 127 credits in frame */
+ if (n > 127) {
+ self->avail_credit = n-127;
+ n = 127;
+ }
+ self->remote_credit += n;
+ self->send_credit--;
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /*
+ * More bit must be set by the data_request() or fragment()
+ * functions
+ */
+ skb->data[0] |= (n & 0x7f);
+
+ /* Detach from socket.
+ * The current skb has a reference to the socket that sent
+ * it (skb->sk). When we pass it to IrLMP, the skb will be
+ * stored in in IrLAP (self->wx_list). When we are within
+		 * stored in IrLAP (self->wx_list). When we are within
+ * have a reference to a socket. So, we drop it here.
+ *
+ * Why does it matter ?
+ * When the skb is freed (kfree_skb), if it is associated
+		 * with a socket, it releases buffer space on the socket
+ * (through sock_wfree() and sock_def_write_space()).
+		 * If the socket no longer exists, we may crash. Hard.
+ * When we close a socket, we make sure that associated packets
+ * in IrTTP are freed. However, we have no way to cancel
+ * the packet that we have passed to IrLAP. So, if a packet
+ * remains in IrLAP (retry on the link or else) after we
+ * close the socket, we are dead !
+ * Jean II */
+ if (skb->sk != NULL) {
+ /* IrSOCK application, IrOBEX, ... */
+ skb_orphan(skb);
+ }
+ /* IrCOMM over IrTTP, IrLAN, ... */
+
+ /* Pass the skb to IrLMP - done */
+ irlmp_data_request(self->lsap, skb);
+ self->stats.tx_packets++;
+ }
+
+ /* Check if we can accept more frames from client.
+	 * We don't want to wait for the todo timer to do that, and we
+	 * can't use tasklets (grr...), so we are obliged to give control
+	 * to the client. That's ok, this test won't be true too often
+ * (max once per LAP window) and we are called from places
+ * where we can spend a bit of time doing stuff. - Jean II */
+ if ((self->tx_sdu_busy) &&
+ (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
+ (!self->close_pend)) {
+ if (self->notify.flow_indication)
+ self->notify.flow_indication(self->notify.instance,
+ self, FLOW_START);
+
+ /* self->tx_sdu_busy is the state of the client.
+ * We don't really have a race here, but it's always safer
+ * to update our state after the client - Jean II */
+ self->tx_sdu_busy = FALSE;
+ }
+
+ /* Reset lock */
+ self->tx_queue_lock = 0;
+}
+
+/*
+ * Function irttp_give_credit (self)
+ *
+ * Send a dataless flowdata TTP-PDU and give available credit to peer
+ * TSAP
+ */
+static inline void irttp_give_credit(struct tsap_cb *self)
+{
+ struct sk_buff *tx_skb = NULL;
+ unsigned long flags;
+ int n;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n",
+ __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
+
+ /* Give credit to peer */
+ tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
+ if (!tx_skb)
+ return;
+
+ /* Reserve space for LMP, and LAP header */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+
+ /*
+ * Since we can transmit and receive frames concurrently,
+ * the code below is a critical region and we must assure that
+ * nobody messes with the credits while we update them.
+ */
+ spin_lock_irqsave(&self->lock, flags);
+
+ n = self->avail_credit;
+ self->avail_credit = 0;
+
+ /* Only space for 127 credits in frame */
+ if (n > 127) {
+ self->avail_credit = n - 127;
+ n = 127;
+ }
+ self->remote_credit += n;
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ skb_put(tx_skb, 1);
+ tx_skb->data[0] = (__u8) (n & 0x7f);
+
+ irlmp_data_request(self->lsap, tx_skb);
+ self->stats.tx_packets++;
+}
+
+/*
+ * Function irttp_udata_indication (instance, sap, skb)
+ *
+ * Received some unit-data (unreliable)
+ *
+ */
+static int irttp_udata_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct tsap_cb *self;
+ int err;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ IRDA_ASSERT(skb != NULL, return -1;);
+
+ self->stats.rx_packets++;
+
+ /* Just pass data to layer above */
+ if (self->notify.udata_indication) {
+ err = self->notify.udata_indication(self->notify.instance,
+ self, skb);
+ /* Same comment as in irttp_do_data_indication() */
+ if (!err)
+ return 0;
+ }
+ /* Either no handler, or handler returns an error */
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irttp_data_indication (instance, sap, skb)
+ *
+ * Receive segment from IrLMP.
+ *
+ */
+static int irttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct tsap_cb *self;
+ unsigned long flags;
+ int n;
+
+ self = instance;
+
+ n = skb->data[0] & 0x7f; /* Extract the credits */
+
+ self->stats.rx_packets++;
+
+ /* Deal with inbound credit
+ * Since we can transmit and receive frames concurrently,
+ * the code below is a critical region and we must assure that
+ * nobody messes with the credits while we update them.
+ */
+ spin_lock_irqsave(&self->lock, flags);
+ self->send_credit += n;
+ if (skb->len > 1)
+ self->remote_credit--;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /*
+	 * Data or dataless packet? Dataless frames contain only the
+ * TTP_HEADER.
+ */
+ if (skb->len > 1) {
+ /*
+ * We don't remove the TTP header, since we must preserve the
+		 * more bit, so the defragment routine knows what to do
+ */
+ skb_queue_tail(&self->rx_queue, skb);
+ } else {
+ /* Dataless flowdata TTP-PDU */
+ dev_kfree_skb(skb);
+ }
+
+
+ /* Push data to the higher layer.
+ * We do it synchronously because running the todo timer for each
+ * receive packet would be too much overhead and latency.
+ * By passing control to the higher layer, we run the risk that
+ * it may take time or grab a lock. Most often, the higher layer
+ * will only put packet in a queue.
+ * Anyway, packets are only dripping through the IrDA, so we can
+ * have time before the next packet.
+	 * Further, we are run from NET_BH, so the worst that can happen is
+ * us missing the optimal time to send back the PF bit in LAP.
+ * Jean II */
+ irttp_run_rx_queue(self);
+
+ /* We now give credits to peer in irttp_run_rx_queue().
+ * We need to send credit *NOW*, otherwise we are going
+ * to miss the next Tx window. The todo timer may take
+ * a while before it's run... - Jean II */
+
+ /*
+ * If the peer device has given us some credits and we didn't have
+	 * any before, then we need to schedule the tx queue.
+ * We need to do that because our Tx have stopped (so we may not
+ * get any LAP flow indication) and the user may be stopped as
+ * well. - Jean II
+ */
+ if (self->send_credit == n) {
+ /* Restart pushing stuff to LAP */
+ irttp_run_tx_queue(self);
+ /* Note : we don't want to schedule the todo timer
+ * because it has horrible latency. No tasklets
+ * because the tasklet API is broken. - Jean II */
+ }
+
+ return 0;
+}
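For reference, the leading TTP byte decoded here packs the delta-credit in bits 6..0 and the more/parameters flag in bit 7. A purely illustrative helper (not part of the patch) equivalent to the 0x7f/0x80 masks used throughout this file:

/* Decode the first TTP byte the same way irttp_data_indication() and
 * irttp_run_tx_queue() do. */
static inline void example_ttp_decode(__u8 hdr, int *credits, int *more)
{
	*credits = hdr & 0x7f;		/* credits granted by the peer */
	*more = (hdr & 0x80) != 0;	/* more-fragments / parameters bit */
}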
+
+/*
+ * Function irttp_status_indication (self, reason)
+ *
+ * Status_indication, just pass to the higher layer...
+ *
+ */
+static void irttp_status_indication(void *instance,
+ LINK_STATUS link, LOCK_STATUS lock)
+{
+ struct tsap_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ /* Check if client has already closed the TSAP and gone away */
+ if (self->close_pend)
+ return;
+
+ /*
+ * Inform service user if he has requested it
+ */
+ if (self->notify.status_indication != NULL)
+ self->notify.status_indication(self->notify.instance,
+ link, lock);
+ else
+ pr_debug("%s(), no handler\n", __func__);
+}
+
+/*
+ * Function irttp_flow_indication (self, reason)
+ *
+ * Flow_indication : IrLAP tells us to send more data.
+ *
+ */
+static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
+{
+ struct tsap_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ pr_debug("%s(instance=%p)\n", __func__, self);
+
+	/* We are "polled" directly from LAP, and the LAP wants to fill
+ * its Tx window. We want to do our best to send it data, so that
+ * we maximise the window. On the other hand, we want to limit the
+ * amount of work here so that LAP doesn't hang forever waiting
+ * for packets. - Jean II */
+
+ /* Try to send some packets. Currently, LAP calls us every time
+ * there is one free slot, so we will send only one packet.
+	 * This allows the scheduler to do its round robin - Jean II */
+ irttp_run_tx_queue(self);
+
+	/* Note regarding the interaction with the higher layer.
+	 * irttp_run_tx_queue() may call the client when its queue
+	 * starts to empty, via notify.flow_indication(). Initially,
+	 * I wanted this to happen in a tasklet, to avoid the client
+	 * grabbing the CPU, but we can't use tasklets safely. And the timer
+ * is definitely too slow.
+ * This will happen only once per LAP window, and usually at
+ * the third packet (unless window is smaller). LAP is still
+ * doing mtt and sending first packet so it's sort of OK
+ * to do that. Jean II */
+
+	/* If we need to send a disconnect, try to do it now */
+ if (self->disconnect_pend)
+ irttp_start_todo_timer(self, 0);
+}
+
+/*
+ * Function irttp_flow_request (self, command)
+ *
+ * This function could be used by the upper layers to tell IrTTP to stop
+ * delivering frames if the receive queues are starting to get full, or
+ * to tell IrTTP to start delivering frames again.
+ */
+void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
+{
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ switch (flow) {
+ case FLOW_STOP:
+ pr_debug("%s(), flow stop\n", __func__);
+ self->rx_sdu_busy = TRUE;
+ break;
+ case FLOW_START:
+ pr_debug("%s(), flow start\n", __func__);
+ self->rx_sdu_busy = FALSE;
+
+		/* Client says it can accept more data, try to free our
+ * queues ASAP - Jean II */
+ irttp_run_rx_queue(self);
+
+ break;
+ default:
+ pr_debug("%s(), Unknown flow command!\n", __func__);
+ }
+}
+EXPORT_SYMBOL(irttp_flow_request);
+
+/*
+ * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
+ *
+ * Try to connect to remote destination TSAP selector
+ *
+ */
+int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *userdata)
+{
+ struct sk_buff *tx_skb;
+ __u8 *frame;
+ __u8 n;
+
+ pr_debug("%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
+
+ IRDA_ASSERT(self != NULL, return -EBADR;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
+
+ if (self->connected) {
+ if (userdata)
+ dev_kfree_skb(userdata);
+ return -EISCONN;
+ }
+
+ /* Any userdata supplied? */
+ if (userdata == NULL) {
+ tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX_CONTROL and LAP header */
+ skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
+ } else {
+ tx_skb = userdata;
+ /*
+ * Check that the client has reserved enough space for
+ * headers
+ */
+ IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
+ { dev_kfree_skb(userdata); return -1; });
+ }
+
+ /* Initialize connection parameters */
+ self->connected = FALSE;
+ self->avail_credit = 0;
+ self->rx_max_sdu_size = max_sdu_size;
+ self->rx_sdu_size = 0;
+ self->rx_sdu_busy = FALSE;
+ self->dtsap_sel = dtsap_sel;
+
+ n = self->initial_credit;
+
+ self->remote_credit = 0;
+ self->send_credit = 0;
+
+ /*
+ * Give away max 127 credits for now
+ */
+ if (n > 127) {
+ self->avail_credit = n - 127;
+ n = 127;
+ }
+
+ self->remote_credit = n;
+
+ /* SAR enabled? */
+ if (max_sdu_size > 0) {
+ IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
+ { dev_kfree_skb(tx_skb); return -1; });
+
+ /* Insert SAR parameters */
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
+
+ frame[0] = TTP_PARAMETERS | n;
+ frame[1] = 0x04; /* Length */
+ frame[2] = 0x01; /* MaxSduSize */
+ frame[3] = 0x02; /* Value length */
+
+ put_unaligned(cpu_to_be16((__u16) max_sdu_size),
+ (__be16 *)(frame+4));
+ } else {
+ /* Insert plain TTP header */
+ frame = skb_push(tx_skb, TTP_HEADER);
+
+ /* Insert initial credit in frame */
+ frame[0] = n & 0x7f;
+ }
+
+ /* Connect with IrLMP. No QoS parameters for now */
+ return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
+ tx_skb);
+}
+EXPORT_SYMBOL(irttp_connect_request);
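A hedged connect sketch (hypothetical helper, not part of the patch): saddr/daddr would normally come from IrLMP discovery, which is outside this file. TTP_SAR_UNBOUND asks for reassembly with no SDU size limit, while 0 (TTP_SAR_DISABLE) delivers raw segments, matching the checks in irttp_run_rx_queue() further down.

/* Connect an already-open TSAP to a remote TSAP selector. */
static int example_connect(struct tsap_cb *tsap, __u8 dtsap_sel,
			   __u32 saddr, __u32 daddr)
{
	return irttp_connect_request(tsap, dtsap_sel, saddr, daddr,
				     NULL,		/* default QoS */
				     TTP_SAR_UNBOUND,	/* reassemble SDUs */
				     NULL);		/* no userdata */
}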
+
+/*
+ * Function irttp_connect_confirm (handle, qos, skb)
+ *
+ *    Connection has been confirmed by the peer; pass it up to the service user.
+ *
+ */
+static void irttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_seg_size,
+ __u8 max_header_size, struct sk_buff *skb)
+{
+ struct tsap_cb *self;
+ int parameters;
+ int ret;
+ __u8 plen;
+ __u8 n;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ self->max_seg_size = max_seg_size - TTP_HEADER;
+ self->max_header_size = max_header_size + TTP_HEADER;
+
+ /*
+ * Check if we have got some QoS parameters back! This should be the
+ * negotiated QoS for the link.
+ */
+ if (qos) {
+ pr_debug("IrTTP, Negotiated BAUD_RATE: %02x\n",
+ qos->baud_rate.bits);
+ pr_debug("IrTTP, Negotiated BAUD_RATE: %d bps.\n",
+ qos->baud_rate.value);
+ }
+
+ n = skb->data[0] & 0x7f;
+
+ pr_debug("%s(), Initial send_credit=%d\n", __func__, n);
+
+ self->send_credit = n;
+ self->tx_max_sdu_size = 0;
+ self->connected = TRUE;
+
+ parameters = skb->data[0] & 0x80;
+
+ IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
+ if (parameters) {
+ plen = skb->data[0];
+
+ ret = irda_param_extract_all(self, skb->data+1,
+ IRDA_MIN(skb->len-1, plen),
+ &param_info);
+
+ /* Any errors in the parameter list? */
+ if (ret < 0) {
+ net_warn_ratelimited("%s: error extracting parameters\n",
+ __func__);
+ dev_kfree_skb(skb);
+
+ /* Do not accept this connection attempt */
+ return;
+ }
+ /* Remove parameters */
+ skb_pull(skb, IRDA_MIN(skb->len, plen+1));
+ }
+
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
+
+ pr_debug("%s(), MaxSduSize=%d\n", __func__,
+ self->tx_max_sdu_size);
+
+ if (self->notify.connect_confirm) {
+ self->notify.connect_confirm(self->notify.instance, self, qos,
+ self->tx_max_sdu_size,
+ self->max_header_size, skb);
+ } else
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function irttp_connect_indication (handle, skb)
+ *
+ * Some other device is connecting to this TSAP
+ *
+ */
+static void irttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
+ struct sk_buff *skb)
+{
+ struct tsap_cb *self;
+ struct lsap_cb *lsap;
+ int parameters;
+ int ret;
+ __u8 plen;
+ __u8 n;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ IRDA_ASSERT(skb != NULL, return;);
+
+ lsap = sap;
+
+ self->max_seg_size = max_seg_size - TTP_HEADER;
+ self->max_header_size = max_header_size+TTP_HEADER;
+
+ pr_debug("%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
+
+	/* Need to update dtsap_sel if it's equal to LSAP_ANY */
+ self->dtsap_sel = lsap->dlsap_sel;
+
+ n = skb->data[0] & 0x7f;
+
+ self->send_credit = n;
+ self->tx_max_sdu_size = 0;
+
+ parameters = skb->data[0] & 0x80;
+
+ IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
+ if (parameters) {
+ plen = skb->data[0];
+
+ ret = irda_param_extract_all(self, skb->data+1,
+ IRDA_MIN(skb->len-1, plen),
+ &param_info);
+
+ /* Any errors in the parameter list? */
+ if (ret < 0) {
+ net_warn_ratelimited("%s: error extracting parameters\n",
+ __func__);
+ dev_kfree_skb(skb);
+
+ /* Do not accept this connection attempt */
+ return;
+ }
+
+ /* Remove parameters */
+ skb_pull(skb, IRDA_MIN(skb->len, plen+1));
+ }
+
+ if (self->notify.connect_indication) {
+ self->notify.connect_indication(self->notify.instance, self,
+ qos, self->tx_max_sdu_size,
+ self->max_header_size, skb);
+ } else
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function irttp_connect_response (handle, userdata)
+ *
+ * Service user is accepting the connection, just pass it down to
+ * IrLMP!
+ *
+ */
+int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
+ struct sk_buff *userdata)
+{
+ struct sk_buff *tx_skb;
+ __u8 *frame;
+ int ret;
+ __u8 n;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+
+ pr_debug("%s(), Source TSAP selector=%02x\n", __func__,
+ self->stsap_sel);
+
+ /* Any userdata supplied? */
+ if (userdata == NULL) {
+ tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
+ GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX_CONTROL and LAP header */
+ skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
+ } else {
+ tx_skb = userdata;
+ /*
+ * Check that the client has reserved enough space for
+ * headers
+ */
+ IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
+ { dev_kfree_skb(userdata); return -1; });
+ }
+
+ self->avail_credit = 0;
+ self->remote_credit = 0;
+ self->rx_max_sdu_size = max_sdu_size;
+ self->rx_sdu_size = 0;
+ self->rx_sdu_busy = FALSE;
+
+ n = self->initial_credit;
+
+ /* Frame has only space for max 127 credits (7 bits) */
+ if (n > 127) {
+ self->avail_credit = n - 127;
+ n = 127;
+ }
+
+ self->remote_credit = n;
+ self->connected = TRUE;
+
+ /* SAR enabled? */
+ if (max_sdu_size > 0) {
+ IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
+ { dev_kfree_skb(tx_skb); return -1; });
+
+ /* Insert TTP header with SAR parameters */
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
+
+ frame[0] = TTP_PARAMETERS | n;
+ frame[1] = 0x04; /* Length */
+
+ /* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1, */
+/* TTP_SAR_HEADER, &param_info) */
+
+ frame[2] = 0x01; /* MaxSduSize */
+ frame[3] = 0x02; /* Value length */
+
+ put_unaligned(cpu_to_be16((__u16) max_sdu_size),
+ (__be16 *)(frame+4));
+ } else {
+ /* Insert TTP header */
+ frame = skb_push(tx_skb, TTP_HEADER);
+
+ frame[0] = n & 0x7f;
+ }
+
+ ret = irlmp_connect_response(self->lsap, tx_skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(irttp_connect_response);
+
+/*
+ * Function irttp_dup (self, instance)
+ *
+ * Duplicate TSAP, can be used by servers to confirm a connection on a
+ * new TSAP so it can keep listening on the old one.
+ */
+struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
+{
+ struct tsap_cb *new;
+ unsigned long flags;
+
+ /* Protect our access to the old tsap instance */
+ spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
+
+ /* Find the old instance */
+ if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
+ pr_debug("%s(), unable to find TSAP\n", __func__);
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
+ return NULL;
+ }
+
+ /* Allocate a new instance */
+ new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
+ if (!new) {
+ pr_debug("%s(), unable to kmalloc\n", __func__);
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
+ return NULL;
+ }
+ spin_lock_init(&new->lock);
+
+ /* We don't need the old instance any more */
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
+
+ /* Try to dup the LSAP (may fail if we were too slow) */
+ new->lsap = irlmp_dup(orig->lsap, new);
+ if (!new->lsap) {
+ pr_debug("%s(), dup failed!\n", __func__);
+ kfree(new);
+ return NULL;
+ }
+
+ /* Not everything should be copied */
+ new->notify.instance = instance;
+
+ /* Initialize internal objects */
+ irttp_init_tsap(new);
+
+ /* This is locked */
+ hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
+
+ return new;
+}
+EXPORT_SYMBOL(irttp_dup);
+
+/*
+ * Function irttp_disconnect_request (self)
+ *
+ * Close this connection please! If priority is high, the queued data
+ * segments, if any, will be deallocated first
+ *
+ */
+int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
+ int priority)
+{
+ int ret;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+
+ /* Already disconnected? */
+ if (!self->connected) {
+ pr_debug("%s(), already disconnected!\n", __func__);
+ if (userdata)
+ dev_kfree_skb(userdata);
+ return -1;
+ }
+
+ /* Disconnect already pending ?
+ * We need to use an atomic operation to prevent reentry. This
+	 * function may be called from various contexts, like user, timer
+ * for following a disconnect_indication() (i.e. net_bh).
+ * Jean II */
+ if (test_and_set_bit(0, &self->disconnect_pend)) {
+ pr_debug("%s(), disconnect already pending\n",
+ __func__);
+ if (userdata)
+ dev_kfree_skb(userdata);
+
+ /* Try to make some progress */
+ irttp_run_tx_queue(self);
+ return -1;
+ }
+
+ /*
+	 * Check if there are still data segments in the transmit queue
+ */
+ if (!skb_queue_empty(&self->tx_queue)) {
+ if (priority == P_HIGH) {
+ /*
+ * No need to send the queued data, if we are
+ * disconnecting right now since the data will
+ * not have any usable connection to be sent on
+ */
+ pr_debug("%s(): High priority!!()\n", __func__);
+ irttp_flush_queues(self);
+ } else if (priority == P_NORMAL) {
+ /*
+ * Must delay disconnect until after all data segments
+ * have been sent and the tx_queue is empty
+ */
+ /* We'll reuse this one later for the disconnect */
+ self->disconnect_skb = userdata; /* May be NULL */
+
+ irttp_run_tx_queue(self);
+
+ irttp_start_todo_timer(self, HZ/10);
+ return -1;
+ }
+ }
+ /* Note : we don't need to check if self->rx_queue is full and the
+ * state of self->rx_sdu_busy because the disconnect response will
+ * be sent at the LMP level (so even if the peer has its Tx queue
+ * full of data). - Jean II */
+
+ pr_debug("%s(), Disconnecting ...\n", __func__);
+ self->connected = FALSE;
+
+ if (!userdata) {
+ struct sk_buff *tx_skb;
+ tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ /*
+ * Reserve space for MUX and LAP header
+ */
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
+
+ userdata = tx_skb;
+ }
+ ret = irlmp_disconnect_request(self->lsap, userdata);
+
+ /* The disconnect is no longer pending */
+ clear_bit(0, &self->disconnect_pend); /* FALSE */
+
+ return ret;
+}
+EXPORT_SYMBOL(irttp_disconnect_request);
+
+/*
+ * Function irttp_disconnect_indication (self, reason)
+ *
+ * Disconnect indication, TSAP disconnected by peer?
+ *
+ */
+static void irttp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *skb)
+{
+ struct tsap_cb *self;
+
+ self = instance;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+	/* Prevent the higher layer from sending more data */
+ self->connected = FALSE;
+
+ /* Check if client has already tried to close the TSAP */
+ if (self->close_pend) {
+ /* In this case, the higher layer is probably gone. Don't
+ * bother it and clean up the remains - Jean II */
+ if (skb)
+ dev_kfree_skb(skb);
+ irttp_close_tsap(self);
+ return;
+ }
+
+	/* If we are here, we assume that the higher layer is still
+	 * waiting for the disconnect notification and able to process it,
+	 * even if it tried to disconnect. Otherwise, it would have already
+ * attempted to close the tsap and self->close_pend would be TRUE.
+ * Jean II */
+
+	/* No need to notify the client if it has already tried to disconnect */
+ if (self->notify.disconnect_indication)
+ self->notify.disconnect_indication(self->notify.instance, self,
+ reason, skb);
+ else
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Function irttp_do_data_indication (self, skb)
+ *
+ *    Try to deliver the reassembled skb to the layer above, and requeue it
+ *    if that should for some reason fail. We mark the rx sdu as busy to
+ *    apply back pressure if necessary.
+ */
+static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
+{
+ int err;
+
+ /* Check if client has already closed the TSAP and gone away */
+ if (self->close_pend) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ err = self->notify.data_indication(self->notify.instance, self, skb);
+
+	/* Usually the layer above will notify that its input queue is
+	 * starting to get filled by using the flow request, but this may
+	 * be difficult, so it can instead just refuse to eat the packet
+	 * and give an error back
+ */
+ if (err) {
+ pr_debug("%s() requeueing skb!\n", __func__);
+
+ /* Make sure we take a break */
+ self->rx_sdu_busy = TRUE;
+
+ /* Need to push the header in again */
+ skb_push(skb, TTP_HEADER);
+ skb->data[0] = 0x00; /* Make sure MORE bit is cleared */
+
+ /* Put skb back on queue */
+ skb_queue_head(&self->rx_queue, skb);
+ }
+}
+
+/*
+ * Function irttp_run_rx_queue (self)
+ *
+ *     Reassemble and deliver any frames waiting in the receive queue, and
+ *     check if we have any available credit to give away.
+ */
+static void irttp_run_rx_queue(struct tsap_cb *self)
+{
+ struct sk_buff *skb;
+ int more = 0;
+
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
+
+ /* Get exclusive access to the rx queue, otherwise don't touch it */
+ if (irda_lock(&self->rx_queue_lock) == FALSE)
+ return;
+
+ /*
+ * Reassemble all frames in receive queue and deliver them
+ */
+ while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
+ /* This bit will tell us if it's the last fragment or not */
+ more = skb->data[0] & 0x80;
+
+ /* Remove TTP header */
+ skb_pull(skb, TTP_HEADER);
+
+ /* Add the length of the remaining data */
+ self->rx_sdu_size += skb->len;
+
+ /*
+ * If SAR is disabled, or user has requested no reassembly
+ * of received fragments then we just deliver them
+ * immediately. This can be requested by clients that
+		 * implement byte streams without any message boundaries
+ */
+ if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
+ irttp_do_data_indication(self, skb);
+ self->rx_sdu_size = 0;
+
+ continue;
+ }
+
+ /* Check if this is a fragment, and not the last fragment */
+ if (more) {
+ /*
+ * Queue the fragment if we still are within the
+ * limits of the maximum size of the rx_sdu
+ */
+ if (self->rx_sdu_size <= self->rx_max_sdu_size) {
+ pr_debug("%s(), queueing frag\n",
+ __func__);
+ skb_queue_tail(&self->rx_fragments, skb);
+ } else {
+ /* Free the part of the SDU that is too big */
+ dev_kfree_skb(skb);
+ }
+ continue;
+ }
+ /*
+ * This is the last fragment, so time to reassemble!
+ */
+ if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
+ (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
+ /*
+ * A little optimizing. Only queue the fragment if
+ * there are other fragments. Since if this is the
+ * last and only fragment, there is no need to
+ * reassemble :-)
+ */
+ if (!skb_queue_empty(&self->rx_fragments)) {
+ skb_queue_tail(&self->rx_fragments,
+ skb);
+
+ skb = irttp_reassemble_skb(self);
+ }
+
+ /* Now we can deliver the reassembled skb */
+ irttp_do_data_indication(self, skb);
+ } else {
+ pr_debug("%s(), Truncated frame\n", __func__);
+
+ /* Free the part of the SDU that is too big */
+ dev_kfree_skb(skb);
+
+ /* Deliver only the valid but truncated part of SDU */
+ skb = irttp_reassemble_skb(self);
+
+ irttp_do_data_indication(self, skb);
+ }
+ self->rx_sdu_size = 0;
+ }
+
+ /*
+ * It's not trivial to keep track of how many credits are available
+ * by incrementing at each packet, because delivery may fail
+ * (irttp_do_data_indication() may requeue the frame) and because
+ * we need to take care of fragmentation.
+ * We want the other side to send up to initial_credit packets.
+ * We have some frames in our queues, and we have already allowed it
+ * to send remote_credit.
+ * No need to spinlock, write is atomic and self correcting...
+ * Jean II
+ */
+ self->avail_credit = (self->initial_credit -
+ (self->remote_credit +
+ skb_queue_len(&self->rx_queue) +
+ skb_queue_len(&self->rx_fragments)));
+
+	/* Do we have too many credits to send to the peer? */
+ if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
+ (self->avail_credit > 0)) {
+ /* Send explicit credit frame */
+ irttp_give_credit(self);
+ /* Note : do *NOT* check if tx_queue is non-empty, that
+ * will produce deadlocks. I repeat : send a credit frame
+ * even if we have something to send in our Tx queue.
+ * If we have credits, it means that our Tx queue is blocked.
+ *
+ * Let's suppose the peer can't keep up with our Tx. He will
+ * flow control us by not sending us any credits, and we
+ * will stop Tx and start accumulating credits here.
+ * Up to the point where the peer will stop its Tx queue,
+ * for lack of credits.
+ * Let's assume the peer application is single threaded.
+ * It will block on Tx and never consume any Rx buffer.
+ * Deadlock. Guaranteed. - Jean II
+ */
+ }
+
+ /* Reset lock */
+ self->rx_queue_lock = 0;
+}
+
+#ifdef CONFIG_PROC_FS
+struct irttp_iter_state {
+ int id;
+};
+
+static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct irttp_iter_state *iter = seq->private;
+ struct tsap_cb *self;
+
+ /* Protect our access to the tsap list */
+ spin_lock_irq(&irttp->tsaps->hb_spinlock);
+ iter->id = 0;
+
+ for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
+ self != NULL;
+ self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
+ if (iter->id == *pos)
+ break;
+ ++iter->id;
+ }
+
+ return self;
+}
+
+static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct irttp_iter_state *iter = seq->private;
+
+ ++*pos;
+ ++iter->id;
+ return (void *) hashbin_get_next(irttp->tsaps);
+}
+
+static void irttp_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_irq(&irttp->tsaps->hb_spinlock);
+}
+
+static int irttp_seq_show(struct seq_file *seq, void *v)
+{
+ const struct irttp_iter_state *iter = seq->private;
+ const struct tsap_cb *self = v;
+
+ seq_printf(seq, "TSAP %d, ", iter->id);
+ seq_printf(seq, "stsap_sel: %02x, ",
+ self->stsap_sel);
+ seq_printf(seq, "dtsap_sel: %02x\n",
+ self->dtsap_sel);
+ seq_printf(seq, " connected: %s, ",
+ self->connected ? "TRUE" : "FALSE");
+ seq_printf(seq, "avail credit: %d, ",
+ self->avail_credit);
+ seq_printf(seq, "remote credit: %d, ",
+ self->remote_credit);
+ seq_printf(seq, "send credit: %d\n",
+ self->send_credit);
+ seq_printf(seq, " tx packets: %lu, ",
+ self->stats.tx_packets);
+ seq_printf(seq, "rx packets: %lu, ",
+ self->stats.rx_packets);
+ seq_printf(seq, "tx_queue len: %u ",
+ skb_queue_len(&self->tx_queue));
+ seq_printf(seq, "rx_queue len: %u\n",
+ skb_queue_len(&self->rx_queue));
+ seq_printf(seq, " tx_sdu_busy: %s, ",
+ self->tx_sdu_busy ? "TRUE" : "FALSE");
+ seq_printf(seq, "rx_sdu_busy: %s\n",
+ self->rx_sdu_busy ? "TRUE" : "FALSE");
+ seq_printf(seq, " max_seg_size: %u, ",
+ self->max_seg_size);
+ seq_printf(seq, "tx_max_sdu_size: %u, ",
+ self->tx_max_sdu_size);
+ seq_printf(seq, "rx_max_sdu_size: %u\n",
+ self->rx_max_sdu_size);
+
+ seq_printf(seq, " Used by (%s)\n\n",
+ self->notify.name);
+ return 0;
+}
+
+static const struct seq_operations irttp_seq_ops = {
+ .start = irttp_seq_start,
+ .next = irttp_seq_next,
+ .stop = irttp_seq_stop,
+ .show = irttp_seq_show,
+};
+
+static int irttp_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &irttp_seq_ops,
+ sizeof(struct irttp_iter_state));
+}
+
+const struct file_operations irttp_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = irttp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/drivers/staging/irda/net/parameters.c b/drivers/staging/irda/net/parameters.c
new file mode 100644
index 000000000000..16ce32ffe004
--- /dev/null
+++ b/drivers/staging/irda/net/parameters.c
@@ -0,0 +1,584 @@
+/*********************************************************************
+ *
+ * Filename: parameters.c
+ * Version: 1.0
+ * Description: A more general way to handle (pi,pl,pv) parameters
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Jun 7 10:25:11 1999
+ * Modified at: Sun Jan 30 14:08:39 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/parameters.h>
+
+static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+static int irda_extract_no_value(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+
+static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func);
+
+static int irda_param_unpack(__u8 *buf, char *fmt, ...);
+
+/* Parameter value call table. Must match PV_TYPE */
+static const PV_HANDLER pv_extract_table[] = {
+ irda_extract_integer, /* Handler for any length integers */
+ irda_extract_integer, /* Handler for 8 bits integers */
+ irda_extract_integer, /* Handler for 16 bits integers */
+ irda_extract_string, /* Handler for strings */
+ irda_extract_integer, /* Handler for 32 bits integers */
+ irda_extract_octseq, /* Handler for octet sequences */
+ irda_extract_no_value /* Handler for no value parameters */
+};
+
+static const PV_HANDLER pv_insert_table[] = {
+ irda_insert_integer, /* Handler for any length integers */
+ irda_insert_integer, /* Handler for 8 bits integers */
+ irda_insert_integer, /* Handler for 16 bits integers */
+ NULL, /* Handler for strings */
+ irda_insert_integer, /* Handler for 32 bits integers */
+ NULL, /* Handler for octet sequences */
+ irda_insert_no_value /* Handler for no value parameters */
+};
+
+/*
+ * Function irda_insert_no_value (self, buf, len, pi, type, func)
+ */
+static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ irda_param_t p;
+ int ret;
+
+ p.pi = pi;
+ p.pl = 0;
+
+ /* Call handler for this parameter */
+ ret = (*func)(self, &p, PV_GET);
+
+	/* Insert pi and pl anyway (this parameter carries no value) */
+ irda_param_pack(buf, "bb", p.pi, p.pl);
+
+ if (ret < 0)
+ return ret;
+
+ return 2; /* Inserted pl+2 bytes */
+}
+
+/*
+ * Function irda_extract_no_value (self, buf, len, type, func)
+ *
+ * Extracts a parameter without a pv field (pl=0)
+ *
+ */
+static int irda_extract_no_value(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ irda_param_t p;
+ int ret;
+
+ /* Extract values anyway, since handler may need them */
+ irda_param_unpack(buf, "bb", &p.pi, &p.pl);
+
+ /* Call handler for this parameter */
+ ret = (*func)(self, &p, PV_PUT);
+
+ if (ret < 0)
+ return ret;
+
+ return 2; /* Extracted pl+2 bytes */
+}
+
+/*
+ * Function irda_insert_integer (self, buf, len, pi, type, func)
+ */
+static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ irda_param_t p;
+ int n = 0;
+ int err;
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = type & PV_MASK; /* The integer type codes the length as well */
+ p.pv.i = 0; /* Clear value */
+
+ /* Call handler for this parameter */
+ err = (*func)(self, &p, PV_GET);
+ if (err < 0)
+ return err;
+
+ /*
+ * If parameter length is still 0, then (1) this is an any length
+ * integer, and (2) the handler function does not care which length
+	 * we choose to use, so we pick the one that gives the fewest bytes.
+ */
+ if (p.pl == 0) {
+ if (p.pv.i < 0xff) {
+ pr_debug("%s(), using 1 byte\n", __func__);
+ p.pl = 1;
+ } else if (p.pv.i < 0xffff) {
+ pr_debug("%s(), using 2 bytes\n", __func__);
+ p.pl = 2;
+ } else {
+ pr_debug("%s(), using 4 bytes\n", __func__);
+ p.pl = 4; /* Default length */
+ }
+ }
+ /* Check if buffer is long enough for insertion */
+ if (len < (2+p.pl)) {
+ net_warn_ratelimited("%s: buffer too short for insertion!\n",
+ __func__);
+ return -1;
+ }
+	pr_debug("%s(), pi=%#x, pl=%d, pv=%d\n", __func__,
+ p.pi, p.pl, p.pv.i);
+ switch (p.pl) {
+ case 1:
+ n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i);
+ break;
+ case 2:
+ if (type & PV_BIG_ENDIAN)
+ p.pv.i = cpu_to_be16((__u16) p.pv.i);
+ else
+ p.pv.i = cpu_to_le16((__u16) p.pv.i);
+ n += irda_param_pack(buf, "bbs", p.pi, p.pl, (__u16) p.pv.i);
+ break;
+ case 4:
+ if (type & PV_BIG_ENDIAN)
+ cpu_to_be32s(&p.pv.i);
+ else
+ cpu_to_le32s(&p.pv.i);
+ n += irda_param_pack(buf, "bbi", p.pi, p.pl, p.pv.i);
+
+ break;
+ default:
+ net_warn_ratelimited("%s: length %d not supported\n",
+ __func__, p.pl);
+ /* Skip parameter */
+ return -1;
+ }
+
+ return p.pl+2; /* Inserted pl+2 bytes */
+}
+
+/*
+ * Function irda_extract_integer (self, buf, len, pi, type, func)
+ *
+ * Extract a possibly variable length integer from buffer, and call
+ * handler for processing of the parameter
+ */
+static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ irda_param_t p;
+ int n = 0;
+ int extract_len; /* Real length we extract */
+ int err;
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = buf[1]; /* Extract length of value */
+ p.pv.i = 0; /* Clear value */
+ extract_len = p.pl; /* Default : extract all */
+
+ /* Check if buffer is long enough for parsing */
+ if (len < (2+p.pl)) {
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
+ return -1;
+ }
+
+ /*
+ * Check that the integer length is what we expect it to be. If the
+	 * handler wants a 16-bit integer then a 32-bit one is not good enough.
+ * PV_INTEGER means that the handler is flexible.
+ */
+ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
+ net_err_ratelimited("%s: invalid parameter length! Expected %d bytes, but value had %d bytes!\n",
+ __func__, type & PV_MASK, p.pl);
+
+ /* Most parameters are bit/byte fields or little endian,
+ * so it's ok to only extract a subset of it (the subset
+ * that the handler expect). This is necessary, as some
+		 * broken implementations seem to add extra undefined bits.
+ * If the parameter is shorter than we expect or is big
+ * endian, we can't play those tricks. Jean II */
+		if ((p.pl < (type & PV_MASK)) || (type & PV_BIG_ENDIAN)) {
+ /* Skip parameter */
+ return p.pl+2;
+ } else {
+ /* Extract subset of it, fallthrough */
+ extract_len = type & PV_MASK;
+ }
+ }
+
+
+ switch (extract_len) {
+ case 1:
+ n += irda_param_unpack(buf+2, "b", &p.pv.i);
+ break;
+ case 2:
+ n += irda_param_unpack(buf+2, "s", &p.pv.i);
+ if (type & PV_BIG_ENDIAN)
+ p.pv.i = be16_to_cpu((__u16) p.pv.i);
+ else
+ p.pv.i = le16_to_cpu((__u16) p.pv.i);
+ break;
+ case 4:
+ n += irda_param_unpack(buf+2, "i", &p.pv.i);
+ if (type & PV_BIG_ENDIAN)
+ be32_to_cpus(&p.pv.i);
+ else
+ le32_to_cpus(&p.pv.i);
+ break;
+ default:
+ net_warn_ratelimited("%s: length %d not supported\n",
+ __func__, p.pl);
+
+ /* Skip parameter */
+ return p.pl+2;
+ }
+
+	pr_debug("%s(), pi=%#x, pl=%d, pv=%d\n", __func__,
+ p.pi, p.pl, p.pv.i);
+ /* Call handler for this parameter */
+ err = (*func)(self, &p, PV_PUT);
+ if (err < 0)
+ return err;
+
+ return p.pl+2; /* Extracted pl+2 bytes */
+}
+
+/*
+ * Function irda_extract_string (self, buf, len, type, func)
+ */
+static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ char str[33];
+ irda_param_t p;
+ int err;
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = buf[1]; /* Extract length of value */
+ if (p.pl > 32)
+ p.pl = 32;
+
+ pr_debug("%s(), pi=%#x, pl=%d\n", __func__,
+ p.pi, p.pl);
+
+ /* Check if buffer is long enough for parsing */
+ if (len < (2+p.pl)) {
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
+ return -1;
+ }
+
+ /* Should be safe to copy string like this since we have already
+ * checked that the buffer is long enough */
+ strncpy(str, buf+2, p.pl);
+
+ pr_debug("%s(), str=0x%02x 0x%02x\n",
+ __func__, (__u8)str[0], (__u8)str[1]);
+
+ /* Null terminate string */
+ str[p.pl] = '\0';
+
+ p.pv.c = str; /* Handler will need to take a copy */
+
+ /* Call handler for this parameter */
+ err = (*func)(self, &p, PV_PUT);
+ if (err < 0)
+ return err;
+
+ return p.pl+2; /* Extracted pl+2 bytes */
+}
+
+/*
+ * Function irda_extract_octseq (self, buf, len, type, func)
+ */
+static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi,
+ PV_TYPE type, PI_HANDLER func)
+{
+ irda_param_t p;
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = buf[1]; /* Extract length of value */
+
+ /* Check if buffer is long enough for parsing */
+ if (len < (2+p.pl)) {
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
+ return -1;
+ }
+
+ pr_debug("%s(), not impl\n", __func__);
+
+ return p.pl+2; /* Extracted pl+2 bytes */
+}
+
+/*
+ * Function irda_param_pack (skb, fmt, ...)
+ *
+ * Format:
+ *    'b' = 8 bits unsigned byte
+ *    's' = 16 bits unsigned short
+ *    'i' = 32 bits unsigned integer
+ *
+ */
+int irda_param_pack(__u8 *buf, char *fmt, ...)
+{
+ irda_pv_t arg;
+ va_list args;
+ char *p;
+ int n = 0;
+
+ va_start(args, fmt);
+
+ for (p = fmt; *p != '\0'; p++) {
+ switch (*p) {
+ case 'b': /* 8 bits unsigned byte */
+ buf[n++] = (__u8)va_arg(args, int);
+ break;
+ case 's': /* 16 bits unsigned short */
+ arg.i = (__u16)va_arg(args, int);
+ put_unaligned((__u16)arg.i, (__u16 *)(buf+n)); n+=2;
+ break;
+ case 'i': /* 32 bits unsigned integer */
+ arg.i = va_arg(args, __u32);
+ put_unaligned(arg.i, (__u32 *)(buf+n)); n+=4;
+ break;
+#if 0
+ case 'c': /* \0 terminated string */
+ arg.c = va_arg(args, char *);
+ strcpy(buf+n, arg.c);
+ n += strlen(arg.c) + 1;
+ break;
+#endif
+ default:
+ va_end(args);
+ return -1;
+ }
+ }
+ va_end(args);
+
+ return 0;
+}
+EXPORT_SYMBOL(irda_param_pack);
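A hedged illustration of the format string (the pi value and buffer size are assumptions, not part of the patch): packing a (pi, pl, pv) triplet with a 16-bit value mirrors what irttp_connect_request() builds by hand for its MaxSduSize parameter. Note that 's' stores the value as-is, so any byte-order conversion is the caller's job, exactly as irda_insert_integer() does above.

/* Pack pi=0x01 (MaxSduSize), pl=2 and a big-endian 16-bit value into a
 * caller-supplied buffer of at least 4 bytes. */
static void example_pack_max_sdu_size(__u8 *buf, __u16 max_sdu_size)
{
	irda_param_pack(buf, "bbs", 0x01, 0x02,
			(__u16)cpu_to_be16(max_sdu_size));
}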
+
+/*
+ * Function irda_param_unpack (skb, fmt, ...)
+ */
+static int irda_param_unpack(__u8 *buf, char *fmt, ...)
+{
+ irda_pv_t arg;
+ va_list args;
+ char *p;
+ int n = 0;
+
+ va_start(args, fmt);
+
+ for (p = fmt; *p != '\0'; p++) {
+ switch (*p) {
+ case 'b': /* 8 bits byte */
+ arg.ip = va_arg(args, __u32 *);
+ *arg.ip = buf[n++];
+ break;
+ case 's': /* 16 bits short */
+ arg.ip = va_arg(args, __u32 *);
+ *arg.ip = get_unaligned((__u16 *)(buf+n)); n+=2;
+ break;
+ case 'i': /* 32 bits unsigned integer */
+ arg.ip = va_arg(args, __u32 *);
+ *arg.ip = get_unaligned((__u32 *)(buf+n)); n+=4;
+ break;
+#if 0
+ case 'c': /* \0 terminated string */
+ arg.c = va_arg(args, char *);
+ strcpy(arg.c, buf+n);
+ n += strlen(arg.c) + 1;
+ break;
+#endif
+ default:
+ va_end(args);
+ return -1;
+ }
+
+ }
+ va_end(args);
+
+ return 0;
+}
+
+/*
+ * Function irda_param_insert (self, pi, buf, len, info)
+ *
+ * Insert the specified parameter (pi) into buffer. Returns number of
+ * bytes inserted
+ */
+int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
+ pi_param_info_t *info)
+{
+ const pi_minor_info_t *pi_minor_info;
+ __u8 pi_minor;
+ __u8 pi_major;
+ int type;
+ int ret = -1;
+ int n = 0;
+
+ IRDA_ASSERT(buf != NULL, return ret;);
+ IRDA_ASSERT(info != NULL, return ret;);
+
+ pi_minor = pi & info->pi_mask;
+ pi_major = pi >> info->pi_major_offset;
+
+ /* Check if the identifier value (pi) is valid */
+ if ((pi_major > info->len-1) ||
+ (pi_minor > info->tables[pi_major].len-1))
+ {
+ pr_debug("%s(), no handler for parameter=0x%02x\n",
+ __func__, pi);
+
+ /* Skip this parameter */
+ return -1;
+ }
+
+ /* Lookup the info on how to parse this parameter */
+ pi_minor_info = &info->tables[pi_major].pi_minor_call_table[pi_minor];
+
+ /* Find expected data type for this parameter identifier (pi)*/
+ type = pi_minor_info->type;
+
+ /* Check if handler has been implemented */
+ if (!pi_minor_info->func) {
+ net_info_ratelimited("%s: no handler for pi=%#x\n",
+ __func__, pi);
+ /* Skip this parameter */
+ return -1;
+ }
+
+ /* Insert parameter value */
+ ret = (*pv_insert_table[type & PV_MASK])(self, buf+n, len, pi, type,
+ pi_minor_info->func);
+ return ret;
+}
+EXPORT_SYMBOL(irda_param_insert);
+
+/*
+ * Function irda_param_extract (self, buf, len, info)
+ *
+ *    Parse a single parameter at the start of the buffer and call its
+ *    handler. Returns the number of bytes consumed, or a negative error.
+ *
+ */
+static int irda_param_extract(void *self, __u8 *buf, int len,
+ pi_param_info_t *info)
+{
+ const pi_minor_info_t *pi_minor_info;
+ __u8 pi_minor;
+ __u8 pi_major;
+ int type;
+ int ret = -1;
+ int n = 0;
+
+ IRDA_ASSERT(buf != NULL, return ret;);
+ IRDA_ASSERT(info != NULL, return ret;);
+
+ pi_minor = buf[n] & info->pi_mask;
+ pi_major = buf[n] >> info->pi_major_offset;
+
+ /* Check if the identifier value (pi) is valid */
+ if ((pi_major > info->len-1) ||
+ (pi_minor > info->tables[pi_major].len-1))
+ {
+ pr_debug("%s(), no handler for parameter=0x%02x\n",
+ __func__, buf[0]);
+
+ /* Skip this parameter */
+ return 2 + buf[n + 1]; /* Continue */
+ }
+
+ /* Lookup the info on how to parse this parameter */
+ pi_minor_info = &info->tables[pi_major].pi_minor_call_table[pi_minor];
+
+ /* Find expected data type for this parameter identifier (pi)*/
+ type = pi_minor_info->type;
+
+ pr_debug("%s(), pi=[%d,%d], type=%d\n", __func__,
+ pi_major, pi_minor, type);
+
+ /* Check if handler has been implemented */
+ if (!pi_minor_info->func) {
+ net_info_ratelimited("%s: no handler for pi=%#x\n",
+ __func__, buf[n]);
+ /* Skip this parameter */
+ return 2 + buf[n + 1]; /* Continue */
+ }
+
+ /* Parse parameter value */
+ ret = (*pv_extract_table[type & PV_MASK])(self, buf+n, len, buf[n],
+ type, pi_minor_info->func);
+ return ret;
+}
+
+/*
+ * Function irda_param_extract_all (self, buf, len, info)
+ *
+ * Parse all parameters. If len is correct, then everything should be
+ * safe. Returns the number of bytes that was parsed
+ *
+ */
+int irda_param_extract_all(void *self, __u8 *buf, int len,
+ pi_param_info_t *info)
+{
+ int ret = -1;
+ int n = 0;
+
+ IRDA_ASSERT(buf != NULL, return ret;);
+ IRDA_ASSERT(info != NULL, return ret;);
+
+ /*
+ * Parse all parameters. Each parameter must be at least two bytes
+ * long or else there is no point in trying to parse it
+ */
+ while (len > 2) {
+ ret = irda_param_extract(self, buf+n, len, info);
+ if (ret < 0)
+ return ret;
+
+ n += ret;
+ len -= ret;
+ }
+ return n;
+}
+EXPORT_SYMBOL(irda_param_extract_all);
diff --git a/drivers/staging/irda/net/qos.c b/drivers/staging/irda/net/qos.c
new file mode 100644
index 000000000000..25ba8509ad3e
--- /dev/null
+++ b/drivers/staging/irda/net/qos.c
@@ -0,0 +1,771 @@
+/*********************************************************************
+ *
+ * Filename: qos.c
+ * Version: 1.0
+ * Description: IrLAP QoS parameter negotiation
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Sep 9 00:00:26 1997
+ * Modified at: Sun Jan 30 14:29:16 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/export.h>
+
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/parameters.h>
+#include <net/irda/qos.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlap_frame.h>
+
+/*
+ * Maximum values of the baud rate we negotiate with the other end.
+ * Most often, you don't have to change that, because Linux-IrDA will
+ * use the maximum offered by the link layer, which usually works fine.
+ * In some very rare cases, you may want to limit it to lower speeds...
+ */
+int sysctl_max_baud_rate = 16000000;
+/*
+ * Maximum value of the lap disconnect timer we negotiate with the other end.
+ * Most often, the value below represent the best compromise, but some user
+ * Most often, the value below represents the best compromise, but some users
+ * Remember that the threshold time (early warning) is fixed to 3s...
+ */
+int sysctl_max_noreply_time = 12;
+/*
+ * Minimum turn time to be applied before transmitting to the peer.
+ * Nonzero values (usec) are used as a lower limit on the per-connection
+ * mtt value which was announced by the other end during negotiation.
+ * Might be helpful if the peer device provides too short an mtt.
+ * Default is 10us, which means using the unmodified value given by the
+ * peer unless it's 0 (0 is likely a bug in the other stack).
+ */
+unsigned int sysctl_min_tx_turn_time = 10;
+/*
+ * Maximum data size to be used in transmission in payload of LAP frame.
+ * There is a bit of confusion in the IrDA spec :
+ * The LAP spec defines the payload of a LAP frame (I field) to be
+ * 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
+ * On the other hand, the PHY mentions frames of 2048 bytes max (IrPHY
+ * 1.2, chapt 5.3.2.1, p41). But, this number includes the LAP header
+ * (2 bytes), and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
+ * payload), that's only 2042 bytes. Oops!
+ * My nsc-ircc hardware has trouble receiving 2048-byte frames at 4 Mb/s,
+ * so adjust to 2042... I don't know if this bug applies only to 2048-byte
+ * frames or to all negotiated frame sizes, but you can use the sysctl
+ * to play with this value anyway.
+ * Jean II */
+unsigned int sysctl_max_tx_data_size = 2042;
+/*
+ * Maximum transmit window, i.e. number of LAP frames between turn-around.
+ * This allows us to override what the peer told us. Some peers are buggy and
+ * don't always support what they tell us.
+ * Jean II */
+unsigned int sysctl_max_tx_window = 7;
+
+static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
+static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
+ int get);
+static int irlap_param_max_turn_time(void *instance, irda_param_t *param,
+ int get);
+static int irlap_param_data_size(void *instance, irda_param_t *param, int get);
+static int irlap_param_window_size(void *instance, irda_param_t *param,
+ int get);
+static int irlap_param_additional_bofs(void *instance, irda_param_t *parm,
+ int get);
+static int irlap_param_min_turn_time(void *instance, irda_param_t *param,
+ int get);
+
+#ifndef CONFIG_IRDA_DYNAMIC_WINDOW
+static __u32 irlap_requested_line_capacity(struct qos_info *qos);
+#endif
+
+static __u32 min_turn_times[] = { 10000, 5000, 1000, 500, 100, 50, 10, 0 }; /* us */
+static __u32 baud_rates[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000,
+ 1152000, 4000000, 16000000 }; /* bps */
+static __u32 data_sizes[] = { 64, 128, 256, 512, 1024, 2048 }; /* bytes */
+static __u32 add_bofs[] = { 48, 24, 12, 5, 3, 2, 1, 0 }; /* bytes */
+static __u32 max_turn_times[] = { 500, 250, 100, 50 }; /* ms */
+static __u32 link_disc_times[] = { 3, 8, 12, 16, 20, 25, 30, 40 }; /* secs */
+
+static __u32 max_line_capacities[10][4] = {
+ /* 500 ms 250 ms 100 ms 50 ms (max turn time) */
+ { 100, 0, 0, 0 }, /* 2400 bps */
+ { 400, 0, 0, 0 }, /* 9600 bps */
+ { 800, 0, 0, 0 }, /* 19200 bps */
+ { 1600, 0, 0, 0 }, /* 38400 bps */
+ { 2360, 0, 0, 0 }, /* 57600 bps */
+ { 4800, 2400, 960, 480 }, /* 115200 bps */
+ { 28800, 11520, 5760, 2880 }, /* 576000 bps */
+ { 57600, 28800, 11520, 5760 }, /* 1152000 bps */
+ { 200000, 100000, 40000, 20000 }, /* 4000000 bps */
+ { 800000, 400000, 160000, 80000 }, /* 16000000 bps */
+};
+
+static const pi_minor_info_t pi_minor_call_table_type_0[] = {
+ { NULL, 0 },
+/* 01 */{ irlap_param_baud_rate, PV_INTEGER | PV_LITTLE_ENDIAN },
+ { NULL, 0 },
+ { NULL, 0 },
+ { NULL, 0 },
+ { NULL, 0 },
+ { NULL, 0 },
+ { NULL, 0 },
+/* 08 */{ irlap_param_link_disconnect, PV_INT_8_BITS }
+};
+
+static const pi_minor_info_t pi_minor_call_table_type_1[] = {
+ { NULL, 0 },
+ { NULL, 0 },
+/* 82 */{ irlap_param_max_turn_time, PV_INT_8_BITS },
+/* 83 */{ irlap_param_data_size, PV_INT_8_BITS },
+/* 84 */{ irlap_param_window_size, PV_INT_8_BITS },
+/* 85 */{ irlap_param_additional_bofs, PV_INT_8_BITS },
+/* 86 */{ irlap_param_min_turn_time, PV_INT_8_BITS },
+};
+
+static const pi_major_info_t pi_major_call_table[] = {
+ { pi_minor_call_table_type_0, 9 },
+ { pi_minor_call_table_type_1, 7 },
+};
+
+static pi_param_info_t irlap_param_info = { pi_major_call_table, 2, 0x7f, 7 };
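+
+/*
+ * Illustrative decode (assuming the usual semantics of the parameters.c
+ * dispatcher: major index = PI >> 7, minor index = PI & 0x7f):
+ * PI 0x01 maps to table 0, entry 1, i.e. irlap_param_baud_rate(), while
+ * PI 0x86 maps to table 1 (0x86 >> 7), entry 6 (0x86 & 0x7f), i.e.
+ * irlap_param_min_turn_time().
+ */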
+
+/* ---------------------- LOCAL SUBROUTINES ---------------------- */
+/* Note : we start with a bunch of local subroutines.
+ * As the compiler is "one pass", this is the only way to get them to
+ * inline properly...
+ * Jean II
+ */
+/*
+ * Function value_index (value, array, size)
+ *
+ * Returns the index to the value in the specified array
+ */
+static inline int value_index(__u32 value, __u32 *array, int size)
+{
+ int i;
+
+ for (i=0; i < size; i++)
+ if (array[i] == value)
+ break;
+ return i;
+}
+
+/*
+ * Function index_value (index, array)
+ *
+ * Returns value to index in array, easy!
+ *
+ */
+static inline __u32 index_value(int index, __u32 *array)
+{
+ return array[index];
+}
+
+/*
+ * Function msb_index (word)
+ *
+ * Returns index to most significant bit (MSB) in word
+ *
+ */
+static int msb_index (__u16 word)
+{
+ __u16 msb = 0x8000;
+ int index = 15; /* Current MSB */
+
+ /* Check for buggy peers.
+ * Note : there is a small probability that it could be us, but I
+ * would expect driver authors to catch that pretty early and be
+ * able to check precisely what's going on. If an end user sees this,
+ * it's very likely the peer. - Jean II */
+ if (word == 0) {
+ net_warn_ratelimited("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
+ __func__);
+ /* The only safe choice (we don't know the array size) */
+ word = 0x1;
+ }
+
+ while (msb) {
+ if (word & msb)
+ break; /* Found it! */
+ msb >>=1;
+ index--;
+ }
+ return index;
+}
+
+/*
+ * Function value_lower_bits (value, array, size, field)
+ *
+ * Returns a bit field marking all possibilities up to and including value.
+ */
+static inline int value_lower_bits(__u32 value, __u32 *array, int size, __u16 *field)
+{
+ int i;
+ __u16 mask = 0x1;
+ __u16 result = 0x0;
+
+ for (i=0; i < size; i++) {
+ /* Add the current value to the bit field, shift mask */
+ result |= mask;
+ mask <<= 1;
+ /* Finished ? */
+ if (array[i] >= value)
+ break;
+ }
+ /* Send back a valid index */
+ if(i >= size)
+ i = size - 1; /* Last item */
+ *field = result;
+ return i;
+}
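+
+/*
+ * Worked example (illustrative): value_lower_bits(115200, baud_rates, 10,
+ * &field) walks the table until it reaches 115200, returning index 5 and
+ * setting field to 0x3f, i.e. the bits for every rate from 2400 bps up to
+ * and including 115200 bps.
+ */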
+
+/*
+ * Function value_highest_bit (value, array, size, field)
+ *
+ * Returns a bit field marking the highest possibility that does not exceed value.
+ */
+static inline int value_highest_bit(__u32 value, __u32 *array, int size, __u16 *field)
+{
+ int i;
+ __u16 mask = 0x1;
+ __u16 result = 0x0;
+
+ for (i=0; i < size; i++) {
+ /* Finished ? */
+ if (array[i] <= value)
+ break;
+ /* Shift mask */
+ mask <<= 1;
+ }
+ /* Set the current value to the bit field */
+ result |= mask;
+ /* Send back a valid index */
+ if(i >= size)
+ i = size - 1; /* Last item */
+ *field = result;
+ return i;
+}
+
+/* -------------------------- MAIN CALLS -------------------------- */
+
+/*
+ * Function irda_qos_compute_intersection (qos, new)
+ *
+ * Compute the intersection of the old QoS capabilities with new ones
+ *
+ */
+void irda_qos_compute_intersection(struct qos_info *qos, struct qos_info *new)
+{
+ IRDA_ASSERT(qos != NULL, return;);
+ IRDA_ASSERT(new != NULL, return;);
+
+ /* Apply */
+ qos->baud_rate.bits &= new->baud_rate.bits;
+ qos->window_size.bits &= new->window_size.bits;
+ qos->min_turn_time.bits &= new->min_turn_time.bits;
+ qos->max_turn_time.bits &= new->max_turn_time.bits;
+ qos->data_size.bits &= new->data_size.bits;
+ qos->link_disc_time.bits &= new->link_disc_time.bits;
+ qos->additional_bofs.bits &= new->additional_bofs.bits;
+
+ irda_qos_bits_to_value(qos);
+}
+
+/*
+ * Function irda_init_max_qos_capabilies (qos)
+ *
+ * The purpose of this function is for layers and drivers to be able to
+ * set the maximum QoS possible and then "and in" their own limitations
+ *
+ */
+void irda_init_max_qos_capabilies(struct qos_info *qos)
+{
+ int i;
+ /*
+ * These are the maximum supported values as specified on pages
+ * 39-43 in IrLAP
+ */
+
+ /* Use sysctl to set some configurable values... */
+ /* Set configured max speed */
+ i = value_lower_bits(sysctl_max_baud_rate, baud_rates, 10,
+ &qos->baud_rate.bits);
+ sysctl_max_baud_rate = index_value(i, baud_rates);
+
+ /* Set configured max disc time */
+ i = value_lower_bits(sysctl_max_noreply_time, link_disc_times, 8,
+ &qos->link_disc_time.bits);
+ sysctl_max_noreply_time = index_value(i, link_disc_times);
+
+ /* LSB is first byte, MSB is second byte */
+ qos->baud_rate.bits &= 0x03ff;
+
+ qos->window_size.bits = 0x7f;
+ qos->min_turn_time.bits = 0xff;
+ qos->max_turn_time.bits = 0x0f;
+ qos->data_size.bits = 0x3f;
+ qos->link_disc_time.bits &= 0xff;
+ qos->additional_bofs.bits = 0xff;
+}
+EXPORT_SYMBOL(irda_init_max_qos_capabilies);
+
+/*
+ * Function irlap_adjust_qos_settings (qos)
+ *
+ * Adjust QoS settings in case some values are not possible to use because
+ * of other settings
+ */
+static void irlap_adjust_qos_settings(struct qos_info *qos)
+{
+ __u32 line_capacity;
+ int index;
+
+ /*
+ * Make sure the mintt is sensible.
+ * Main culprit : Ericsson T39. - Jean II
+ */
+ if (sysctl_min_tx_turn_time > qos->min_turn_time.value) {
+ int i;
+
+ net_warn_ratelimited("%s(), Detected buggy peer, adjust mtt to %dus!\n",
+ __func__, sysctl_min_tx_turn_time);
+
+ /* We don't really need bits, but easier this way */
+ i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
+ 8, &qos->min_turn_time.bits);
+ sysctl_min_tx_turn_time = index_value(i, min_turn_times);
+ qos->min_turn_time.value = sysctl_min_tx_turn_time;
+ }
+
+ /*
+ * Not allowed to use a max turn time less than 500 ms if the baudrate
+ * is less than 115200
+ */
+ if ((qos->baud_rate.value < 115200) &&
+ (qos->max_turn_time.value < 500))
+ {
+ pr_debug("%s(), adjusting max turn time from %d to 500 ms\n",
+ __func__, qos->max_turn_time.value);
+ qos->max_turn_time.value = 500;
+ }
+
+ /*
+ * The data size must be adjusted according to the baud rate and max
+ * turn time
+ */
+ index = value_index(qos->data_size.value, data_sizes, 6);
+ line_capacity = irlap_max_line_capacity(qos->baud_rate.value,
+ qos->max_turn_time.value);
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ while ((qos->data_size.value > line_capacity) && (index > 0)) {
+ qos->data_size.value = data_sizes[index--];
+ pr_debug("%s(), reducing data size to %d\n",
+ __func__, qos->data_size.value);
+ }
+#else /* Use method described in section 6.6.11 of IrLAP */
+ while (irlap_requested_line_capacity(qos) > line_capacity) {
+ IRDA_ASSERT(index != 0, return;);
+
+ /* Must be able to send at least one frame */
+ if (qos->window_size.value > 1) {
+ qos->window_size.value--;
+ pr_debug("%s(), reducing window size to %d\n",
+ __func__, qos->window_size.value);
+ } else if (index > 1) {
+ qos->data_size.value = data_sizes[index--];
+ pr_debug("%s(), reducing data size to %d\n",
+ __func__, qos->data_size.value);
+ } else {
+ net_warn_ratelimited("%s(), nothing more we can do!\n",
+ __func__);
+ }
+ }
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
+ /*
+ * Fix tx data size according to user limits - Jean II
+ */
+ if (qos->data_size.value > sysctl_max_tx_data_size)
+		/* Allow non-discrete adjustment to avoid losing capacity */
+ qos->data_size.value = sysctl_max_tx_data_size;
+ /*
+	 * Override Tx window if the user requests it. - Jean II
+ */
+ if (qos->window_size.value > sysctl_max_tx_window)
+ qos->window_size.value = sysctl_max_tx_window;
+}
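+
+/*
+ * Worked example (illustrative): a peer at 115200 bps with a 500 ms max
+ * turn time has a line capacity of 4800 bytes (see max_line_capacities[]).
+ * If that peer also offered window size 7 and data size 2048, the requested
+ * capacity of at least 7 * (2048 + 6) = 14378 bytes exceeds 4800, so the
+ * IrLAP 6.6.11 loop above first shrinks the window and then the data size
+ * until the request fits.
+ */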
+
+/*
+ * Function irlap_qos_negotiate (self, skb)
+ *
+ * Negotiate QoS values, not really that much negotiation :-)
+ * We just set the QoS capabilities for the peer station
+ *
+ */
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = irda_param_extract_all(self, skb->data, skb->len,
+ &irlap_param_info);
+
+ /* Convert the negotiated bits to values */
+ irda_qos_bits_to_value(&self->qos_tx);
+ irda_qos_bits_to_value(&self->qos_rx);
+
+ irlap_adjust_qos_settings(&self->qos_tx);
+
+ pr_debug("Setting BAUD_RATE to %d bps.\n",
+ self->qos_tx.baud_rate.value);
+ pr_debug("Setting DATA_SIZE to %d bytes\n",
+ self->qos_tx.data_size.value);
+ pr_debug("Setting WINDOW_SIZE to %d\n",
+ self->qos_tx.window_size.value);
+ pr_debug("Setting XBOFS to %d\n",
+ self->qos_tx.additional_bofs.value);
+ pr_debug("Setting MAX_TURN_TIME to %d ms.\n",
+ self->qos_tx.max_turn_time.value);
+ pr_debug("Setting MIN_TURN_TIME to %d usecs.\n",
+ self->qos_tx.min_turn_time.value);
+ pr_debug("Setting LINK_DISC to %d secs.\n",
+ self->qos_tx.link_disc_time.value);
+ return ret;
+}
+
+/*
+ * Function irlap_insert_qos_negotiation_params (self, skb)
+ *
+ * Insert QoS negotiation parameters into the frame
+ *
+ */
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ /* Insert data rate */
+ ret = irda_param_insert(self, PI_BAUD_RATE, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert max turnaround time */
+ ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert data size */
+ ret = irda_param_insert(self, PI_DATA_SIZE, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert window size */
+ ret = irda_param_insert(self, PI_WINDOW_SIZE, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert additional BOFs */
+ ret = irda_param_insert(self, PI_ADD_BOFS, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert minimum turnaround time */
+ ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ /* Insert link disconnect/threshold time */
+ ret = irda_param_insert(self, PI_LINK_DISC, skb_tail_pointer(skb),
+ skb_tailroom(skb), &irlap_param_info);
+ if (ret < 0)
+ return ret;
+ skb_put(skb, ret);
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_baud_rate (instance, param, get)
+ *
+ * Negotiate data-rate
+ *
+ */
+static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get)
+{
+ __u16 final;
+
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get) {
+ param->pv.i = self->qos_rx.baud_rate.bits;
+ pr_debug("%s(), baud rate = 0x%02x\n",
+ __func__, param->pv.i);
+ } else {
+ /*
+ * Stations must agree on baud rate, so calculate
+ * intersection
+ */
+ pr_debug("Requested BAUD_RATE: 0x%04x\n", (__u16)param->pv.i);
+ final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits;
+
+ pr_debug("Final BAUD_RATE: 0x%04x\n", final);
+ self->qos_tx.baud_rate.bits = final;
+ self->qos_rx.baud_rate.bits = final;
+ }
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_link_disconnect (instance, param, get)
+ *
+ * Negotiate link disconnect/threshold time.
+ *
+ */
+static int irlap_param_link_disconnect(void *instance, irda_param_t *param,
+ int get)
+{
+ __u16 final;
+
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.link_disc_time.bits;
+ else {
+ /*
+ * Stations must agree on link disconnect/threshold
+ * time.
+ */
+ pr_debug("LINK_DISC: %02x\n", (__u8)param->pv.i);
+ final = (__u8) param->pv.i & self->qos_rx.link_disc_time.bits;
+
+ pr_debug("Final LINK_DISC: %02x\n", final);
+ self->qos_tx.link_disc_time.bits = final;
+ self->qos_rx.link_disc_time.bits = final;
+ }
+ return 0;
+}
+
+/*
+ * Function irlap_param_max_turn_time (instance, param, get)
+ *
+ * Negotiate the maximum turnaround time. This is a type 1 parameter and
+ * will be negotiated independently for each station
+ *
+ */
+static int irlap_param_max_turn_time(void *instance, irda_param_t *param,
+ int get)
+{
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.max_turn_time.bits;
+ else
+ self->qos_tx.max_turn_time.bits = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_data_size (instance, param, get)
+ *
+ * Negotiate the data size. This is a type 1 parameter and
+ * will be negotiated independently for each station
+ *
+ */
+static int irlap_param_data_size(void *instance, irda_param_t *param, int get)
+{
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.data_size.bits;
+ else
+ self->qos_tx.data_size.bits = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_window_size (instance, param, get)
+ *
+ * Negotiate the window size. This is a type 1 parameter and
+ * will be negotiated independently for each station
+ *
+ */
+static int irlap_param_window_size(void *instance, irda_param_t *param,
+ int get)
+{
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.window_size.bits;
+ else
+ self->qos_tx.window_size.bits = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_additional_bofs (instance, param, get)
+ *
+ * Negotiate additional BOF characters. This is a type 1 parameter and
+ * will be negotiated independently for each station.
+ */
+static int irlap_param_additional_bofs(void *instance, irda_param_t *param, int get)
+{
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.additional_bofs.bits;
+ else
+ self->qos_tx.additional_bofs.bits = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function irlap_param_min_turn_time (instance, param, get)
+ *
+ * Negotiate the minimum turn around time. This is a type 1 parameter and
+ * will be negotiated independently for each station
+ */
+static int irlap_param_min_turn_time(void *instance, irda_param_t *param,
+ int get)
+{
+ struct irlap_cb *self = (struct irlap_cb *) instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
+
+ if (get)
+ param->pv.i = self->qos_rx.min_turn_time.bits;
+ else
+ self->qos_tx.min_turn_time.bits = (__u8) param->pv.i;
+
+ return 0;
+}
+
+/*
+ * Function irlap_max_line_capacity (speed, max_turn_time)
+ *
+ * Calculate the maximum line capacity
+ *
+ */
+__u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
+{
+ __u32 line_capacity;
+ int i,j;
+
+ pr_debug("%s(), speed=%d, max_turn_time=%d\n",
+ __func__, speed, max_turn_time);
+
+ i = value_index(speed, baud_rates, 10);
+ j = value_index(max_turn_time, max_turn_times, 4);
+
+ IRDA_ASSERT(((i >=0) && (i <10)), return 0;);
+ IRDA_ASSERT(((j >=0) && (j <4)), return 0;);
+
+ line_capacity = max_line_capacities[i][j];
+
+ pr_debug("%s(), line capacity=%d bytes\n",
+ __func__, line_capacity);
+
+ return line_capacity;
+}
+
+#ifndef CONFIG_IRDA_DYNAMIC_WINDOW
+static __u32 irlap_requested_line_capacity(struct qos_info *qos)
+{
+ __u32 line_capacity;
+
+ line_capacity = qos->window_size.value *
+ (qos->data_size.value + 6 + qos->additional_bofs.value) +
+ irlap_min_turn_time_in_bytes(qos->baud_rate.value,
+ qos->min_turn_time.value);
+
+ pr_debug("%s(), requested line capacity=%d\n",
+ __func__, line_capacity);
+
+ return line_capacity;
+}
+#endif
+
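+/*
+ * Convert the negotiated bit fields to plain values by picking, for each
+ * parameter, the highest bit that both sides left set.
+ *
+ * Worked example (illustrative): baud_rate.bits of 0x01ff gives
+ * msb_index() == 8, i.e. baud_rates[8] == 4000000 bps; window_size.bits of
+ * 0x7f gives index 6, i.e. a window of 7 frames.
+ */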
+void irda_qos_bits_to_value(struct qos_info *qos)
+{
+ int index;
+
+ IRDA_ASSERT(qos != NULL, return;);
+
+ index = msb_index(qos->baud_rate.bits);
+ qos->baud_rate.value = baud_rates[index];
+
+ index = msb_index(qos->data_size.bits);
+ qos->data_size.value = data_sizes[index];
+
+ index = msb_index(qos->window_size.bits);
+ qos->window_size.value = index+1;
+
+ index = msb_index(qos->min_turn_time.bits);
+ qos->min_turn_time.value = min_turn_times[index];
+
+ index = msb_index(qos->max_turn_time.bits);
+ qos->max_turn_time.value = max_turn_times[index];
+
+ index = msb_index(qos->link_disc_time.bits);
+ qos->link_disc_time.value = link_disc_times[index];
+
+ index = msb_index(qos->additional_bofs.bits);
+ qos->additional_bofs.value = add_bofs[index];
+}
+EXPORT_SYMBOL(irda_qos_bits_to_value);
diff --git a/drivers/staging/irda/net/timer.c b/drivers/staging/irda/net/timer.c
new file mode 100644
index 000000000000..f2280f73b057
--- /dev/null
+++ b/drivers/staging/irda/net/timer.c
@@ -0,0 +1,231 @@
+/*********************************************************************
+ *
+ * Filename: timer.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Aug 16 00:59:29 1997
+ * Modified at: Wed Dec 8 12:50:34 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/delay.h>
+
+#include <net/irda/timer.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlmp.h>
+
+extern int sysctl_slot_timeout;
+
+static void irlap_slot_timer_expired(void* data);
+static void irlap_query_timer_expired(void* data);
+static void irlap_final_timer_expired(void* data);
+static void irlap_wd_timer_expired(void* data);
+static void irlap_backoff_timer_expired(void* data);
+static void irlap_media_busy_expired(void* data);
+
+void irlap_start_slot_timer(struct irlap_cb *self, int timeout)
+{
+ irda_start_timer(&self->slot_timer, timeout, (void *) self,
+ irlap_slot_timer_expired);
+}
+
+void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
+{
+ int timeout;
+
+ /* Calculate when the peer discovery should end. Normally, we
+ * get the end-of-discovery frame, so this is just in case
+ * we miss it.
+	 * Basically, we multiply the number of remaining slots by our
+	 * slot time, and add some extra time to properly receive the last
+	 * discovery packet (which is longer due to extra discovery info),
+	 * to avoid interfering with incoming connection requests, and
+	 * to accommodate devices that perform discovery more slowly than us.
+	 * Jean II */
+ timeout = msecs_to_jiffies(sysctl_slot_timeout) * (S - s)
+ + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT;
+
+ /* Set or re-set the timer. We reset the timer for each received
+	 * discovery query, which allows us to automatically adjust to
+ * the speed of the peer discovery (faster or slower). Jean II */
+	irda_start_timer(&self->query_timer, timeout, (void *) self,
+ irlap_query_timer_expired);
+}
+
+void irlap_start_final_timer(struct irlap_cb *self, int timeout)
+{
+ irda_start_timer(&self->final_timer, timeout, (void *) self,
+ irlap_final_timer_expired);
+}
+
+void irlap_start_wd_timer(struct irlap_cb *self, int timeout)
+{
+ irda_start_timer(&self->wd_timer, timeout, (void *) self,
+ irlap_wd_timer_expired);
+}
+
+void irlap_start_backoff_timer(struct irlap_cb *self, int timeout)
+{
+ irda_start_timer(&self->backoff_timer, timeout, (void *) self,
+ irlap_backoff_timer_expired);
+}
+
+void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout)
+{
+ irda_start_timer(&self->media_busy_timer, timeout,
+ (void *) self, irlap_media_busy_expired);
+}
+
+void irlap_stop_mbusy_timer(struct irlap_cb *self)
+{
+ /* If timer is activated, kill it! */
+ del_timer(&self->media_busy_timer);
+
+	/* If we are in NDM, there are a bunch of events in LAP that
+	 * may be pending due to the media_busy condition, such as
+	 * CONNECT_REQUEST and SEND_UI_FRAME. If we don't generate
+	 * an event, they will wait forever...
+ * Jean II */
+ if (self->state == LAP_NDM)
+ irlap_do_event(self, MEDIA_BUSY_TIMER_EXPIRED, NULL, NULL);
+}
+
+void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout)
+{
+ irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ irlmp_watchdog_timer_expired);
+}
+
+void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
+{
+ irda_start_timer(&self->discovery_timer, timeout, (void *) self,
+ irlmp_discovery_timer_expired);
+}
+
+void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
+{
+ irda_start_timer(&self->idle_timer, timeout, (void *) self,
+ irlmp_idle_timer_expired);
+}
+
+void irlmp_stop_idle_timer(struct lap_cb *self)
+{
+ /* If timer is activated, kill it! */
+ del_timer(&self->idle_timer);
+}
+
+/*
+ * Function irlap_slot_timer_expired (data)
+ *
+ * IrLAP slot timer has expired
+ *
+ */
+static void irlap_slot_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, SLOT_TIMER_EXPIRED, NULL, NULL);
+}
+
+/*
+ * Function irlap_query_timer_expired (data)
+ *
+ * IrLAP query timer has expired
+ *
+ */
+static void irlap_query_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, QUERY_TIMER_EXPIRED, NULL, NULL);
+}
+
+/*
+ * Function irlap_final_timer_expired (data)
+ *
+ *    IrLAP final timer has expired
+ *
+ */
+static void irlap_final_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, FINAL_TIMER_EXPIRED, NULL, NULL);
+}
+
+/*
+ * Function irlap_wd_timer_expired (data)
+ *
+ *    IrLAP watchdog timer has expired
+ *
+ */
+static void irlap_wd_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, WD_TIMER_EXPIRED, NULL, NULL);
+}
+
+/*
+ * Function irlap_backoff_timer_expired (data)
+ *
+ *    IrLAP backoff timer has expired
+ *
+ */
+static void irlap_backoff_timer_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
+
+ irlap_do_event(self, BACKOFF_TIMER_EXPIRED, NULL, NULL);
+}
+
+
+/*
+ * Function irlap_media_busy_expired (data)
+ *
+ *    IrLAP media busy timer has expired
+ *
+ */
+static void irlap_media_busy_expired(void *data)
+{
+ struct irlap_cb *self = (struct irlap_cb *) data;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ irda_device_set_media_busy(self->netdev, FALSE);
+	/* Note : the LAP event will be sent in irlap_stop_mbusy_timer(),
+ * to catch other cases where the flag is cleared (for example
+ * after a discovery) - Jean II */
+}
diff --git a/drivers/staging/irda/net/wrapper.c b/drivers/staging/irda/net/wrapper.c
new file mode 100644
index 000000000000..40a0f993bf13
--- /dev/null
+++ b/drivers/staging/irda/net/wrapper.c
@@ -0,0 +1,492 @@
+/*********************************************************************
+ *
+ * Filename: wrapper.c
+ * Version: 1.2
+ * Description: IrDA SIR async wrapper layer
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Mon Aug 4 20:40:53 1997
+ * Modified at: Fri Jan 28 13:21:09 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+
+/************************** FRAME WRAPPING **************************/
+/*
+ * Unwrap and unstuff SIR frames
+ *
+ * Note : at FIR and MIR, HDLC framing is used and usually handled
+ * by the controller, so we come here only for SIR... Jean II
+ */
+
+/*
+ * Function stuff_byte (byte, buf)
+ *
+ * Byte stuff one single byte and put the result in buffer pointed to by
+ * buf. The buffer must at all times be able to have two bytes inserted.
+ *
+ * This is in a tight loop, better inline it, so it needs to be defined before its callers.
+ * (2000 bytes on P6 200MHz, non-inlined ~370us, inline ~170us) - Jean II
+ */
+static inline int stuff_byte(__u8 byte, __u8 *buf)
+{
+ switch (byte) {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
+ case CE:
+ /* Insert transparently coded */
+ buf[0] = CE; /* Send link escape */
+ buf[1] = byte^IRDA_TRANS; /* Complement bit 5 */
+ return 2;
+ /* break; */
+ default:
+ /* Non-special value, no transparency required */
+ buf[0] = byte;
+ return 1;
+ /* break; */
+ }
+}
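+
+/*
+ * Illustrative example (assuming the standard IrLAP SIR values BOF 0xC0,
+ * EOF 0xC1, CE 0x7D and IRDA_TRANS 0x20):
+ *
+ *	__u8 out[2];
+ *
+ *	stuff_byte(0x41, out);	// returns 1, out[0] == 0x41
+ *	stuff_byte(0xC0, out);	// returns 2, out[0] == 0x7D (CE),
+ *				// out[1] == 0xC0 ^ 0x20 == 0xE0
+ */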
+
+/*
+ * Function async_wrap (skb, *tx_buff, buffsize)
+ *
+ *    Makes a new buffer with wrapping and stuffing; checks that we don't
+ *    overflow the tx buffer.
+ */
+int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
+{
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+ int xbofs;
+ int i;
+ int n;
+ union {
+ __u16 value;
+ __u8 bytes[2];
+ } fcs;
+
+ /* Initialize variables */
+ fcs.value = INIT_FCS;
+ n = 0;
+
+ /*
+	 * Send XBOFs for the required min. turn time and for the negotiated
+	 * additional XBOFs
+ */
+
+ if (cb->magic != LAP_MAGIC) {
+ /*
+ * This will happen for all frames sent from user-space.
+ * Nothing to worry about, but we set the default number of
+ * BOF's
+		 * XBOFs.
+ pr_debug("%s(), wrong magic in skb!\n", __func__);
+ xbofs = 10;
+ } else
+ xbofs = cb->xbofs + cb->xbofs_delay;
+
+ pr_debug("%s(), xbofs=%d\n", __func__, xbofs);
+
+ /* Check that we never use more than 115 + 48 xbofs */
+ if (xbofs > 163) {
+ pr_debug("%s(), too many xbofs (%d)\n", __func__,
+ xbofs);
+ xbofs = 163;
+ }
+
+ memset(tx_buff + n, XBOF, xbofs);
+ n += xbofs;
+
+ /* Start of packet character BOF */
+ tx_buff[n++] = BOF;
+
+ /* Insert frame and calc CRC */
+ for (i=0; i < skb->len; i++) {
+ /*
+ * Check for the possibility of tx buffer overflow. We use
+		 * buffsize-5 since the maximum number of bytes that can be
+ * transmitted after this point is 5.
+ */
+ if(n >= (buffsize-5)) {
+ net_err_ratelimited("%s(), tx buffer overflow (n=%d)\n",
+ __func__, n);
+ return n;
+ }
+
+ n += stuff_byte(skb->data[i], tx_buff+n);
+ fcs.value = irda_fcs(fcs.value, skb->data[i]);
+ }
+
+ /* Insert CRC in little endian format (LSB first) */
+ fcs.value = ~fcs.value;
+#ifdef __LITTLE_ENDIAN
+ n += stuff_byte(fcs.bytes[0], tx_buff+n);
+ n += stuff_byte(fcs.bytes[1], tx_buff+n);
+#else /* ifdef __BIG_ENDIAN */
+ n += stuff_byte(fcs.bytes[1], tx_buff+n);
+ n += stuff_byte(fcs.bytes[0], tx_buff+n);
+#endif
+ tx_buff[n++] = EOF;
+
+ return n;
+}
+EXPORT_SYMBOL(async_wrap_skb);
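+
+/*
+ * Typical use (hypothetical SIR driver sketch; the toy_* names are invented
+ * for illustration): the driver wraps the IrLAP frame into its own tx
+ * buffer before starting transmission.
+ *
+ *	static netdev_tx_t toy_sir_xmit(struct sk_buff *skb,
+ *					struct net_device *dev)
+ *	{
+ *		struct toy_sir *self = netdev_priv(dev);
+ *
+ *		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ *						   self->tx_buff.truesize);
+ *		toy_sir_start_tx(self);		// hypothetical helper
+ *		dev_kfree_skb(skb);
+ *		return NETDEV_TX_OK;
+ *	}
+ */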
+
+/************************* FRAME UNWRAPPING *************************/
+/*
+ * Unwrap and unstuff SIR frames
+ *
+ * Complete rewrite by Jean II :
+ * More inline, faster, more compact, more logical. Jean II
+ * (16 bytes on P6 200MHz, old 5 to 7 us, new 4 to 6 us)
+ * (24 bytes on P6 200MHz, old 9 to 10 us, new 7 to 8 us)
+ * (for reference, 115200 b/s is 1 byte every 69 us)
+ * And reduces wrapper.o by ~900B in the process ;-)
+ *
+ * Then, we have the addition of ZeroCopy, which is optional
+ * (i.e. the driver must initiate it) and improves final processing.
+ * (2005 B frame + EOF on P6 200MHz, without 30 to 50 us, with 10 to 25 us)
+ *
+ * Note : at FIR and MIR, HDLC framing is used and usually handled
+ * by the controller, so we come here only for SIR... Jean II
+ */
+
+/*
+ * We can also choose where we want to do the CRC calculation. We can
+ * do it "inline", as we receive the bytes, or "postponed", when
+ * receiving the End-Of-Frame.
+ * (16 bytes on P6 200MHz, inlined 4 to 6 us, postponed 4 to 5 us)
+ * (24 bytes on P6 200MHz, inlined 7 to 8 us, postponed 5 to 7 us)
+ * With ZeroCopy :
+ * (2005 B frame on P6 200MHz, inlined 10 to 25 us, postponed 140 to 180 us)
+ * Without ZeroCopy :
+ * (2005 B frame on P6 200MHz, inlined 30 to 50 us, postponed 150 to 180 us)
+ * (Note : numbers taken with irq disabled)
+ *
+ * From those numbers, it's not clear which is the best strategy, because
+ * we end up running through a lot of data one way or another (i.e. cache
+ * misses). I personally prefer to avoid the huge latency spike of the
+ * "postponed" solution, because it come just at the time when we have
+ * lot's of protocol processing to do and it will hurt our ability to
+ * reach low link turnaround times... Jean II
+ */
+//#define POSTPONE_RX_CRC
+
+/*
+ * Function async_bump (buf, len, stats)
+ *
+ * Got a frame, make a copy of it, and pass it up the stack! We can try
+ *    to inline it since it's only called from async_unwrap_eof()
+ */
+static inline void
+async_bump(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff)
+{
+ struct sk_buff *newskb;
+ struct sk_buff *dataskb;
+ int docopy;
+
+ /* Check if we need to copy the data to a new skb or not.
+ * If the driver doesn't use ZeroCopy Rx, we have to do it.
+	 * With ZeroCopy Rx, the rx_buff already points to a valid
+ * skb. But, if the frame is small, it is more efficient to
+ * copy it to save memory (copy will be fast anyway - that's
+ * called Rx-copy-break). Jean II */
+ docopy = ((rx_buff->skb == NULL) ||
+ (rx_buff->len < IRDA_RX_COPY_THRESHOLD));
+
+ /* Allocate a new skb */
+ newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
+ if (!newskb) {
+ stats->rx_dropped++;
+ /* We could deliver the current skb if doing ZeroCopy Rx,
+ * but this would stall the Rx path. Better drop the
+ * packet... Jean II */
+ return;
+ }
+
+ /* Align IP header to 20 bytes (i.e. increase skb->data)
+ * Note this is only useful with IrLAN, as PPP has a variable
+ * header size (2 or 1 bytes) - Jean II */
+ skb_reserve(newskb, 1);
+
+ if(docopy) {
+ /* Copy data without CRC (length already checked) */
+ skb_copy_to_linear_data(newskb, rx_buff->data,
+ rx_buff->len - 2);
+ /* Deliver this skb */
+ dataskb = newskb;
+ } else {
+ /* We are using ZeroCopy. Deliver old skb */
+ dataskb = rx_buff->skb;
+ /* And hook the new skb to the rx_buff */
+ rx_buff->skb = newskb;
+ rx_buff->head = newskb->data; /* NOT newskb->head */
+ //printk(KERN_DEBUG "ZeroCopy : len = %d, dataskb = %p, newskb = %p\n", rx_buff->len, dataskb, newskb);
+ }
+
+ /* Set proper length on skb (without CRC) */
+ skb_put(dataskb, rx_buff->len - 2);
+
+ /* Feed it to IrLAP layer */
+ dataskb->dev = dev;
+ skb_reset_mac_header(dataskb);
+ dataskb->protocol = htons(ETH_P_IRDA);
+
+ netif_rx(dataskb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += rx_buff->len;
+
+ /* Clean up rx_buff (redundant with async_unwrap_bof() ???) */
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+}
+
+/*
+ * Function async_unwrap_bof(dev, byte)
+ *
+ * Handle Beginning Of Frame character received within a frame
+ *
+ */
+static inline void
+async_unwrap_bof(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff, __u8 byte)
+{
+ switch(rx_buff->state) {
+ case LINK_ESCAPE:
+ case INSIDE_FRAME:
+ /* Not supposed to happen, the previous frame is not
+ * finished - Jean II */
+ pr_debug("%s(), Discarding incomplete frame\n",
+ __func__);
+ stats->rx_errors++;
+ stats->rx_missed_errors++;
+ irda_device_set_media_busy(dev, TRUE);
+ break;
+
+ case OUTSIDE_FRAME:
+ case BEGIN_FRAME:
+ default:
+ /* We may receive multiple BOF at the start of frame */
+ break;
+ }
+
+ /* Now receiving frame */
+ rx_buff->state = BEGIN_FRAME;
+ rx_buff->in_frame = TRUE;
+
+ /* Time to initialize receive buffer */
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+ rx_buff->fcs = INIT_FCS;
+}
+
+/*
+ * Function async_unwrap_eof(dev, byte)
+ *
+ * Handle End Of Frame character received within a frame
+ *
+ */
+static inline void
+async_unwrap_eof(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff, __u8 byte)
+{
+#ifdef POSTPONE_RX_CRC
+ int i;
+#endif
+
+ switch(rx_buff->state) {
+ case OUTSIDE_FRAME:
+ /* Probably missed the BOF */
+ stats->rx_errors++;
+ stats->rx_missed_errors++;
+ irda_device_set_media_busy(dev, TRUE);
+ break;
+
+ case BEGIN_FRAME:
+ case LINK_ESCAPE:
+ case INSIDE_FRAME:
+ default:
+ /* Note : in the case of BEGIN_FRAME and LINK_ESCAPE,
+ * the fcs will most likely not match and generate an
+ * error, as expected - Jean II */
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+
+#ifdef POSTPONE_RX_CRC
+ /* If we haven't done the CRC as we receive bytes, we
+ * must do it now... Jean II */
+ for(i = 0; i < rx_buff->len; i++)
+ rx_buff->fcs = irda_fcs(rx_buff->fcs,
+ rx_buff->data[i]);
+#endif
+
+ /* Test FCS and signal success if the frame is good */
+ if (rx_buff->fcs == GOOD_FCS) {
+ /* Deliver frame */
+ async_bump(dev, stats, rx_buff);
+ break;
+ } else {
+ /* Wrong CRC, discard frame! */
+ irda_device_set_media_busy(dev, TRUE);
+
+ pr_debug("%s(), crc error\n", __func__);
+ stats->rx_errors++;
+ stats->rx_crc_errors++;
+ }
+ break;
+ }
+}
+
+/*
+ * Function async_unwrap_ce(dev, byte)
+ *
+ * Handle Character Escape character received within a frame
+ *
+ */
+static inline void
+async_unwrap_ce(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff, __u8 byte)
+{
+ switch(rx_buff->state) {
+ case OUTSIDE_FRAME:
+ /* Activate carrier sense */
+ irda_device_set_media_busy(dev, TRUE);
+ break;
+
+ case LINK_ESCAPE:
+ net_warn_ratelimited("%s: state not defined\n", __func__);
+ break;
+
+ case BEGIN_FRAME:
+ case INSIDE_FRAME:
+ default:
+ /* Stuffed byte coming */
+ rx_buff->state = LINK_ESCAPE;
+ break;
+ }
+}
+
+/*
+ * Function async_unwrap_other(dev, byte)
+ *
+ * Handle other characters received within a frame
+ *
+ */
+static inline void
+async_unwrap_other(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff, __u8 byte)
+{
+ switch(rx_buff->state) {
+	/* This is on the critical path, cases are ordered by
+ * probability (most frequent first) - Jean II */
+ case INSIDE_FRAME:
+ /* Must be the next byte of the frame */
+ if (rx_buff->len < rx_buff->truesize) {
+ rx_buff->data[rx_buff->len++] = byte;
+#ifndef POSTPONE_RX_CRC
+ rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
+#endif
+ } else {
+ pr_debug("%s(), Rx buffer overflow, aborting\n",
+ __func__);
+ rx_buff->state = OUTSIDE_FRAME;
+ }
+ break;
+
+ case LINK_ESCAPE:
+ /*
+ * Stuffed char, complement bit 5 of byte
+ * following CE, IrLAP p.114
+ */
+ byte ^= IRDA_TRANS;
+ if (rx_buff->len < rx_buff->truesize) {
+ rx_buff->data[rx_buff->len++] = byte;
+#ifndef POSTPONE_RX_CRC
+ rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
+#endif
+ rx_buff->state = INSIDE_FRAME;
+ } else {
+ pr_debug("%s(), Rx buffer overflow, aborting\n",
+ __func__);
+ rx_buff->state = OUTSIDE_FRAME;
+ }
+ break;
+
+ case OUTSIDE_FRAME:
+ /* Activate carrier sense */
+ if(byte != XBOF)
+ irda_device_set_media_busy(dev, TRUE);
+ break;
+
+ case BEGIN_FRAME:
+ default:
+ rx_buff->data[rx_buff->len++] = byte;
+#ifndef POSTPONE_RX_CRC
+ rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
+#endif
+ rx_buff->state = INSIDE_FRAME;
+ break;
+ }
+}
+
+/*
+ * Function async_unwrap_char (dev, rx_buff, byte)
+ *
+ * Parse and de-stuff frame received from the IrDA-port
+ *
+ * This is the main entry point for SIR drivers.
+ */
+void async_unwrap_char(struct net_device *dev,
+ struct net_device_stats *stats,
+ iobuff_t *rx_buff, __u8 byte)
+{
+ switch(byte) {
+ case CE:
+ async_unwrap_ce(dev, stats, rx_buff, byte);
+ break;
+ case BOF:
+ async_unwrap_bof(dev, stats, rx_buff, byte);
+ break;
+ case EOF:
+ async_unwrap_eof(dev, stats, rx_buff, byte);
+ break;
+ default:
+ async_unwrap_other(dev, stats, rx_buff, byte);
+ break;
+ }
+}
+EXPORT_SYMBOL(async_unwrap_char);
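+
+/*
+ * Typical use (hypothetical SIR driver sketch; the toy_* names are invented
+ * for illustration): the driver feeds each byte received from its UART to
+ * async_unwrap_char(), which reassembles complete frames and hands them to
+ * IrLAP via netif_rx().
+ *
+ *	static void toy_sir_receive(struct toy_sir *self,
+ *				    const __u8 *buf, int count)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < count; i++)
+ *			async_unwrap_char(self->netdev, &self->netdev->stats,
+ *					  &self->rx_buff, buf[i]);
+ *	}
+ */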
+